mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-25 00:52:45 -04:00
drm/msm: Get rid of the REG_ADRENO offsets
As newer GPU families are added it makes less sense to maintain a "generic" version functions for older families. Move adreno_submit() and get_rptr() into the target specific code for a2xx, a3xx and a4xx. Add a parameter to adreno_flush to pass the target specific WPTR register instead of relying on the generic register. All of this gets rid of the last of the REG_ADRENO offsets so remove all all the register definitions and infrastructure. Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org> Signed-off-by: Rob Clark <robdclark@chromium.org>
This commit is contained in:
@@ -28,6 +28,61 @@ extern bool hang_debug;
|
||||
static void a3xx_dump(struct msm_gpu *gpu);
|
||||
static bool a3xx_idle(struct msm_gpu *gpu);
|
||||
|
||||
/*
 * Build and kick off the CP (command processor) stream for a submit on an
 * a3xx GPU: emit the userspace command buffers as indirect-buffer packets,
 * write the submit's sequence number, then fence + flush so the GPU raises
 * the CACHE_FLUSH_TS IRQ when the work retires.
 */
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == submit->queue->ctx)
				break;
			/* ctx switched: emit the restore buffer like a normal one */
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			/* point the CP at the userspace command buffer */
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

	/* record the seqno of the most recently processed submit */
	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);

	/* Flush HLSQ lazy updates to make sure there is nothing
	 * pending for indirect loads after the timestamp has
	 * passed:
	 */
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, HLSQ_FLUSH);

	/* wait for idle before cache flush/interrupt */
	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
	OUT_RING(ring, 0x00000000);

	/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
	/* write the seqno into the per-ring fence slot in GPU-visible memory */
	OUT_RING(ring, rbmemptr(ring, fence));
	OUT_RING(ring, submit->seqno);

#if 0
	/* Dummy set-constant to trigger context rollover */
	OUT_PKT3(ring, CP_SET_CONSTANT, 2);
	OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
	OUT_RING(ring, 0x00000000);
#endif

	/* update the a3xx-specific WPTR register so the CP sees the new commands */
	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
}
|
||||
|
||||
static bool a3xx_me_init(struct msm_gpu *gpu)
|
||||
{
|
||||
struct msm_ringbuffer *ring = gpu->rb[0];
|
||||
@@ -51,7 +106,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
|
||||
OUT_RING(ring, 0x00000000);
|
||||
OUT_RING(ring, 0x00000000);
|
||||
|
||||
gpu->funcs->flush(gpu, ring);
|
||||
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
|
||||
return a3xx_idle(gpu);
|
||||
}
|
||||
|
||||
@@ -423,16 +478,11 @@ static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
|
||||
return state;
|
||||
}
|
||||
|
||||
/* Register offset defines for A3XX */
/*
 * Legacy mapping from the generic REG_ADRENO_* register indices to the
 * concrete AXXX register offsets for this family.  Entries marked SKIP
 * have no a3xx equivalent (e.g. the *_HI halves of 64-bit addresses,
 * since a3xx ringbuffer addresses fit in 32 bits).
 */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
};
|
||||
static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
|
||||
{
|
||||
ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
|
||||
return ring->memptrs->rptr;
|
||||
}
|
||||
|
||||
static const struct adreno_gpu_funcs funcs = {
|
||||
.base = {
|
||||
@@ -441,8 +491,7 @@ static const struct adreno_gpu_funcs funcs = {
|
||||
.pm_suspend = msm_gpu_pm_suspend,
|
||||
.pm_resume = msm_gpu_pm_resume,
|
||||
.recover = a3xx_recover,
|
||||
.submit = adreno_submit,
|
||||
.flush = adreno_flush,
|
||||
.submit = a3xx_submit,
|
||||
.active_ring = adreno_active_ring,
|
||||
.irq = a3xx_irq,
|
||||
.destroy = a3xx_destroy,
|
||||
@@ -452,6 +501,7 @@ static const struct adreno_gpu_funcs funcs = {
|
||||
.gpu_state_get = a3xx_gpu_state_get,
|
||||
.gpu_state_put = adreno_gpu_state_put,
|
||||
.create_address_space = adreno_iommu_create_address_space,
|
||||
.get_rptr = a3xx_get_rptr,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -490,7 +540,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
|
||||
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
|
||||
|
||||
adreno_gpu->registers = a3xx_registers;
|
||||
adreno_gpu->reg_offsets = a3xx_register_offsets;
|
||||
|
||||
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
|
||||
if (ret)
|
||||
|
||||
Reference in New Issue
Block a user