drm/amd/amdgpu embed hw_fence into amdgpu_job

Why:
Previously the hw fence was allocated separately from the job, which
caused lifetime issues and corner cases. The ideal situation is to let
the fence manage both the job's and the fence's lifetime, and to
simplify the design of the GPU scheduler.

How:
We propose to embed the hw_fence into amdgpu_job.
1. Normal job submission is covered by this method.
2. For ib_test and submissions without a parent job, keep the legacy
   way of creating a hw fence separately.
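
For reference, a minimal sketch of the amdgpu_job fields this change
relies on; the names (hw_fence, external_hw_fence, job_run_counter)
come from the hunks below, but the real definition in amdgpu_job.h
carries many more members, so treat this as an illustration only:

struct amdgpu_job {
        struct drm_sched_job base;

        /* hw fence now lives inside the job; the job's memory is
         * released when the last reference on this fence is dropped */
        struct dma_fence hw_fence;

        /* fence of a direct submit that has no parent job
         * (legacy path, e.g. ib_test) */
        struct dma_fence *external_hw_fence;

        /* 0 on the first run; non-zero means a TDR resubmit */
        uint32_t job_run_counter;
};
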
v2:
use AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT to indicate that the fence is
embedded in a job (see the sketch after these notes).
v3:
remove the redundant ring variable from amdgpu_job.
v4:
add TDR sequence support for this feature; add a job_run_counter to
indicate whether a job has been resubmitted.
v5:
add missing handling in amdgpu_fence_enable_signaling.
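
As a rough illustration of what the v2 flag enables (the actual
handling lives in amdgpu_fence.c, which is not part of the hunks shown
below, so the helper name here is made up and the body is only a
sketch): the fence free path can test the bit to tell an embedded
fence from a standalone one, and free the owning job via
container_of() instead of returning the fence to the fence slab.

/* sketch only, not the literal amdgpu_fence.c code */
static void fence_free_sketch(struct dma_fence *f)
{
        if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
                /* embedded fence: the job owns the memory */
                struct amdgpu_job *job =
                        container_of(f, struct amdgpu_job, hw_fence);

                kfree(job);
        } else {
                /* separately allocated fence (ib_test, direct submit
                 * without a parent job) */
                kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
        }
}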

Signed-off-by: Jingwen Chen <Jingwen.Chen2@amd.com>
Signed-off-by: Jack Zhang <Jack.Zhang7@hotmail.com>
Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit c530b02f39
parent 554594567b
Author: Jack Zhang
Date:   2021-05-12 15:06:35 +08:00
Committed by: Alex Deucher
9 changed files with 119 additions and 37 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -127,11 +127,16 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
         struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
         struct dma_fence *f;
+        struct dma_fence *hw_fence;
         unsigned i;
 
-        /* use sched fence if available */
-        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+        if (job->hw_fence.ops == NULL)
+                hw_fence = job->external_hw_fence;
+        else
+                hw_fence = &job->hw_fence;
+
+        /* use sched fence if available */
+        f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
         for (i = 0; i < job->num_ibs; ++i)
                 amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
@@ -142,20 +147,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
         drm_sched_job_cleanup(s_job);
 
-        dma_fence_put(job->fence);
         amdgpu_sync_free(&job->sync);
         amdgpu_sync_free(&job->sched_sync);
-        kfree(job);
+
+        /* only put the hw fence if has embedded fence */
+        if (job->hw_fence.ops != NULL)
+                dma_fence_put(&job->hw_fence);
+        else
+                kfree(job);
 }
 
 void amdgpu_job_free(struct amdgpu_job *job)
 {
         amdgpu_job_free_resources(job);
-        dma_fence_put(job->fence);
         amdgpu_sync_free(&job->sync);
         amdgpu_sync_free(&job->sched_sync);
-        kfree(job);
+
+        /* only put the hw fence if has embedded fence */
+        if (job->hw_fence.ops != NULL)
+                dma_fence_put(&job->hw_fence);
+        else
+                kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -184,11 +196,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
         job->base.sched = &ring->sched;
         r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
-        job->fence = dma_fence_get(*fence);
+        /* record external_hw_fence for direct submit */
+        job->external_hw_fence = dma_fence_get(*fence);
         if (r)
                 return r;
 
         amdgpu_job_free(job);
+        dma_fence_put(*fence);
+
         return 0;
 }
@@ -246,10 +261,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
                 if (r)
                         DRM_ERROR("Error scheduling IBs (%d)\n", r);
         }
-        /* if gpu reset, hw fence will be replaced here */
-        dma_fence_put(job->fence);
-        job->fence = dma_fence_get(fence);
 
+        if (!job->job_run_counter)
+                dma_fence_get(fence);
+        else if (finished->error < 0)
+                dma_fence_put(&job->hw_fence);
+        job->job_run_counter++;
         amdgpu_job_free_resources(job);
 
         fence = r ? ERR_PTR(r) : fence;
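
A hedged reading of the reference counting in the last hunk, restated
with explanatory comments (the comments are assumptions, not text from
the commit): here "fence" is the hw fence produced by
amdgpu_ib_schedule() (for an embedded job this is &job->hw_fence) and
"finished" is the scheduler's finished fence.

if (!job->job_run_counter)
        /* first submission: take an extra reference on the hw fence;
         * it is dropped later when the job is freed (see the
         * dma_fence_put(&job->hw_fence) in amdgpu_job_free_cb above) */
        dma_fence_get(fence);
else if (finished->error < 0)
        /* the job was resubmitted after a reset but its IBs are
         * skipped because the finished fence already carries an
         * error, so drop a reference on the embedded fence instead */
        dma_fence_put(&job->hw_fence);
/* counts submissions, so a non-zero value marks a resubmitted job */
job->job_run_counter++;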