Commit a1917b73 authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove job->adev (v2)

We can get that from the ring.

v2: squash in "drm/amdgpu: always initialize job->base.sched" (Alex)
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ee913fd9
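
The pattern this relies on: every amdgpu ring embeds its GPU scheduler, so from job->base.sched we can get back to the ring with container_of(), and from the ring to the device. A minimal sketch, assuming the to_amdgpu_ring() helper from amdgpu_ring.h; job_to_adev() is a hypothetical name used only for illustration, the patch itself open-codes this lookup at each call site:

	/* to_amdgpu_ring() resolves a scheduler back to the ring that embeds it. */
	#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

	/* Hypothetical helper: derive adev instead of storing a job->adev copy. */
	static struct amdgpu_device *job_to_adev(struct amdgpu_job *job)
	{
		return to_amdgpu_ring(job->base.sched)->adev;
	}

This only works if job->base.sched is always valid, including for jobs that are freed without ever being pushed to a scheduler, which is why the squashed v2 change makes amdgpu_job_alloc() point base.sched at the first ring's scheduler up front.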
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
 		  ring->fence_drv.sync_seq);
 
-	amdgpu_device_gpu_recover(job->adev, job, false);
+	amdgpu_device_gpu_recover(ring->adev, job, false);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	if (!*job)
 		return -ENOMEM;
 
-	(*job)->adev = adev;
+	/*
+	 * Initialize the scheduler to at least some ring so that we always
+	 * have a pointer to adev.
+	 */
+	(*job)->base.sched = &adev->rings[0]->sched;
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
 	struct dma_fence *f;
 	unsigned i;
 
@@ -93,7 +98,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
 	for (i = 0; i < job->num_ibs; ++i)
-		amdgpu_ib_free(job->adev, &job->ibs[i], f);
+		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -167,7 +172,8 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
 	if (fence && explicit) {
 		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+					      fence, false);
 			if (r)
 				DRM_ERROR("Error adding fence to sync (%d)\n", r);
 		}
@@ -190,7 +196,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 	struct dma_fence *fence = NULL, *finished;
-	struct amdgpu_device *adev;
 	struct amdgpu_job *job;
 	int r;
 
@@ -200,13 +205,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	}
 	job = to_amdgpu_job(sched_job);
 	finished = &job->base.s_fence->finished;
-	adev = job->adev;
 
 	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
 
-	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
 		dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
 
 	if (finished->error < 0) {
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -37,7 +37,6 @@ struct amdgpu_fence;
 
 struct amdgpu_job {
 	struct drm_sched_job base;
-	struct amdgpu_device *adev;
 	struct amdgpu_vm *vm;
 	struct amdgpu_sync sync;
 	struct amdgpu_sync sched_sync;