Commit 6efa4b46 authored by Luben Tuikov, committed by Christian König

gpu/drm: ring_mirror_list --> pending_list

Rename "ring_mirror_list" to "pending_list", so that
the name describes what the list is, not what it does,
how it's used, or how the hardware implements it.

This also abstracts away the actual hardware
implementation: how the low-level driver
communicates with the device it drives (ring, CAM,
etc.) shouldn't be exposed to DRM.

The pending_list holds jobs that have been submitted
and are therefore out of our control. Usually this
means they are pending execution in hardware, but
"submitted and out of our control" is the more general
(inclusive) definition. (A minimal sketch of this
lifecycle follows the commit trailers below.)
Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/405573/

Cc: Alexander Deucher <Alexander.Deucher@amd.com>
Cc: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Christian König <christian.koenig@amd.com>
parent 8935ff00
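The lifecycle described in the commit message is simple to sketch. Below is a minimal, illustrative sketch (not scheduler code) of how a pending list of submitted jobs is maintained with the same kernel primitives the diff touches; the toy_* names are hypothetical and exist only for this example.

/*
 * Minimal sketch of the pending_list lifecycle, using the same kernel
 * primitives the scheduler uses. toy_sched, toy_job, toy_sched_init,
 * toy_queue_job and toy_retire_job are hypothetical names; this is not
 * part of the DRM scheduler API.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct toy_sched {
        struct list_head pending_list;  /* jobs handed to the hardware */
        spinlock_t job_list_lock;       /* protects pending_list */
};

struct toy_job {
        struct list_head list;          /* links the job into pending_list */
};

static void toy_sched_init(struct toy_sched *sched)
{
        INIT_LIST_HEAD(&sched->pending_list);
        spin_lock_init(&sched->job_list_lock);
}

/* Submission side: once queued here, the job is "out of our control". */
static void toy_queue_job(struct toy_sched *sched, struct toy_job *job)
{
        spin_lock(&sched->job_list_lock);
        list_add_tail(&job->list, &sched->pending_list);
        spin_unlock(&sched->job_list_lock);
}

/* Completion side: drop the job from the pending list once it finished. */
static void toy_retire_job(struct toy_sched *sched, struct toy_job *job)
{
        spin_lock(&sched->job_list_lock);
        list_del_init(&job->list);
        spin_unlock(&sched->job_list_lock);
}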
@@ -1427,7 +1427,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 	struct dma_fence *fence;
 
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
+	list_for_each_entry(s_job, &sched->pending_list, list) {
 		fence = sched->ops->run_job(s_job);
 		dma_fence_put(fence);
 	}
@@ -1459,7 +1459,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 
 no_preempt:
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
 			/* remove job from ring_mirror_list */
 			list_del_init(&s_job->list);
@@ -4127,8 +4127,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
 			continue;
 
 		spin_lock(&ring->sched.job_list_lock);
-		job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
+		job = list_first_entry_or_null(&ring->sched.pending_list,
 				struct drm_sched_job, list);
 		spin_unlock(&ring->sched.job_list_lock);
 		if (job)
 			return true;
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	}
 
 	/* Signal all jobs already scheduled to HW */
-	list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
+	list_for_each_entry(s_job, &sched->pending_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !list_empty(&sched->ring_mirror_list))
+	    !list_empty(&sched->pending_list))
 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
 }
@@ -258,7 +258,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 {
 	spin_lock(&sched->job_list_lock);
 
-	if (list_empty(&sched->ring_mirror_list))
+	if (list_empty(&sched->pending_list))
 		cancel_delayed_work(&sched->work_tdr);
 	else
 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
@@ -272,7 +272,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 	struct drm_gpu_scheduler *sched = s_job->sched;
 
 	spin_lock(&sched->job_list_lock);
-	list_add_tail(&s_job->list, &sched->ring_mirror_list);
+	list_add_tail(&s_job->list, &sched->pending_list);
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
@@ -286,7 +286,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 
 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
 	spin_lock(&sched->job_list_lock);
-	job = list_first_entry_or_null(&sched->ring_mirror_list,
+	job = list_first_entry_or_null(&sched->pending_list,
 				       struct drm_sched_job, list);
 
 	if (job) {
@@ -371,7 +371,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
  * Stop the scheduler and also removes and frees all completed jobs.
  * Note: bad job will not be freed as it might be used later and so it's
  * callers responsibility to release it manually if it's not part of the
- * mirror list any more.
+ * pending list any more.
  *
  */
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
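The kernel-doc above notes that drm_sched_stop() prunes finished jobs but leaves the bad job to its caller. For context, here is a condensed sketch of the recovery sequence a driver's timeout handler typically wraps around these helpers, modeled on in-tree drivers of this era; my_timedout_job() and my_hw_reset() are hypothetical stand-ins, not real callbacks.

/*
 * Condensed sketch of the recovery flow around drm_sched_stop()/start(),
 * as seen from a driver's job-timeout path. my_hw_reset() stands in for
 * the driver's actual hardware reset.
 */
#include <drm/gpu_scheduler.h>

static void my_hw_reset(struct drm_gpu_scheduler *sched); /* hypothetical */

static void my_timedout_job(struct drm_sched_job *bad)
{
        struct drm_gpu_scheduler *sched = bad->sched;

        drm_sched_stop(sched, bad);        /* park the thread, prune finished jobs */
        drm_sched_increase_karma(bad);     /* mark the offending job */

        my_hw_reset(sched);                /* hypothetical hardware reset */

        drm_sched_resubmit_jobs(sched);    /* re-run everything still pending */
        drm_sched_start(sched, true);      /* re-arm callbacks, unpark the thread */
}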
@@ -392,15 +392,15 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 		 * Add at the head of the queue to reflect it was the earliest
 		 * job extracted.
 		 */
-		list_add(&bad->list, &sched->ring_mirror_list);
+		list_add(&bad->list, &sched->pending_list);
 
 	/*
 	 * Iterate the job list from later to earlier one and either deactive
-	 * their HW callbacks or remove them from mirror list if they already
+	 * their HW callbacks or remove them from pending list if they already
 	 * signaled.
 	 * This iteration is thread safe as sched thread is stopped.
 	 */
-	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list,
+	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
 					 list) {
 		if (s_job->s_fence->parent &&
 		    dma_fence_remove_callback(s_job->s_fence->parent,
@@ -408,7 +408,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 			atomic_dec(&sched->hw_rq_count);
 		} else {
 			/*
-			 * remove job from ring_mirror_list.
+			 * remove job from pending_list.
 			 * Locking here is for concurrent resume timeout
 			 */
 			spin_lock(&sched->job_list_lock);
@@ -463,7 +463,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 	 * so no new jobs are being inserted or removed. Also concurrent
 	 * GPU recovers can't run in parallel.
 	 */
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		struct dma_fence *fence = s_job->s_fence->parent;
 
 		atomic_inc(&sched->hw_rq_count);
@@ -494,7 +494,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 EXPORT_SYMBOL(drm_sched_start);
 
 /**
- * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
+ * drm_sched_resubmit_jobs - helper to relunch job from pending ring list
  *
  * @sched: scheduler instance
  *
@@ -506,7 +506,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 	bool found_guilty = false;
 	struct dma_fence *fence;
 
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -665,7 +665,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
  *
  * @sched: scheduler instance
  *
- * Returns the next finished job from the mirror list (if there is one)
+ * Returns the next finished job from the pending list (if there is one)
  * ready for it to be destroyed.
  */
 static struct drm_sched_job *
@@ -675,7 +675,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 	/*
 	 * Don't destroy jobs while the timeout worker is running OR thread
-	 * is being parked and hence assumed to not touch ring_mirror_list
+	 * is being parked and hence assumed to not touch pending_list
 	 */
 	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    !cancel_delayed_work(&sched->work_tdr)) ||
@@ -684,11 +684,11 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
 	spin_lock(&sched->job_list_lock);
 
-	job = list_first_entry_or_null(&sched->ring_mirror_list,
+	job = list_first_entry_or_null(&sched->pending_list,
 				       struct drm_sched_job, list);
 
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
-		/* remove job from ring_mirror_list */
+		/* remove job from pending_list */
 		list_del_init(&job->list);
 	} else {
 		job = NULL;
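The hunk above pops at most one signaled job off the pending list per call. Its consumer is the scheduler's main thread, which hands each returned job to the driver's free_job callback. A trimmed, paraphrased sketch of that consumer follows; the wrapper name is hypothetical and the real logic lives inside drm_sched_main() in sched_main.c.

/*
 * Paraphrased sketch of how the scheduler thread consumes the pending
 * list: signaled jobs come back from drm_sched_get_cleanup_job() and are
 * released through the driver's free_job callback.
 */
static void process_one_cleanup_job(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *cleanup_job;

        cleanup_job = drm_sched_get_cleanup_job(sched);
        if (cleanup_job)
                sched->ops->free_job(cleanup_job);
}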
@@ -858,7 +858,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
-	INIT_LIST_HEAD(&sched->ring_mirror_list);
+	INIT_LIST_HEAD(&sched->pending_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
@@ -174,7 +174,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  * @sched: the scheduler instance on which this job is scheduled.
  * @s_fence: contains the fences for the scheduling of job.
  * @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the @drm_gpu_scheduler.pending_list.
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
  *         limit of the scheduler then the job is marked guilty and will not
@@ -203,7 +203,7 @@ struct drm_sched_job {
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
 					    int threshold)
 {
-	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+	return s_job && atomic_inc_return(&s_job->karma) > threshold;
 }
 
 /**
@@ -260,8 +260,8 @@ struct drm_sched_backend_ops {
  * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
  *            timeout interval is over.
  * @thread: the kthread on which the scheduler which run.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
  * @score: score to help loadbalancer pick a idle sched
@@ -282,7 +282,7 @@ struct drm_gpu_scheduler {
 	atomic64_t			job_id_count;
 	struct delayed_work		work_tdr;
 	struct task_struct		*thread;
-	struct list_head		ring_mirror_list;
+	struct list_head		pending_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
 	atomic_t			score;
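The rename is purely mechanical, so out-of-tree drivers or backports that peek at the scheduler's job list directly only need the matching one-line change; a hypothetical example:

/*
 * Hypothetical out-of-tree helper showing the only change such code needs:
 * s/ring_mirror_list/pending_list/ on the drm_gpu_scheduler member.
 */
#include <drm/gpu_scheduler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static bool my_sched_has_pending_jobs(struct drm_gpu_scheduler *sched)
{
        bool busy;

        spin_lock(&sched->job_list_lock);
        busy = !list_empty(&sched->pending_list);	/* was: ring_mirror_list */
        spin_unlock(&sched->job_list_lock);

        return busy;
}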