Commit 07bfe6bf authored by Chris Wilson

drm/i915/execlists: Convert recursive defer_request() into iterative

As this engine owns the lock around rq->sched.link (for those waiters
submitted to this engine), we can use that link as an element in a local
list. We can thus replace the recursive algorithm with an iterative walk
over the ordered list of waiters.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-1-chris@chris-wilson.co.uk
parent 9a6a6440
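
The change below replaces a depth-first recursion with a breadth-first walk that threads not-yet-visited waiters onto a local list, reusing the link field that the engine's scheduler lock already protects. Here is a minimal standalone sketch of that pattern, not the driver code: struct request, its fixed-size waiters array and the defer_iteratively() helper are hypothetical stand-ins for i915_request, sched.waiters_list and defer_request(), and a plain next pointer stands in for the intrusive list_head.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for i915_request; not the driver structure. */
struct request {
	const char *name;
	struct request *waiters[4];	/* requests waiting on this one */
	struct request *work_next;	/* link used only while on the work list */
	int queued;			/* already pushed to the back? */
};

/*
 * Defer rq and every transitive waiter without recursing: keep a local
 * FIFO work list threaded through work_next, analogous to how the patch
 * threads waiters onto a local list via their sched.link.
 */
static void defer_iteratively(struct request *rq)
{
	struct request *head = rq, *tail = rq;

	rq->work_next = NULL;
	rq->queued = 1;

	while (head) {
		struct request *cur = head;

		head = cur->work_next;		/* pop the front of the queue */
		printf("deferring %s\n", cur->name);

		for (int i = 0; i < 4 && cur->waiters[i]; i++) {
			struct request *w = cur->waiters[i];

			if (w->queued)		/* already on (or through) the queue */
				continue;

			w->queued = 1;
			w->work_next = NULL;
			if (head)		/* append at the tail, keeping FIFO order */
				tail->work_next = w;
			else
				head = w;
			tail = w;
		}
	}
}

int main(void)
{
	struct request c = { .name = "c" };
	struct request b = { .name = "b", .waiters = { &c } };
	struct request a = { .name = "a", .waiters = { &b, &c } };

	defer_iteratively(&a);	/* prints a, b, c: each request handled once */
	return 0;
}

The property mirrored here is the one the commit message relies on: each waiter is queued at most once and visited without recursion, so the walk's depth no longer grows with the length of the dependency chain.
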
@@ -833,10 +833,9 @@ last_active(const struct intel_engine_execlists *execlists)
 	return *last;
 }
 
-static void
-defer_request(struct i915_request * const rq, struct list_head * const pl)
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
-	struct i915_dependency *p;
+	LIST_HEAD(list);
 
 	/*
 	 * We want to move the interrupted request to the back of
@@ -845,34 +844,37 @@ defer_request(struct i915_request * const rq, struct list_head * const pl)
 	 * flight and were waiting for the interrupted request to
 	 * be run after it again.
 	 */
-	list_move_tail(&rq->sched.link, pl);
+	do {
+		struct i915_dependency *p;
 
-	list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
-		struct i915_request *w =
-			container_of(p->waiter, typeof(*w), sched);
+		GEM_BUG_ON(i915_request_is_active(rq));
+		list_move_tail(&rq->sched.link, pl);
 
-		/* Leave semaphores spinning on the other engines */
-		if (w->engine != rq->engine)
-			continue;
+		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
 
-		/* No waiter should start before the active request completed */
-		GEM_BUG_ON(i915_request_started(w));
+			/* Leave semaphores spinning on the other engines */
+			if (w->engine != rq->engine)
+				continue;
 
-		GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
-		if (rq_prio(w) < rq_prio(rq))
-			continue;
+			/* No waiter should start before its signaler */
+			GEM_BUG_ON(i915_request_started(w) &&
+				   !i915_request_completed(rq));
 
-		if (list_empty(&w->sched.link))
-			continue; /* Not yet submitted; unready */
+			GEM_BUG_ON(i915_request_is_active(w));
+			if (list_empty(&w->sched.link))
+				continue; /* Not yet submitted; unready */
 
-		/*
-		 * This should be very shallow as it is limited by the
-		 * number of requests that can fit in a ring (<64) and
-		 * the number of contexts that can be in flight on this
-		 * engine.
-		 */
-		defer_request(w, pl);
-	}
+			if (rq_prio(w) < rq_prio(rq))
+				continue;
+
+			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
 }
 
 static void defer_active(struct intel_engine_cs *engine)