Commit 52c76fb1 authored by Chris Wilson

drm/i915: Pass i915_sched_node around internally

To simplify the next patch, update bump_priority and schedule to accept
the internal i915_sched_node directly rather than expecting a request pointer.

add/remove: 0/0 grow/shrink: 2/1 up/down: 8/-15 (-7)
Function                                     old     new   delta
i915_schedule_bump_priority                  109     113      +4
i915_schedule                                 50      54      +4
__i915_schedule                              922     907     -15

v2: Adopt node for the old rq local, since it is no longer a request but
the origin node.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190513120102.29660-2-chris@chris-wilson.co.uk
parent 5ae87063
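
Before the diff, a minimal, self-contained C sketch of the pattern this patch applies (a standalone mock-up for illustration, not the kernel code): callers hand the scheduler the embedded sched node rather than the containing request, and the container is recovered via the container_of idiom only where it is actually needed. The struct layouts, the plain int priority field, and main() are simplified stand-ins; node_to_request() mirrors the kernel helper of the same name used in the diff below.

#include <stddef.h>
#include <stdio.h>

struct sched_node {
	int priority;			/* stand-in for i915_sched_attr */
};

struct request {
	int engine_id;			/* stand-in for rq->engine */
	struct sched_node sched;	/* embedded node, as in i915_request */
};

/* Mirrors node_to_request(): recover the container from its embedded node. */
static struct request *node_to_request(struct sched_node *node)
{
	return (struct request *)((char *)node - offsetof(struct request, sched));
}

/* The internal worker is keyed on the node, not the request. */
static void schedule_node(struct sched_node *node, int prio)
{
	if (prio <= node->priority)
		return;			/* priorities are only ever raised */

	node->priority = prio;
	printf("engine %d bumped to prio %d\n",
	       node_to_request(node)->engine_id, prio);
}

/* Mirrors __bump_priority(): OR the bump bits into the current priority. */
static void bump_priority(struct sched_node *node, unsigned int bump)
{
	schedule_node(node, node->priority | (int)bump);
}

int main(void)
{
	struct request rq = { .engine_id = 0, .sched = { .priority = 1 } };

	bump_priority(&rq.sched, 2);	/* callers pass &rq->sched, not rq */
	return 0;
}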
@@ -186,7 +186,7 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
 	tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
-static void __i915_schedule(struct i915_request *rq,
+static void __i915_schedule(struct i915_sched_node *node,
 			    const struct i915_sched_attr *attr)
 {
 	struct intel_engine_cs *engine;
@@ -200,13 +200,13 @@ static void __i915_schedule(struct i915_request *rq,
 	lockdep_assert_held(&schedule_lock);
 
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
-	if (i915_request_completed(rq))
+	if (node_signaled(node))
 		return;
 
-	if (prio <= READ_ONCE(rq->sched.attr.priority))
+	if (prio <= READ_ONCE(node->attr.priority))
 		return;
 
-	stack.signaler = &rq->sched;
+	stack.signaler = node;
 	list_add(&stack.dfs_link, &dfs);
 
 	/*
@@ -257,9 +257,9 @@ static void __i915_schedule(struct i915_request *rq,
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
-		GEM_BUG_ON(!list_empty(&rq->sched.link));
-		rq->sched.attr = *attr;
+	if (node->attr.priority == I915_PRIORITY_INVALID) {
+		GEM_BUG_ON(!list_empty(&node->link));
+		node->attr = *attr;
 
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
@@ -268,15 +268,14 @@ static void __i915_schedule(struct i915_request *rq,
 	}
 
 	memset(&cache, 0, sizeof(cache));
-	engine = rq->engine;
+	engine = node_to_request(node)->engine;
 	spin_lock(&engine->timeline.lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
-		struct i915_sched_node *node = dep->signaler;
-
 		INIT_LIST_HEAD(&dep->dfs_link);
 
+		node = dep->signaler;
 		engine = sched_lock_engine(node, engine, &cache);
 		lockdep_assert_held(&engine->timeline.lock);
@@ -319,13 +318,20 @@ static void __i915_schedule(struct i915_request *rq,
 
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
 {
 	spin_lock_irq(&schedule_lock);
-	__i915_schedule(rq, attr);
+	__i915_schedule(&rq->sched, attr);
 	spin_unlock_irq(&schedule_lock);
 }
 
+static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
+{
+	struct i915_sched_attr attr = node->attr;
+
+	attr.priority |= bump;
+	__i915_schedule(node, &attr);
+}
+
 void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 {
-	struct i915_sched_attr attr;
 	unsigned long flags;
 
 	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
@@ -334,11 +340,7 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 		return;
 
 	spin_lock_irqsave(&schedule_lock, flags);
-
-	attr = rq->sched.attr;
-	attr.priority |= bump;
-
-	__i915_schedule(rq, &attr);
-
+	__bump_priority(&rq->sched, bump);
 	spin_unlock_irqrestore(&schedule_lock, flags);
 }