Commit c0fa92ec authored by Chris Wilson, committed by Rodrigo Vivi

drm/i915: Protect request peeking with RCU

Since the execlists_active() is no longer protected by the
engine->active.lock, we need to protect the request pointer with RCU to
prevent it being freed as we evaluate whether or not we need to preempt.

Fixes: df403069 ("drm/i915/execlists: Lift process_csb() out of the irq-off spinlock")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191104090158.2959-2-chris@chris-wilson.co.uk
(cherry picked from commit 7d148635)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
(cherry picked from commit 8eb4704b)
(cherry picked from commit 7e27238e149ce4f00d9cd801fe3aa0ea55e986a2)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 2d691aec
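
The fix relies on the standard RCU read-side pattern: the peeked request can be retired and freed by another CPU at any time, so the pointer must only be dereferenced inside rcu_read_lock()/rcu_read_unlock(), and the writer must wait out a grace period before freeing. The sketch below illustrates that pattern in userspace; it assumes liburcu's classic urcu.h API rather than kernel RCU, and the struct request, active_slot and peek_priority names are made up for illustration, not the i915 types.

/*
 * Minimal userspace sketch of RCU-protected peeking (assumes liburcu).
 * struct request, active_slot and peek_priority are illustrative only;
 * they are not the i915 structures.
 *
 * Assumed build: gcc rcu-peek.c -lurcu
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct request {
        int priority;
};

/* The "currently executing" slot that readers peek at. */
static struct request *active_slot;

static int peek_priority(void)
{
        struct request *rq;
        int prio = -1;

        rcu_read_lock();        /* rq cannot be freed inside this section */
        rq = rcu_dereference(active_slot);
        if (rq)
                prio = rq->priority;    /* safe even if the writer retires rq now */
        rcu_read_unlock();

        return prio;
}

static void retire(struct request *rq)
{
        rcu_assign_pointer(active_slot, NULL);
        synchronize_rcu();      /* wait for readers still peeking at rq */
        free(rq);
}

int main(void)
{
        struct request *rq = malloc(sizeof(*rq));

        rq->priority = 2;
        rcu_register_thread();

        rcu_assign_pointer(active_slot, rq);
        printf("peeked priority: %d\n", peek_priority());

        retire(rq);
        printf("after retire:    %d\n", peek_priority());

        rcu_unregister_thread();
        return 0;
}

Without the read-side lock, the peek in kick_submission() could race with request retirement and dereference freed memory, which is the window this patch closes.
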
@@ -177,9 +177,37 @@ static inline int rq_prio(const struct i915_request *rq)
         return rq->sched.attr.priority | __NO_PREEMPTION;
 }
 
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
 {
-        const struct i915_request *inflight = *engine->execlists.active;
-
         /*
+         * Allow preemption of low -> normal -> high, but we do
+         * not allow low priority tasks to preempt other low priority
+         * tasks under the impression that latency for low priority
+         * tasks does not matter (as much as background throughput),
+         * so kiss.
+         */
+        return prio >= max(I915_PRIORITY_NORMAL, active);
+}
+
+static void kick_submission(struct intel_engine_cs *engine,
+                            const struct i915_request *rq,
+                            int prio)
+{
+        const struct i915_request *inflight;
+
+        /*
+         * We only need to kick the tasklet once for the high priority
+         * new context we add into the queue.
+         */
+        if (prio <= engine->execlists.queue_priority_hint)
+                return;
+
+        rcu_read_lock();
+
+        /* Nothing currently active? We're overdue for a submission! */
+        inflight = execlists_active(&engine->execlists);
+        if (!inflight)
+                goto unlock;
+
+        /*
          * If we are already the currently executing context, don't
@@ -188,10 +216,15 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
          * tasklet, i.e. we have not change the priority queue
          * sufficiently to oust the running context.
          */
-        if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
-                return;
+        if (inflight->hw_context == rq->hw_context)
+                goto unlock;
 
-        tasklet_hi_schedule(&engine->execlists.tasklet);
+        engine->execlists.queue_priority_hint = prio;
+        if (need_preempt(prio, rq_prio(inflight)))
+                tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+        rcu_read_unlock();
 }
 
 static void __i915_schedule(struct i915_sched_node *node,
@@ -317,13 +350,8 @@ static void __i915_schedule(struct i915_sched_node *node,
                         list_move_tail(&node->link, cache.priolist);
                 }
 
-                if (prio <= engine->execlists.queue_priority_hint)
-                        continue;
-
-                engine->execlists.queue_priority_hint = prio;
-
                 /* Defer (tasklet) submission until after all of our updates. */
-                kick_submission(engine, prio);
+                kick_submission(engine, node_to_request(node), prio);
         }
 
         spin_unlock(&engine->active.lock);
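
The need_preempt() helper added above encodes a single rule: a newly queued request kicks the submission tasklet only if its priority is at least I915_PRIORITY_NORMAL and at least the priority of the in-flight request, so low-priority work never preempts other low-priority work. A standalone check of that comparison, with placeholder priority values standing in for the driver's real levels:

/* Standalone check of the need_preempt() comparison; the numeric
 * priority levels are placeholders, not the driver's real values. */
#include <stdio.h>

#define PRIORITY_LOW    (-10)
#define PRIORITY_NORMAL 0
#define PRIORITY_HIGH   10

#define max(a, b) ((a) > (b) ? (a) : (b))

static int need_preempt(int prio, int active)
{
        /* low -> normal -> high may preempt; low never preempts low */
        return prio >= max(PRIORITY_NORMAL, active);
}

int main(void)
{
        printf("low    vs low:    %d\n", need_preempt(PRIORITY_LOW, PRIORITY_LOW));      /* 0 */
        printf("normal vs low:    %d\n", need_preempt(PRIORITY_NORMAL, PRIORITY_LOW));   /* 1 */
        printf("high   vs normal: %d\n", need_preempt(PRIORITY_HIGH, PRIORITY_NORMAL));  /* 1 */
        printf("normal vs high:   %d\n", need_preempt(PRIORITY_NORMAL, PRIORITY_HIGH));  /* 0 */
        return 0;
}

Note that the driver version also returns early when the new priority does not beat engine->execlists.queue_priority_hint, so the tasklet is kicked at most once per priority bump.
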