Commit 0c7112a0 authored by Chris Wilson

drm/i915: Rename priotree to sched

Having moved the priotree struct into i915_scheduler.h, identify it as
the scheduling element and rebrand it as i915_sched. This becomes more
useful as we start attaching more information that we need to propagate
through the scheduler.

v2: Use i915_sched_node for future distinctiveness
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180418184052.7129-2-chris@chris-wilson.co.uk
parent 98ff5c78
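For orientation before the diff: after this patch the renamed node looks roughly as follows. This is a sketch assembled from the i915_scheduler.h and i915_request.c hunks below; the priority member sits in collapsed context, and its comment here is editorial.

struct i915_sched_node {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	int priority; /* initialised to I915_PRIORITY_INVALID */
};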
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1278,7 +1278,7 @@ static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
-	erq->priority = request->priotree.priority;
+	erq->priority = request->sched.priority;
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -125,22 +125,22 @@ i915_dependency_free(struct drm_i915_private *i915,
 }
 
 static void
-__i915_priotree_add_dependency(struct i915_priotree *pt,
-			       struct i915_priotree *signal,
+__i915_sched_node_add_dependency(struct i915_sched_node *node,
+				 struct i915_sched_node *signal,
 				 struct i915_dependency *dep,
 				 unsigned long flags)
 {
 	INIT_LIST_HEAD(&dep->dfs_link);
 	list_add(&dep->wait_link, &signal->waiters_list);
-	list_add(&dep->signal_link, &pt->signalers_list);
+	list_add(&dep->signal_link, &node->signalers_list);
 	dep->signaler = signal;
 	dep->flags = flags;
 }
 
 static int
-i915_priotree_add_dependency(struct drm_i915_private *i915,
-			     struct i915_priotree *pt,
-			     struct i915_priotree *signal)
+i915_sched_node_add_dependency(struct drm_i915_private *i915,
+			       struct i915_sched_node *node,
+			       struct i915_sched_node *signal)
 {
 	struct i915_dependency *dep;
@@ -148,16 +148,18 @@ i915_priotree_add_dependency(struct drm_i915_private *i915,
 	if (!dep)
 		return -ENOMEM;
 
-	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
+	__i915_sched_node_add_dependency(node, signal, dep,
+					 I915_DEPENDENCY_ALLOC);
 	return 0;
 }
 
 static void
-i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
+i915_sched_node_fini(struct drm_i915_private *i915,
+		     struct i915_sched_node *node)
 {
-	struct i915_dependency *dep, *next;
+	struct i915_dependency *dep, *tmp;
 
-	GEM_BUG_ON(!list_empty(&pt->link));
+	GEM_BUG_ON(!list_empty(&node->link));
 
 	/*
 	 * Everyone we depended upon (the fences we wait to be signaled)
@@ -165,8 +167,8 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 	 * However, retirement is run independently on each timeline and
 	 * so we may be called out-of-order.
 	 */
-	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
-		GEM_BUG_ON(!i915_priotree_signaled(dep->signaler));
+	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+		GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
 		list_del(&dep->wait_link);
@@ -175,8 +177,8 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 	}
 
 	/* Remove ourselves from everyone who depends upon us */
-	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
-		GEM_BUG_ON(dep->signaler != pt);
+	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+		GEM_BUG_ON(dep->signaler != node);
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
 		list_del(&dep->signal_link);
@@ -186,12 +188,12 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 }
 
 static void
-i915_priotree_init(struct i915_priotree *pt)
+i915_sched_node_init(struct i915_sched_node *node)
 {
-	INIT_LIST_HEAD(&pt->signalers_list);
-	INIT_LIST_HEAD(&pt->waiters_list);
-	INIT_LIST_HEAD(&pt->link);
-	pt->priority = I915_PRIORITY_INVALID;
+	INIT_LIST_HEAD(&node->signalers_list);
+	INIT_LIST_HEAD(&node->waiters_list);
+	INIT_LIST_HEAD(&node->link);
+	node->priority = I915_PRIORITY_INVALID;
 }
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -422,7 +424,7 @@ static void i915_request_retire(struct i915_request *request)
 	}
 	spin_unlock_irq(&request->lock);
 
-	i915_priotree_fini(request->i915, &request->priotree);
+	i915_sched_node_fini(request->i915, &request->sched);
 	i915_request_put(request);
 }
@@ -725,7 +727,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
 	init_waitqueue_head(&rq->execute);
 
-	i915_priotree_init(&rq->priotree);
+	i915_sched_node_init(&rq->sched);
 
 	INIT_LIST_HEAD(&rq->active_list);
 	rq->i915 = i915;
@@ -777,8 +779,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	/* Make sure we didn't add ourselves to external state before freeing */
 	GEM_BUG_ON(!list_empty(&rq->active_list));
-	GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list));
-	GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list));
+	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
+	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
 
 	kmem_cache_free(i915->requests, rq);
 
 err_unreserve:
@@ -800,9 +802,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 		return 0;
 
 	if (to->engine->schedule) {
-		ret = i915_priotree_add_dependency(to->i915,
-						   &to->priotree,
-						   &from->priotree);
+		ret = i915_sched_node_add_dependency(to->i915,
						     &to->sched,
						     &from->sched);
 		if (ret < 0)
 			return ret;
 	}
@@ -1033,8 +1035,8 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
 					     &request->submitq);
 		if (engine->schedule)
-			__i915_priotree_add_dependency(&request->priotree,
-						       &prev->priotree,
+			__i915_sched_node_add_dependency(&request->sched,
+							 &prev->sched,
 							 &request->dep,
 							 0);
 	}
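Taken together, the i915_request.c hunks above cover the node's whole lifecycle; condensed here for reference (a paraphrase of the call sites shown above, not new API):

/*
 * Lifecycle of rq->sched as wired up by the hunks above:
 *
 *   i915_request_alloc():          i915_sched_node_init(&rq->sched)
 *   i915_request_await_request():  i915_sched_node_add_dependency(to->i915,
 *                                      &to->sched, &from->sched)
 *   __i915_request_add():          __i915_sched_node_add_dependency(
 *                                      &request->sched, &prev->sched,
 *                                      &request->dep, 0)
 *   i915_request_retire():         i915_sched_node_fini(request->i915,
 *                                      &request->sched)
 */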
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -117,7 +117,7 @@ struct i915_request {
 	 * to retirement), i.e. bidirectional dependency information for the
 	 * request not tied to individual fences.
 	 */
-	struct i915_priotree priotree;
+	struct i915_sched_node sched;
 	struct i915_dependency dep;
 
 	/**
@@ -306,10 +306,10 @@ static inline bool i915_request_started(const struct i915_request *rq)
 				 seqno - 1);
 }
 
-static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
+static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
 {
 	const struct i915_request *rq =
-		container_of(pt, const struct i915_request, priotree);
+		container_of(node, const struct i915_request, sched);
 
 	return i915_request_completed(rq);
 }
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -38,7 +38,7 @@ enum {
  * is ready, and are able to reorder its portion of the graph to accommodate
  * dynamic priority changes.
  */
-struct i915_priotree {
+struct i915_sched_node {
 	struct list_head signalers_list; /* those before us, we depend upon */
 	struct list_head waiters_list; /* those after us, they depend upon us */
 	struct list_head link;
@@ -46,7 +46,7 @@ struct i915_priotree {
 };
 
 struct i915_dependency {
-	struct i915_priotree *signaler;
+	struct i915_sched_node *signaler;
 	struct list_head signal_link;
 	struct list_head wait_link;
 	struct list_head dfs_link;
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1123,7 +1123,7 @@ static void print_request(struct drm_printer *m,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
 		   rq->fence.context, rq->fence.seqno,
-		   rq->priotree.priority,
+		   rq->sched.priority,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
 }
@@ -1367,7 +1367,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		struct i915_priolist *p =
 			rb_entry(rb, typeof(*p), node);
 
-		list_for_each_entry(rq, &p->requests, priotree.link)
+		list_for_each_entry(rq, &p->requests, sched.link)
 			print_request(m, rq, "\t\tQ ");
 	}
 	spin_unlock_irq(&engine->timeline->lock);
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -659,7 +659,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->sched.priority;
 }
 
 static inline int port_prio(const struct execlist_port *port)
@@ -706,11 +706,11 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
 			if (last && rq->ctx != last->ctx) {
 				if (port == last_port) {
 					__list_del_many(&p->requests,
-							&rq->priotree.link);
+							&rq->sched.link);
 					goto done;
 				}
@@ -719,7 +719,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 				port++;
 			}
 
-			INIT_LIST_HEAD(&rq->priotree.link);
+			INIT_LIST_HEAD(&rq->sched.link);
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -177,7 +177,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->sched.priority;
 }
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
@@ -258,7 +258,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 
 static struct i915_priolist *
 lookup_priolist(struct intel_engine_cs *engine,
-		struct i915_priotree *pt,
+		struct i915_sched_node *node,
 		int prio)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -344,10 +344,10 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
 		if (rq_prio(rq) != last_prio) {
 			last_prio = rq_prio(rq);
-			p = lookup_priolist(engine, &rq->priotree, last_prio);
+			p = lookup_priolist(engine, &rq->sched, last_prio);
 		}
 
-		list_add(&rq->priotree.link, &p->requests);
+		list_add(&rq->sched.link, &p->requests);
 	}
 }
@@ -654,7 +654,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not
@@ -674,7 +674,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 */
 			if (port == last_port) {
 				__list_del_many(&p->requests,
-						&rq->priotree.link);
+						&rq->sched.link);
 				goto done;
 			}
@@ -688,7 +688,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			if (ctx_single_port_submission(last->ctx) ||
 			    ctx_single_port_submission(rq->ctx)) {
 				__list_del_many(&p->requests,
-						&rq->priotree.link);
+						&rq->sched.link);
 				goto done;
 			}
@@ -701,7 +701,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(port_isset(port));
 			}
 
-			INIT_LIST_HEAD(&rq->priotree.link);
+			INIT_LIST_HEAD(&rq->sched.link);
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
 			last = rq;
@@ -882,8 +882,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	while (rb) {
 		struct i915_priolist *p = to_priolist(rb);
 
-		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
-			INIT_LIST_HEAD(&rq->priotree.link);
+		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+			INIT_LIST_HEAD(&rq->sched.link);
 
 			dma_fence_set_error(&rq->fence, -EIO);
 			__i915_request_submit(rq);
@@ -1116,10 +1116,11 @@ static void execlists_submission_tasklet(unsigned long data)
 }
 
 static void queue_request(struct intel_engine_cs *engine,
-			  struct i915_priotree *pt,
+			  struct i915_sched_node *node,
 			  int prio)
 {
-	list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests);
+	list_add_tail(&node->link,
+		      &lookup_priolist(engine, node, prio)->requests);
 }
 
 static void __submit_queue(struct intel_engine_cs *engine, int prio)
@@ -1142,24 +1143,24 @@ static void execlists_submit_request(struct i915_request *request)
 	/* Will be called from irq-context when using foreign fences. */
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
-	queue_request(engine, &request->priotree, rq_prio(request));
+	queue_request(engine, &request->sched, rq_prio(request));
 	submit_queue(engine, rq_prio(request));
 
 	GEM_BUG_ON(!engine->execlists.first);
-	GEM_BUG_ON(list_empty(&request->priotree.link));
+	GEM_BUG_ON(list_empty(&request->sched.link));
 
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
-static struct i915_request *pt_to_request(struct i915_priotree *pt)
+static struct i915_request *sched_to_request(struct i915_sched_node *node)
 {
-	return container_of(pt, struct i915_request, priotree);
+	return container_of(node, struct i915_request, sched);
 }
 
 static struct intel_engine_cs *
-pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 {
-	struct intel_engine_cs *engine = pt_to_request(pt)->engine;
+	struct intel_engine_cs *engine = sched_to_request(node)->engine;
 
 	GEM_BUG_ON(!locked);
@@ -1183,23 +1184,23 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	if (i915_request_completed(request))
 		return;
 
-	if (prio <= READ_ONCE(request->priotree.priority))
+	if (prio <= READ_ONCE(request->sched.priority))
 		return;
 
 	/* Need BKL in order to use the temporary link inside i915_dependency */
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 
-	stack.signaler = &request->priotree;
+	stack.signaler = &request->sched;
 	list_add(&stack.dfs_link, &dfs);
 
 	/*
 	 * Recursively bump all dependent priorities to match the new request.
 	 *
 	 * A naive approach would be to use recursion:
-	 * static void update_priorities(struct i915_priotree *pt, prio) {
-	 *	list_for_each_entry(dep, &pt->signalers_list, signal_link)
+	 * static void update_priorities(struct i915_sched_node *node, prio) {
+	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
 	 *		update_priorities(dep->signal, prio)
-	 *	queue_request(pt);
+	 *	queue_request(node);
 	 * }
 	 * but that may have unlimited recursion depth and so runs a very
 	 * real risk of overunning the kernel stack. Instead, we build
@@ -1211,7 +1212,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	 * last element in the list is the request we must execute first.
 	 */
 	list_for_each_entry(dep, &dfs, dfs_link) {
-		struct i915_priotree *pt = dep->signaler;
+		struct i915_sched_node *node = dep->signaler;
 
 		/*
 		 * Within an engine, there can be no cycle, but we may
@@ -1219,13 +1220,13 @@ static void execlists_schedule(struct i915_request *request, int prio)
 		 * (redundant dependencies are not eliminated) and across
 		 * engines.
 		 */
-		list_for_each_entry(p, &pt->signalers_list, signal_link) {
+		list_for_each_entry(p, &node->signalers_list, signal_link) {
 			GEM_BUG_ON(p == dep); /* no cycles! */
 
-			if (i915_priotree_signaled(p->signaler))
+			if (i915_sched_node_signaled(p->signaler))
 				continue;
 
-			GEM_BUG_ON(p->signaler->priority < pt->priority);
+			GEM_BUG_ON(p->signaler->priority < node->priority);
 			if (prio > READ_ONCE(p->signaler->priority))
 				list_move_tail(&p->dfs_link, &dfs);
 		}
@@ -1237,9 +1238,9 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (request->priotree.priority == I915_PRIORITY_INVALID) {
-		GEM_BUG_ON(!list_empty(&request->priotree.link));
-		request->priotree.priority = prio;
+	if (request->sched.priority == I915_PRIORITY_INVALID) {
+		GEM_BUG_ON(!list_empty(&request->sched.link));
+		request->sched.priority = prio;
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
 		__list_del_entry(&stack.dfs_link);
@@ -1250,23 +1251,23 @@ static void execlists_schedule(struct i915_request *request, int prio)
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
-		struct i915_priotree *pt = dep->signaler;
+		struct i915_sched_node *node = dep->signaler;
 
 		INIT_LIST_HEAD(&dep->dfs_link);
 
-		engine = pt_lock_engine(pt, engine);
+		engine = sched_lock_engine(node, engine);
 
-		if (prio <= pt->priority)
+		if (prio <= node->priority)
 			continue;
 
-		pt->priority = prio;
-		if (!list_empty(&pt->link)) {
-			__list_del_entry(&pt->link);
-			queue_request(engine, pt, prio);
+		node->priority = prio;
+		if (!list_empty(&node->link)) {
+			__list_del_entry(&node->link);
+			queue_request(engine, node, prio);
 		}
 
 		if (prio > engine->execlists.queue_priority &&
-		    i915_sw_fence_done(&pt_to_request(pt)->submit))
+		    i915_sw_fence_done(&sched_to_request(node)->submit))
 			__submit_queue(engine, prio);
 	}
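The comment in execlists_schedule() above explains why the priority bump walks a list instead of recursing. The following standalone sketch illustrates the same stack-safe pattern with toy types rather than the driver's (illustrative only, not the kernel code):

#include <stddef.h>

/* Toy stand-ins for i915_sched_node/i915_dependency. */
struct toy_node {
	int priority;
	struct toy_node **signalers; /* nodes this one depends upon */
	size_t nr_signalers;
	struct toy_node *dfs_next;   /* plays the role of dep->dfs_link */
	int on_dfs;
};

/*
 * Bump 'prio' onto 'request' and everything it transitively depends on.
 * Rather than recursing (unbounded stack depth), grow a worklist in
 * place and walk it; the depth of the traversal is bounded by the
 * number of nodes, not by the shape of the dependency graph. The real
 * code additionally replays the list in reverse so that dependencies
 * are requeued before the request that needs them.
 */
static void toy_bump_priority(struct toy_node *request, int prio)
{
	struct toy_node *tail = request, *n;
	size_t i;

	request->on_dfs = 1;
	request->dfs_next = NULL;

	for (n = request; n; n = n->dfs_next) {        /* expand worklist */
		for (i = 0; i < n->nr_signalers; i++) {
			struct toy_node *s = n->signalers[i];

			if (!s->on_dfs && prio > s->priority) {
				s->on_dfs = 1;
				s->dfs_next = NULL;
				tail->dfs_next = s;
				tail = s;
			}
		}
	}

	for (n = request; n; n = n->dfs_next) {        /* apply new prio */
		if (prio > n->priority)
			n->priority = prio;
		n->on_dfs = 0;                         /* reset for reuse */
	}
}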