Commit 6cfe66eb authored by Chris Wilson

drm/i915/gt: Track signaled breadcrumbs outside of the breadcrumb spinlock

Make b->signaled_requests a lockless-list so that we can manipulate it
outside of the b->irq_lock.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201123113717.20500-2-chris@chris-wilson.co.uk
parent 9d5612ca
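The shape of the change: b->signaled_requests becomes a lockless llist, so a request that completes can be published with llist_add() without taking b->irq_lock, and signal_irq_work() drains the whole pending batch at once with llist_del_all(). Below is a minimal sketch of that producer/consumer pattern, with hypothetical demo_* names standing in for the driver's real structures; it is an illustration of the llist API used by the patch, not the patch itself.

/* Sketch only: hypothetical demo_* types, not the driver's structures. */
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/irq_work.h>

struct demo_breadcrumbs {
	struct llist_head signaled_requests;	/* lock-free, multiple producers */
	struct irq_work irq_work;
};

struct demo_request {
	struct llist_node signal_node;
};

/* Producer: callable concurrently from any context, no spinlock taken. */
static void demo_publish(struct demo_breadcrumbs *b, struct demo_request *rq)
{
	/*
	 * llist_add() returns true only if the list was empty beforehand,
	 * so the irq_work is kicked once per batch, not once per request.
	 */
	if (llist_add(&rq->signal_node, &b->signaled_requests))
		irq_work_queue(&b->irq_work);
}

/* Consumer: takes the entire pending batch in one atomic exchange. */
static void demo_drain(struct irq_work *work)
{
	struct demo_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&b->signaled_requests)) {
		struct demo_request *rq =
			llist_entry(node, typeof(*rq), signal_node);

		/* ... signal the fence, run callbacks, drop the reference ... */
		(void)rq;
	}
}

In the diff below, insert_breadcrumb() plays the producer role for requests that are already complete, and signal_irq_work() is the consumer; the worker additionally chains the requests it signals itself under the lock onto a plain llist_node list via the new slist_add() helper.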
@@ -173,26 +173,34 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 		intel_engine_add_retire(b->irq_engine, tl);
 }
 
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
 {
-	clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
 	if (!__dma_fence_signal(&rq->fence)) {
 		i915_request_put(rq);
 		return false;
 	}
 
-	list_add_tail(&rq->signal_link, signals);
 	return true;
 }
 
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+	node->next = head;
+	return node;
+}
+
 static void signal_irq_work(struct irq_work *work)
 {
 	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
 	const ktime_t timestamp = ktime_get();
+	struct llist_node *signal, *sn;
 	struct intel_context *ce, *cn;
 	struct list_head *pos, *next;
-	LIST_HEAD(signal);
+
+	signal = NULL;
+	if (unlikely(!llist_empty(&b->signaled_requests)))
+		signal = llist_del_all(&b->signaled_requests);
 
 	spin_lock(&b->irq_lock);
@@ -224,8 +232,6 @@ static void signal_irq_work(struct irq_work *work)
 	if (b->irq_armed && list_empty(&b->signalers))
 		__intel_breadcrumbs_disarm_irq(b);
 
-	list_splice_init(&b->signaled_requests, &signal);
-
 	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
 		GEM_BUG_ON(list_empty(&ce->signals));
@@ -242,7 +248,10 @@ static void signal_irq_work(struct irq_work *work)
 			 * spinlock as the callback chain may end up adding
 			 * more signalers to the same context or engine.
 			 */
-			__signal_request(rq, &signal);
+			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+			if (__signal_request(rq))
+				/* We own signal_node now, xfer to local list */
+				signal = slist_add(&rq->signal_node, signal);
 		}
 
 		/*
@@ -262,9 +271,9 @@ static void signal_irq_work(struct irq_work *work)
 
 	spin_unlock(&b->irq_lock);
 
-	list_for_each_safe(pos, next, &signal) {
+	llist_for_each_safe(signal, sn, signal) {
 		struct i915_request *rq =
-			list_entry(pos, typeof(*rq), signal_link);
+			llist_entry(signal, typeof(*rq), signal_node);
 		struct list_head cb_list;
 
 		spin_lock(&rq->lock);
@@ -291,7 +300,7 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
 
 	spin_lock_init(&b->irq_lock);
 	INIT_LIST_HEAD(&b->signalers);
-	INIT_LIST_HEAD(&b->signaled_requests);
+	init_llist_head(&b->signaled_requests);
 
 	init_irq_work(&b->irq_work, signal_irq_work);
@@ -355,7 +364,8 @@ static void insert_breadcrumb(struct i915_request *rq,
 	 * its signal completion.
 	 */
 	if (__request_completed(rq)) {
-		if (__signal_request(rq, &b->signaled_requests))
+		if (__signal_request(rq) &&
+		    llist_add(&rq->signal_node, &b->signaled_requests))
 			irq_work_queue(&b->irq_work);
 		return;
 	}

@@ -35,7 +35,7 @@ struct intel_breadcrumbs {
 	struct intel_engine_cs *irq_engine;
 
 	struct list_head signalers;
-	struct list_head signaled_requests;
+	struct llist_head signaled_requests;
 
 	struct irq_work irq_work; /* for use from inside irq_lock */

@@ -177,7 +177,11 @@ struct i915_request {
 	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
-	struct list_head signal_link;
+
+	union {
+		struct list_head signal_link;
+		struct llist_node signal_node;
+	};
 
 	/*
 	 * The rcu epoch of when this request was allocated. Used to judiciously
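One detail worth spelling out in the i915_request change: signal_link and signal_node can share storage because, as the patch appears to rely on, a request sits on a per-context signals list via signal_link only until it is signaled, and from then on travels only on the lockless signaled list via signal_node, so the two linkages are never live at the same time. A stripped-down illustration of the overlay, using a hypothetical demo_* name rather than the driver struct:

#include <linux/list.h>
#include <linux/llist.h>

struct demo_request_links {
	union {
		struct list_head signal_link;	/* waiting: per-context list, under the lock */
		struct llist_node signal_node;	/* signaled: lockless signaled_requests list */
	};
};

Overlapping the two also avoids growing struct i915_request: list_head carries two pointers and llist_node only one, and since only one linkage is in use at any time the union costs nothing.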