Commit 5b116c17 authored by Matthew Brost's avatar Matthew Brost Committed by John Harrison

drm/i915/guc: Drop pin count check trick between sched_disable and re-pin

Drop pin count check trick between a sched_disable and re-pin, now rely
on the lock and counter of the number of committed requests to determine
if scheduling should be disabled on the context.
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-20-matthew.brost@intel.com
parent 1424ba81
...@@ -169,6 +169,8 @@ struct intel_context { ...@@ -169,6 +169,8 @@ struct intel_context {
struct list_head fences; struct list_head fences;
/* GuC context blocked fence */ /* GuC context blocked fence */
struct i915_sw_fence blocked; struct i915_sw_fence blocked;
/* GuC committed requests */
int number_committed_requests;
} guc_state; } guc_state;
struct { struct {
......
...@@ -249,6 +249,25 @@ static inline void decr_context_blocked(struct intel_context *ce) ...@@ -249,6 +249,25 @@ static inline void decr_context_blocked(struct intel_context *ce)
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED; ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
} }
/*
 * Report whether any committed requests are still accounted against @ce.
 * Reads guc_state.number_committed_requests; callers that need a stable
 * answer take ce->guc_state.lock around the check (see the sched-disable
 * path in this patch).
 */
static inline bool context_has_committed_requests(struct intel_context *ce)
{
	return !!ce->guc_state.number_committed_requests;
}
/*
 * Account one more request committed to @ce. The caller must hold
 * ce->guc_state.lock so the counter stays coherent with the checks in
 * guc_context_sched_disable() and remove_from_context().
 */
static inline void incr_context_committed_requests(struct intel_context *ce)
{
	lockdep_assert_held(&ce->guc_state.lock);
	++ce->guc_state.number_committed_requests;
	/* A negative count after an increment would mean prior underflow. */
	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
}
/*
 * Drop the committed-request accounting for one request retired from @ce.
 * The caller must hold ce->guc_state.lock; the BUG_ON catches an
 * increment/decrement imbalance (count must never go negative).
 */
static inline void decr_context_committed_requests(struct intel_context *ce)
{
	lockdep_assert_held(&ce->guc_state.lock);
	--ce->guc_state.number_committed_requests;
	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
}
static inline bool context_guc_id_invalid(struct intel_context *ce) static inline bool context_guc_id_invalid(struct intel_context *ce)
{ {
return ce->guc_id == GUC_INVALID_LRC_ID; return ce->guc_id == GUC_INVALID_LRC_ID;
...@@ -1766,24 +1785,18 @@ static void guc_context_sched_disable(struct intel_context *ce) ...@@ -1766,24 +1785,18 @@ static void guc_context_sched_disable(struct intel_context *ce)
spin_lock_irqsave(&ce->guc_state.lock, flags); spin_lock_irqsave(&ce->guc_state.lock, flags);
/* /*
* We have to check if the context has been disabled by another thread. * We have to check if the context has been disabled by another thread,
* We also have to check if the context has been pinned again as another * check if submission has been disabled to seal a race with reset and
* pin operation is allowed to pass this function. Checking the pin * finally check if any more requests have been committed to the
* count, within ce->guc_state.lock, synchronizes this function with * context ensursing that a request doesn't slip through the
* guc_request_alloc ensuring a request doesn't slip through the * 'context_pending_disable' fence.
* 'context_pending_disable' fence. Checking within the spin lock (can't
* sleep) ensures another process doesn't pin this context and generate
* a request before we set the 'context_pending_disable' flag here.
*/ */
if (unlikely(!context_enabled(ce) || submission_disabled(guc))) { if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
context_has_committed_requests(ce))) {
clr_context_enabled(ce); clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags); spin_unlock_irqrestore(&ce->guc_state.lock, flags);
goto unpin; goto unpin;
} }
if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return;
}
guc_id = prep_context_pending_disable(ce); guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags); spin_unlock_irqrestore(&ce->guc_state.lock, flags);
...@@ -1813,6 +1826,7 @@ static void __guc_context_destroy(struct intel_context *ce) ...@@ -1813,6 +1826,7 @@ static void __guc_context_destroy(struct intel_context *ce)
ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] || ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]); ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce); lrc_fini(ce);
intel_context_fini(ce); intel_context_fini(ce);
...@@ -2043,6 +2057,10 @@ static void remove_from_context(struct i915_request *rq) ...@@ -2043,6 +2057,10 @@ static void remove_from_context(struct i915_request *rq)
spin_unlock_irq(&ce->guc_active.lock); spin_unlock_irq(&ce->guc_active.lock);
spin_lock_irq(&ce->guc_state.lock);
decr_context_committed_requests(ce);
spin_unlock_irq(&ce->guc_state.lock);
atomic_dec(&ce->guc_id_ref); atomic_dec(&ce->guc_id_ref);
i915_request_notify_execute_cb_imm(rq); i915_request_notify_execute_cb_imm(rq);
} }
...@@ -2193,15 +2211,7 @@ static int guc_request_alloc(struct i915_request *rq) ...@@ -2193,15 +2211,7 @@ static int guc_request_alloc(struct i915_request *rq)
* schedule enable or context registration if either G2H is pending * schedule enable or context registration if either G2H is pending
* respectfully. Once a G2H returns, the fence is released that is * respectfully. Once a G2H returns, the fence is released that is
* blocking these requests (see guc_signal_context_fence). * blocking these requests (see guc_signal_context_fence).
*
* We can safely check the below fields outside of the lock as it isn't
* possible for these fields to transition from being clear to set but
* converse is possible, hence the need for the check within the lock.
*/ */
if (likely(!context_wait_for_deregister_to_register(ce) &&
!context_pending_disable(ce)))
return 0;
spin_lock_irqsave(&ce->guc_state.lock, flags); spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) || if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) { context_pending_disable(ce)) {
...@@ -2210,6 +2220,7 @@ static int guc_request_alloc(struct i915_request *rq) ...@@ -2210,6 +2220,7 @@ static int guc_request_alloc(struct i915_request *rq)
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences); list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
} }
incr_context_committed_requests(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags); spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment