Commit 3cb3e343 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Move fields protected by guc->contexts_lock into sub structure

To make ownership of the locking clear, move the fields (guc_id, guc_id_ref,
guc_id_link) into the guc_id sub-structure in intel_context.
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-22-matthew.brost@intel.com
parent 9798b172
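In short: three loose intel_context fields become one named sub-structure, so
everything guarded by guc->contexts_lock now sits behind a single ce->guc_id
prefix. A condensed before/after sketch (other intel_context members and the
field comments elided):

	/* before */
	struct intel_context {
		...
		u16 guc_id;			/* GuC LRC descriptor ID */
		atomic_t guc_id_ref;		/* descriptor reference count */
		struct list_head guc_id_link;	/* on guc->guc_id_list when unpinned */
	};

	/* after: one sub-structure, all of it protected by guc->contexts_lock */
	struct intel_context {
		...
		struct {
			u16 id;
			atomic_t ref;
			struct list_head link;
		} guc_id;
	};

Accesses are renamed mechanically: ce->guc_id becomes ce->guc_id.id,
&ce->guc_id_ref becomes &ce->guc_id.ref, and &ce->guc_id_link becomes
&ce->guc_id.link, as the diff below shows.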
@@ -398,8 +398,8 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 	spin_lock_init(&ce->guc_active.lock);
 	INIT_LIST_HEAD(&ce->guc_active.requests);
-	ce->guc_id = GUC_INVALID_LRC_ID;
-	INIT_LIST_HEAD(&ce->guc_id_link);
+	ce->guc_id.id = GUC_INVALID_LRC_ID;
+	INIT_LIST_HEAD(&ce->guc_id.link);

 	/*
 	 * Initialize fence to be complete as this is expected to be complete
...
@@ -186,16 +186,18 @@ struct intel_context {
 		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
 	} guc_active;

-	/* GuC LRC descriptor ID */
-	u16 guc_id;
-
-	/* GuC LRC descriptor reference count */
-	atomic_t guc_id_ref;
-
-	/*
-	 * GuC ID link - in list when unpinned but guc_id still valid in GuC
-	 */
-	struct list_head guc_id_link;
+	struct {
+		/* GuC LRC descriptor ID */
+		u16 id;
+
+		/* GuC LRC descriptor reference count */
+		atomic_t ref;
+
+		/*
+		 * GuC ID link - in list when unpinned but guc_id still valid in GuC
+		 */
+		struct list_head link;
+	} guc_id;

 #ifdef CONFIG_DRM_I915_SELFTEST
 	/**
...
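The link field's one-line comment is terse; reconstructed from the pin, steal,
and release paths in the submission code below, the id's lifecycle (all
transitions under guc->contexts_lock) is roughly:

	/* Rough guc_id lifecycle, reconstructed from the diff below:
	 *
	 * pin_guc_id():      id invalid -> assign_guc_id(guc, &ce->guc_id.id),
	 *                    drop off guc->guc_id_list via guc_id.link,
	 *                    then atomic_inc(&ce->guc_id.ref)
	 * unpin_guc_id():    when guc_id.ref is zero, append guc_id.link to
	 *                    guc->guc_id_list; the id stays valid in the GuC
	 * steal_guc_id():    pop the first entry off guc->guc_id_list and
	 *                    reuse its guc_id.id for another context
	 * __release_guc_id(): return guc_id.id to the IDA and mark it invalid
	 */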
@@ -789,7 +789,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 			if (err)
 				pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
 				       engine->name, rq->fence.context,
-				       rq->fence.seqno, rq->context->guc_id, err);
+				       rq->fence.seqno, rq->context->guc_id.id, err);
 		}

 skip:
@@ -1098,7 +1098,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 			if (err)
 				pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
 				       engine->name, rq->fence.context,
-				       rq->fence.seqno, rq->context->guc_id, err);
+				       rq->fence.seqno, rq->context->guc_id.id, err);
 			}

 			count++;
@@ -1108,7 +1108,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 				pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
 				       engine->name, test_name,
 				       rq->fence.context,
-				       rq->fence.seqno, rq->context->guc_id);
+				       rq->fence.seqno, rq->context->guc_id.id);

 				i915_request_put(rq);
 				GEM_TRACE_DUMP();
...
@@ -270,12 +270,12 @@ static inline void decr_context_committed_requests(struct intel_context *ce)

 static inline bool context_guc_id_invalid(struct intel_context *ce)
 {
-	return ce->guc_id == GUC_INVALID_LRC_ID;
+	return ce->guc_id.id == GUC_INVALID_LRC_ID;
 }

 static inline void set_context_guc_id_invalid(struct intel_context *ce)
 {
-	ce->guc_id = GUC_INVALID_LRC_ID;
+	ce->guc_id.id = GUC_INVALID_LRC_ID;
 }

 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
@@ -468,14 +468,14 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 		return 0;
 	}

-	GEM_BUG_ON(!atomic_read(&ce->guc_id_ref));
+	GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
 	GEM_BUG_ON(context_guc_id_invalid(ce));

 	/*
 	 * Corner case where the GuC firmware was blown away and reloaded while
 	 * this context was pinned.
 	 */
-	if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) {
+	if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id))) {
 		err = guc_lrc_desc_pin(ce, false);
 		if (unlikely(err))
 			return err;
@@ -494,14 +494,14 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)

 	if (!enabled) {
 		action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
-		action[len++] = ce->guc_id;
+		action[len++] = ce->guc_id.id;
 		action[len++] = GUC_CONTEXT_ENABLE;
 		set_context_pending_enable(ce);
 		intel_context_get(ce);
 		g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
 	} else {
 		action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
-		action[len++] = ce->guc_id;
+		action[len++] = ce->guc_id.id;
 	}

 	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
@@ -1167,12 +1167,12 @@ static int new_guc_id(struct intel_guc *guc)
 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
 	if (!context_guc_id_invalid(ce)) {
-		ida_simple_remove(&guc->guc_ids, ce->guc_id);
-		reset_lrc_desc(guc, ce->guc_id);
+		ida_simple_remove(&guc->guc_ids, ce->guc_id.id);
+		reset_lrc_desc(guc, ce->guc_id.id);
 		set_context_guc_id_invalid(ce);
 	}
-	if (!list_empty(&ce->guc_id_link))
-		list_del_init(&ce->guc_id_link);
+	if (!list_empty(&ce->guc_id.link))
+		list_del_init(&ce->guc_id.link);
 }

 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
@@ -1194,13 +1194,13 @@ static int steal_guc_id(struct intel_guc *guc)
 	if (!list_empty(&guc->guc_id_list)) {
 		ce = list_first_entry(&guc->guc_id_list,
 				      struct intel_context,
-				      guc_id_link);
+				      guc_id.link);

-		GEM_BUG_ON(atomic_read(&ce->guc_id_ref));
+		GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
 		GEM_BUG_ON(context_guc_id_invalid(ce));

-		list_del_init(&ce->guc_id_link);
-		guc_id = ce->guc_id;
+		list_del_init(&ce->guc_id.link);
+		guc_id = ce->guc_id.id;

 		spin_lock(&ce->guc_state.lock);
 		clr_context_registered(ce);
@@ -1236,7 +1236,7 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	int ret = 0;
 	unsigned long flags, tries = PIN_GUC_ID_TRIES;

-	GEM_BUG_ON(atomic_read(&ce->guc_id_ref));
+	GEM_BUG_ON(atomic_read(&ce->guc_id.ref));

 try_again:
 	spin_lock_irqsave(&guc->contexts_lock, flags);
@@ -1244,20 +1244,20 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	might_lock(&ce->guc_state.lock);

 	if (context_guc_id_invalid(ce)) {
-		ret = assign_guc_id(guc, &ce->guc_id);
+		ret = assign_guc_id(guc, &ce->guc_id.id);
 		if (ret)
 			goto out_unlock;
 		ret = 1;	/* Indicates newly assigned guc_id */
 	}
-	if (!list_empty(&ce->guc_id_link))
-		list_del_init(&ce->guc_id_link);
-	atomic_inc(&ce->guc_id_ref);
+	if (!list_empty(&ce->guc_id.link))
+		list_del_init(&ce->guc_id.link);
+	atomic_inc(&ce->guc_id.ref);

 out_unlock:
 	spin_unlock_irqrestore(&guc->contexts_lock, flags);

 	/*
-	 * -EAGAIN indicates no guc_ids are available, let's retire any
+	 * -EAGAIN indicates no guc_id are available, let's retire any
 	 * outstanding requests to see if that frees up a guc_id. If the first
 	 * retire didn't help, insert a sleep with the timeslice duration before
 	 * attempting to retire more requests. Double the sleep period each
@@ -1285,15 +1285,15 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
 	unsigned long flags;

-	GEM_BUG_ON(atomic_read(&ce->guc_id_ref) < 0);
+	GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);

 	if (unlikely(context_guc_id_invalid(ce)))
 		return;

 	spin_lock_irqsave(&guc->contexts_lock, flags);
-	if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id_link) &&
-	    !atomic_read(&ce->guc_id_ref))
-		list_add_tail(&ce->guc_id_link, &guc->guc_id_list);
+	if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
+	    !atomic_read(&ce->guc_id.ref))
+		list_add_tail(&ce->guc_id.link, &guc->guc_id_list);
 	spin_unlock_irqrestore(&guc->contexts_lock, flags);
 }
@@ -1316,12 +1316,12 @@ static int register_context(struct intel_context *ce, bool loop)
 {
 	struct intel_guc *guc = ce_to_guc(ce);
 	u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
-		ce->guc_id * sizeof(struct guc_lrc_desc);
+		ce->guc_id.id * sizeof(struct guc_lrc_desc);
 	int ret;

 	trace_intel_context_register(ce);

-	ret = __guc_action_register_context(guc, ce->guc_id, offset, loop);
+	ret = __guc_action_register_context(guc, ce->guc_id.id, offset, loop);
 	if (likely(!ret)) {
 		unsigned long flags;
@@ -1390,7 +1390,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	struct intel_engine_cs *engine = ce->engine;
 	struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
 	struct intel_guc *guc = &engine->gt->uc.guc;
-	u32 desc_idx = ce->guc_id;
+	u32 desc_idx = ce->guc_id.id;
 	struct guc_lrc_desc *desc;
 	bool context_registered;
 	intel_wakeref_t wakeref;
@@ -1453,7 +1453,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 		 * context whose guc_id was stolen.
 		 */
 		with_intel_runtime_pm(runtime_pm, wakeref)
-			ret = deregister_context(ce, ce->guc_id);
+			ret = deregister_context(ce, ce->guc_id.id);
 		if (unlikely(ret == -ENODEV))
 			ret = 0;	/* Will get registered later */
 	} else {
@@ -1524,7 +1524,7 @@ static void __guc_context_sched_enable(struct intel_guc *guc,
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
-		ce->guc_id,
+		ce->guc_id.id,
 		GUC_CONTEXT_ENABLE
 	};
@@ -1540,7 +1540,7 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
-		guc_id,	/* ce->guc_id not stable */
+		guc_id,	/* ce->guc_id.id not stable */
 		GUC_CONTEXT_DISABLE
 	};
@@ -1585,7 +1585,7 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
 	guc_blocked_fence_reinit(ce);
 	intel_context_get(ce);

-	return ce->guc_id;
+	return ce->guc_id.id;
 }

 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
@@ -1638,7 +1638,7 @@ static bool context_cant_unblock(struct intel_context *ce)

 	return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
 		context_guc_id_invalid(ce) ||
-		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id) ||
+		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
 		!intel_context_is_pinned(ce);
 }
@@ -1757,7 +1757,7 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
 		if (!context_guc_id_invalid(ce))
 			with_intel_runtime_pm(runtime_pm, wakeref)
 				__guc_context_set_preemption_timeout(guc,
-								     ce->guc_id,
+								     ce->guc_id.id,
 								     1);
 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 	}
@@ -1802,11 +1802,11 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 {
 	struct intel_guc *guc = ce_to_guc(ce);

-	GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id));
-	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id));
+	GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
+	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
 	GEM_BUG_ON(context_enabled(ce));

-	deregister_context(ce, ce->guc_id);
+	deregister_context(ce, ce->guc_id.id);
 }

 static void __guc_context_destroy(struct intel_context *ce)
@@ -1851,7 +1851,7 @@ static void guc_context_destroy(struct kref *kref)
 		__guc_context_destroy(ce);
 		return;
 	} else if (submission_disabled(guc) ||
-		   !lrc_desc_registered(guc, ce->guc_id)) {
+		   !lrc_desc_registered(guc, ce->guc_id.id)) {
 		release_guc_id(guc, ce);
 		__guc_context_destroy(ce);
 		return;
@@ -1860,10 +1860,10 @@ static void guc_context_destroy(struct kref *kref)
 	/*
 	 * We have to acquire the context spinlock and check guc_id again, if it
 	 * is valid it hasn't been stolen and needs to be deregistered. We
-	 * delete this context from the list of unpinned guc_ids available to
+	 * delete this context from the list of unpinned guc_id available to
 	 * steal to seal a race with guc_lrc_desc_pin(). When the G2H CTB
 	 * returns indicating this context has been deregistered the guc_id is
-	 * returned to the pool of available guc_ids.
+	 * returned to the pool of available guc_id.
 	 */
 	spin_lock_irqsave(&guc->contexts_lock, flags);
 	if (context_guc_id_invalid(ce)) {
@@ -1872,8 +1872,8 @@ static void guc_context_destroy(struct kref *kref)
 		return;
 	}

-	if (!list_empty(&ce->guc_id_link))
-		list_del_init(&ce->guc_id_link);
+	if (!list_empty(&ce->guc_id.link))
+		list_del_init(&ce->guc_id.link);
 	spin_unlock_irqrestore(&guc->contexts_lock, flags);

 	/* Seal race with Reset */
@@ -1918,7 +1918,7 @@ static void guc_context_set_prio(struct intel_guc *guc,
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
-		ce->guc_id,
+		ce->guc_id.id,
 		prio,
 	};
@@ -2053,7 +2053,7 @@ static void remove_from_context(struct i915_request *rq)
 	decr_context_committed_requests(ce);
 	spin_unlock_irq(&ce->guc_state.lock);

-	atomic_dec(&ce->guc_id_ref);
+	atomic_dec(&ce->guc_id.ref);
 	i915_request_notify_execute_cb_imm(rq);
 }
@@ -2123,7 +2123,7 @@ static void guc_signal_context_fence(struct intel_context *ce)
 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
 {
 	return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
-		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id)) &&
+		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
 		!submission_disabled(ce_to_guc(ce));
 }
@@ -2179,11 +2179,11 @@ static int guc_request_alloc(struct i915_request *rq)
 	/*
 	 * Call pin_guc_id here rather than in the pinning step as with
 	 * dma_resv, contexts can be repeatedly pinned / unpinned trashing the
-	 * guc_ids and creating horrible race conditions. This is especially bad
-	 * when guc_ids are being stolen due to over subscription. By the time
+	 * guc_id and creating horrible race conditions. This is especially bad
+	 * when guc_id are being stolen due to over subscription. By the time
 	 * this function is reached, it is guaranteed that the guc_id will be
 	 * persistent until the generated request is retired. Thus, sealing these
-	 * race conditions. It is still safe to fail here if guc_ids are
+	 * race conditions. It is still safe to fail here if guc_id are
 	 * exhausted and return -EAGAIN to the user indicating that they can try
 	 * again in the future.
 	 *
@@ -2193,7 +2193,7 @@ static int guc_request_alloc(struct i915_request *rq)
 	 * decremented on each retire. When it is zero, a lock around the
 	 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
 	 */
-	if (atomic_add_unless(&ce->guc_id_ref, 1, 0))
+	if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
 		goto out;

 	ret = pin_guc_id(guc, ce);	/* returns 1 if new guc_id assigned */
@@ -2206,7 +2206,7 @@ static int guc_request_alloc(struct i915_request *rq)
 			disable_submission(guc);
 			goto out;	/* GPU will be reset */
 		}
-		atomic_dec(&ce->guc_id_ref);
+		atomic_dec(&ce->guc_id.ref);
 		unpin_guc_id(guc, ce);
 		return ret;
 	}
@@ -3040,7 +3040,7 @@ void intel_guc_submission_print_info(struct intel_guc *guc,

 		priolist_for_each_request(rq, pl)
 			drm_printf(p, "guc_id=%u, seqno=%llu\n",
-				   rq->context->guc_id,
+				   rq->context->guc_id.id,
 				   rq->fence.seqno);
 	}
 	spin_unlock_irqrestore(&sched_engine->lock, flags);
@@ -3071,7 +3071,7 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,

 	xa_lock_irqsave(&guc->context_lookup, flags);
 	xa_for_each(&guc->context_lookup, index, ce) {
-		drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id);
+		drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
 		drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
 		drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
 			   ce->ring->head,
@@ -3082,7 +3082,7 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
 		drm_printf(p, "\t\tContext Pin Count: %u\n",
 			   atomic_read(&ce->pin_count));
 		drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
-			   atomic_read(&ce->guc_id_ref));
+			   atomic_read(&ce->guc_id.ref));
 		drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
 			   ce->guc_state.sched_state);
...
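The guc_request_alloc() comment above describes the lockless fast path that
atomic_add_unless() provides: take another guc_id reference without
guc->contexts_lock only when the count is already non-zero. A minimal
userspace C analogue of that pattern (pin_fast and ref are hypothetical
stand-ins, not driver code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int ref;	/* stand-in for ce->guc_id.ref */

	/* Increment ref unless it is zero, like atomic_add_unless(&ref, 1, 0). */
	static bool pin_fast(void)
	{
		int old = atomic_load(&ref);

		while (old != 0) {
			/* On failure, old is reloaded with the current value. */
			if (atomic_compare_exchange_weak(&ref, &old, old + 1))
				return true;	/* pinned without the lock */
		}
		return false;	/* count was zero */
	}

If pin_fast() fails, the slow path takes guc->contexts_lock and goes through
pin_guc_id(), exactly as the diff's guc_request_alloc() does.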
@@ -805,7 +805,7 @@ DECLARE_EVENT_CLASS(i915_request,
 			   __entry->dev = rq->engine->i915->drm.primary->index;
 			   __entry->class = rq->engine->uabi_class;
 			   __entry->instance = rq->engine->uabi_instance;
-			   __entry->guc_id = rq->context->guc_id;
+			   __entry->guc_id = rq->context->guc_id.id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   __entry->tail = rq->tail;
@@ -907,7 +907,7 @@ DECLARE_EVENT_CLASS(intel_context,
 		    ),

 		    TP_fast_assign(
-			   __entry->guc_id = ce->guc_id;
+			   __entry->guc_id = ce->guc_id.id;
 			   __entry->pin_count = atomic_read(&ce->pin_count);
 			   __entry->sched_state = ce->guc_state.sched_state;
 			   __entry->guc_prio = ce->guc_active.prio;
...