Commit 3cb3e343 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Move fields protected by guc->contexts_lock into sub structure

To make ownership of the locking clear, move the fields (guc_id, guc_id_ref,
guc_id_link) into the guc_id sub-structure in intel_context.
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-22-matthew.brost@intel.com
parent 9798b172
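
For orientation, a minimal sketch of the new layout described above (purely illustrative, not the full header; every other member of struct intel_context is omitted). The former guc_id, guc_id_ref and guc_id_link fields become guc_id.id, guc_id.ref and guc_id.link, so everything guarded by guc->contexts_lock now sits under a single name:

	struct intel_context {
		/* ... other members elided for illustration ... */

		struct {
			/* GuC LRC descriptor ID */
			u16 id;

			/* GuC LRC descriptor reference count */
			atomic_t ref;

			/* in list when unpinned but guc_id still valid in GuC */
			struct list_head link;
		} guc_id;
	};

Call sites change accordingly, e.g. ce->guc_id becomes ce->guc_id.id, as the hunks below show.
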
@@ -398,8 +398,8 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 	spin_lock_init(&ce->guc_active.lock);
 	INIT_LIST_HEAD(&ce->guc_active.requests);
 
-	ce->guc_id = GUC_INVALID_LRC_ID;
-	INIT_LIST_HEAD(&ce->guc_id_link);
+	ce->guc_id.id = GUC_INVALID_LRC_ID;
+	INIT_LIST_HEAD(&ce->guc_id.link);
 
 	/*
 	 * Initialize fence to be complete as this is expected to be complete
......
@@ -186,16 +186,18 @@ struct intel_context {
 		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
 	} guc_active;
 
-	/* GuC LRC descriptor ID */
-	u16 guc_id;
-
-	/* GuC LRC descriptor reference count */
-	atomic_t guc_id_ref;
-
-	/*
-	 * GuC ID link - in list when unpinned but guc_id still valid in GuC
-	 */
-	struct list_head guc_id_link;
+	struct {
+		/* GuC LRC descriptor ID */
+		u16 id;
+
+		/* GuC LRC descriptor reference count */
+		atomic_t ref;
+
+		/*
+		 * GuC ID link - in list when unpinned but guc_id still valid in GuC
+		 */
+		struct list_head link;
+	} guc_id;
 
 #ifdef CONFIG_DRM_I915_SELFTEST
 	/**
......
@@ -789,7 +789,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 			if (err)
 				pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
 				       engine->name, rq->fence.context,
-				       rq->fence.seqno, rq->context->guc_id, err);
+				       rq->fence.seqno, rq->context->guc_id.id, err);
 		}
 
 skip:
@@ -1098,7 +1098,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 				if (err)
 					pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
 					       engine->name, rq->fence.context,
-					       rq->fence.seqno, rq->context->guc_id, err);
+					       rq->fence.seqno, rq->context->guc_id.id, err);
 			}
 
 			count++;
@@ -1108,7 +1108,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 				pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
 				       engine->name, test_name,
 				       rq->fence.context,
-				       rq->fence.seqno, rq->context->guc_id);
+				       rq->fence.seqno, rq->context->guc_id.id);
 				i915_request_put(rq);
 				GEM_TRACE_DUMP();
......
@@ -805,7 +805,7 @@ DECLARE_EVENT_CLASS(i915_request,
 			   __entry->dev = rq->engine->i915->drm.primary->index;
 			   __entry->class = rq->engine->uabi_class;
 			   __entry->instance = rq->engine->uabi_instance;
-			   __entry->guc_id = rq->context->guc_id;
+			   __entry->guc_id = rq->context->guc_id.id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
 			   __entry->tail = rq->tail;
@@ -907,7 +907,7 @@ DECLARE_EVENT_CLASS(intel_context,
 		    ),
 
 		    TP_fast_assign(
-			       __entry->guc_id = ce->guc_id;
+			       __entry->guc_id = ce->guc_id.id;
 			       __entry->pin_count = atomic_read(&ce->pin_count);
 			       __entry->sched_state = ce->guc_state.sched_state;
 			       __entry->guc_prio = ce->guc_active.prio;
......