Commit 9798b172 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Move GuC priority fields in context under guc_active

Move the GuC priority management fields in the context under the
guc_active struct, as this is where the lock that protects these fields
lives. Also, only set the guc_prio field once, during context init.

v2:
 (Daniele)
  - set CONTEXT_GUC_INIT
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-21-matthew.brost@intel.com
parent 5b116c17
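
The idea behind the move: a field should live next to the lock that protects it, so ownership is visible in the type itself and lockdep assertions can enforce it at every access site. A minimal sketch of the resulting layout, using hypothetical sketch_* names rather than the real struct intel_context:

/* Simplified sketch of the layout this patch moves to; sketch_context
 * is illustrative, not the real struct intel_context. */
#include <linux/spinlock.h>
#include <linux/list.h>

#define GUC_CLIENT_PRIORITY_NUM 4	/* four priority bands in the GuC ABI */

struct sketch_context {
	struct {
		spinlock_t lock;	/* protects everything in this sub-struct */
		struct list_head requests;
		u8 prio;		/* current priority sent to the GuC */
		u32 prio_count[GUC_CLIENT_PRIORITY_NUM]; /* in-flight requests per band */
	} guc_active;
};

/* Accessors can now assert the owning lock by name: */
static inline void sketch_set_prio(struct sketch_context *ce, u8 prio)
{
	lockdep_assert_held(&ce->guc_active.lock);
	ce->guc_active.prio = prio;
}
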
@@ -112,6 +112,7 @@ struct intel_context {
 #define CONTEXT_FORCE_SINGLE_SUBMISSION 7
 #define CONTEXT_NOPREEMPT 8
 #define CONTEXT_LRCA_DIRTY 9
+#define CONTEXT_GUC_INIT 10
 
 	struct {
 		u64 timeout_us;
@@ -178,6 +179,11 @@ struct intel_context {
 		spinlock_t lock;
 		/** requests: active requests on this context */
 		struct list_head requests;
+		/*
+		 * GuC priority management
+		 */
+		u8 prio;
+		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
 	} guc_active;
 
 	/* GuC LRC descriptor ID */
@@ -191,12 +197,6 @@ struct intel_context {
 	 */
 	struct list_head guc_id_link;
 
-	/*
-	 * GuC priority management
-	 */
-	u8 guc_prio;
-	u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
-
 #ifdef CONFIG_DRM_I915_SELFTEST
 	/**
 	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest

@@ -1385,8 +1385,6 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
 	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
 }
 
-static inline u8 map_i915_prio_to_guc_prio(int prio);
-
 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 {
 	struct intel_engine_cs *engine = ce->engine;
@@ -1394,8 +1392,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	struct intel_guc *guc = &engine->gt->uc.guc;
 	u32 desc_idx = ce->guc_id;
 	struct guc_lrc_desc *desc;
-	const struct i915_gem_context *ctx;
-	int prio = I915_CONTEXT_DEFAULT_PRIORITY;
 	bool context_registered;
 	intel_wakeref_t wakeref;
 	int ret = 0;
@@ -1412,12 +1408,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	context_registered = lrc_desc_registered(guc, desc_idx);
 
-	rcu_read_lock();
-	ctx = rcu_dereference(ce->gem_context);
-	if (ctx)
-		prio = ctx->sched.priority;
-	rcu_read_unlock();
-
 	reset_lrc_desc(guc, desc_idx);
 	set_lrc_desc_registered(guc, desc_idx, ce);
@@ -1426,8 +1416,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	desc->engine_submit_mask = adjust_engine_mask(engine->class,
 						      engine->mask);
 	desc->hw_context_desc = ce->lrc.lrca;
-	ce->guc_prio = map_i915_prio_to_guc_prio(prio);
-	desc->priority = ce->guc_prio;
+	desc->priority = ce->guc_active.prio;
 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
 	guc_context_policy_init(engine, desc);
@@ -1822,10 +1811,10 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 
 static void __guc_context_destroy(struct intel_context *ce)
 {
-	GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
-		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
-		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
-		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+	GEM_BUG_ON(ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
+		   ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
+		   ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
+		   ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
 	GEM_BUG_ON(ce->guc_state.number_committed_requests);
 
 	lrc_fini(ce);
@@ -1935,14 +1924,17 @@ static void guc_context_set_prio(struct intel_guc *guc,
 	GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
 		   prio > GUC_CLIENT_PRIORITY_NORMAL);
 	lockdep_assert_held(&ce->guc_active.lock);
 
-	if (ce->guc_prio == prio || submission_disabled(guc) ||
-	    !context_registered(ce))
+	if (ce->guc_active.prio == prio || submission_disabled(guc) ||
+	    !context_registered(ce)) {
+		ce->guc_active.prio = prio;
 		return;
+	}
 
 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
 
-	ce->guc_prio = prio;
+	ce->guc_active.prio = prio;
 	trace_intel_context_set_prio(ce);
 }
 
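
Note the behavioral tweak in guc_context_set_prio() above: on the early-return path (priority already in sync, submission disabled, or context not registered with the GuC) the new value is now cached in ce->guc_active.prio anyway, so guc_lrc_desc_pin() publishes it through desc->priority at registration time. A minimal sketch of this cache-then-send pattern, with hypothetical sketch_* names:

/* Cache-then-send: always record the desired value locally; only talk
 * to the firmware when it is reachable and actually out of date. */
struct sketch_ctx {
	bool registered;	/* has the firmware seen this context? */
	unsigned char prio;	/* last value cached/sent */
};

static void sketch_ctx_set_prio(struct sketch_ctx *ctx, unsigned char prio)
{
	if (ctx->prio == prio || !ctx->registered) {
		ctx->prio = prio;	/* cache; registration sends it later */
		return;
	}
	/* a send-to-firmware call would go here */
	ctx->prio = prio;
}
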
@@ -1962,24 +1954,24 @@ static inline void add_context_inflight_prio(struct intel_context *ce,
 					     u8 guc_prio)
 {
 	lockdep_assert_held(&ce->guc_active.lock);
-	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count));
 
-	++ce->guc_prio_count[guc_prio];
+	++ce->guc_active.prio_count[guc_prio];
 
 	/* Overflow protection */
-	GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+	GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]);
 }
 
 static inline void sub_context_inflight_prio(struct intel_context *ce,
 					     u8 guc_prio)
 {
 	lockdep_assert_held(&ce->guc_active.lock);
-	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count));
 
 	/* Underflow protection */
-	GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+	GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]);
 
-	--ce->guc_prio_count[guc_prio];
+	--ce->guc_active.prio_count[guc_prio];
 }
 
 static inline void update_context_prio(struct intel_context *ce)
@@ -1992,8 +1984,8 @@ static inline void update_context_prio(struct intel_context *ce)
 
 	lockdep_assert_held(&ce->guc_active.lock);
 
-	for (i = 0; i < ARRAY_SIZE(ce->guc_prio_count); ++i) {
-		if (ce->guc_prio_count[i]) {
+	for (i = 0; i < ARRAY_SIZE(ce->guc_active.prio_count); ++i) {
+		if (ce->guc_active.prio_count[i]) {
 			guc_context_set_prio(guc, ce, i);
 			break;
 		}
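
The prio_count[] array is a small refcount-per-band scheme: each in-flight request bumps the counter for its band, and update_context_prio() walks the array from index 0 and applies the first non-empty band, since a lower index means a higher priority in the GuC ABI. A standalone sketch of that selection logic (sketch_effective_prio() is illustrative; the real code calls guc_context_set_prio() instead of returning a value):

#include <stdint.h>

/* Pick the highest-priority (lowest-index) band with requests in flight.
 * Falls back to the last band when nothing is in flight; the kernel code
 * simply leaves the priority unchanged in that case. */
static uint8_t sketch_effective_prio(const uint32_t *prio_count, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; ++i)
		if (prio_count[i])
			return (uint8_t)i;
	return (uint8_t)(num - 1);
}
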
@@ -2135,6 +2127,21 @@ static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
 		!submission_disabled(ce_to_guc(ce));
 }
 
+static void guc_context_init(struct intel_context *ce)
+{
+	const struct i915_gem_context *ctx;
+	int prio = I915_CONTEXT_DEFAULT_PRIORITY;
+
+	rcu_read_lock();
+	ctx = rcu_dereference(ce->gem_context);
+	if (ctx)
+		prio = ctx->sched.priority;
+	rcu_read_unlock();
+
+	ce->guc_active.prio = map_i915_prio_to_guc_prio(prio);
+	set_bit(CONTEXT_GUC_INIT, &ce->flags);
+}
+
 static int guc_request_alloc(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
@@ -2166,6 +2173,9 @@ static int guc_request_alloc(struct i915_request *rq)
 
 	rq->reserved_space -= GUC_REQUEST_SIZE;
 
+	if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
+		guc_context_init(ce);
+
 	/*
 	 * Call pin_guc_id here rather than in the pinning step as with
 	 * dma_resv, contexts can be repeatedly pinned / unpinned trashing the
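
The test_bit()/guc_context_init() pair is lazy one-shot initialization: the first request allocated on a context reads the GEM context's scheduling priority, and CONTEXT_GUC_INIT makes every later allocation skip that work. Request creation on a given context is serialized (the timeline mutex is held at this point), so a plain test_bit() followed by set_bit() suffices; an atomic test-and-set is not needed. A self-contained sketch of the pattern, with hypothetical sketch_* names:

#include <linux/bitops.h>
#include <linux/compiler.h>

#define SKETCH_GUC_INIT 10			/* mirrors CONTEXT_GUC_INIT */

struct sketch_ce {
	unsigned long flags;
	u8 prio;
};

static void sketch_context_init(struct sketch_ce *ce)
{
	ce->prio = 2;				/* stand-in for the one-time setup */
	set_bit(SKETCH_GUC_INIT, &ce->flags);	/* flip the one-shot flag last */
}

static void sketch_request_alloc(struct sketch_ce *ce)
{
	/* unlikely(): only the very first request on a context takes this */
	if (unlikely(!test_bit(SKETCH_GUC_INIT, &ce->flags)))
		sketch_context_init(ce);
	/* normal request setup continues */
}
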
@@ -3042,13 +3052,12 @@ static inline void guc_log_context_priority(struct drm_printer *p,
 {
 	int i;
 
-	drm_printf(p, "\t\tPriority: %d\n",
-		   ce->guc_prio);
+	drm_printf(p, "\t\tPriority: %d\n", ce->guc_active.prio);
 	drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
 	for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
 	     i < GUC_CLIENT_PRIORITY_NUM; ++i) {
 		drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
-			   i, ce->guc_prio_count[i]);
+			   i, ce->guc_active.prio_count[i]);
 	}
 	drm_printf(p, "\n");
 }

@@ -910,7 +910,7 @@ DECLARE_EVENT_CLASS(intel_context,
 		__entry->guc_id = ce->guc_id;
 		__entry->pin_count = atomic_read(&ce->pin_count);
 		__entry->sched_state = ce->guc_state.sched_state;
-		__entry->guc_prio = ce->guc_prio;
+		__entry->guc_prio = ce->guc_active.prio;
 	),
 
 	TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
......