Commit 9f3ccd40 authored by Chris Wilson

drm/i915: Drop GEM context as a direct link from i915_request

Keep the intel_context as the primary state for i915_request, with
the GEM context as a backpointer from the low-level state for the
rarer cases where we need client information. Our goal is to remove
such references to clients from the backend, leaving the HW submission
path agnostic to client interfaces and self-contained.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Andi Shyti <andi.shyti@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191220101230.256839-1-chris@chris-wilson.co.uk
parent d5e19353
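
In practice the patch collapses the request's two back-pointers into one: code that used to read rq->gem_context now chases rq->context->gem_context on the rare client-facing paths. A minimal before/after sketch of the structural change (field names as in the i915_request.h hunk below; unrelated members elided):

	/* Before: two links, kept in sync at request creation. */
	struct i915_request {
		struct i915_gem_context *gem_context;	/* client state */
		struct intel_engine_cs *engine;
		struct intel_context *hw_context;	/* HW state */
		/* ... */
	};

	/* After: a single link; client state sits behind a backpointer. */
	struct i915_request {
		struct intel_engine_cs *engine;
		struct intel_context *context;	/* primary, HW-level state */
		/* ... */
	};

	/* e.g. rq->gem_context becomes rq->context->gem_context */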
@@ -69,6 +69,7 @@
 #include <drm/i915_drm.h>
 
+#include "gt/intel_context.h"
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
@@ -423,15 +424,6 @@ static void kill_context(struct i915_gem_context *ctx)
 	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
 
-	/*
-	 * If we are already banned, it was due to a guilty request causing
-	 * a reset and the entire context being evicted from the GPU.
-	 */
-	if (i915_gem_context_is_banned(ctx))
-		return;
-
-	i915_gem_context_set_banned(ctx);
-
 	/*
 	 * Map the user's engine back to the actual engines; one virtual
 	 * engine will be mapped to multiple engines, and using ctx->engine[]
@@ -442,6 +434,9 @@ static void kill_context(struct i915_gem_context *ctx)
 	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
 		struct intel_engine_cs *engine;
 
+		if (intel_context_set_banned(ce))
+			continue;
+
 		/*
 		 * Check the current active state of this context; if we
 		 * are currently executing on the GPU we need to evict
@@ -1093,7 +1088,7 @@ static void set_ppgtt_barrier(void *data)
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
-	struct i915_address_space *vm = rq->hw_context->vm;
+	struct i915_address_space *vm = rq->context->vm;
 	struct intel_engine_cs *engine = rq->engine;
 	u32 base = engine->mmio_base;
 	u32 *cs;
...
@@ -91,26 +91,6 @@ static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
 	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
 }
 
-static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
-{
-	set_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
-{
-	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
 static inline bool
 i915_gem_context_user_engines(const struct i915_gem_context *ctx)
 {
@@ -129,24 +109,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
 	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
 }
 
-static inline bool
-i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
-{
-	set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
-{
-	clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
 static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 {
 	return !ctx->file_priv;
...
@@ -134,11 +134,8 @@ struct i915_gem_context {
	 * @flags: small set of booleans
	 */
 	unsigned long flags;
-#define CONTEXT_BANNED			0
-#define CONTEXT_CLOSED			1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
-#define CONTEXT_USER_ENGINES		3
-#define CONTEXT_NOPREEMPT		4
+#define CONTEXT_CLOSED			0
+#define CONTEXT_USER_ENGINES		1
 
 	struct mutex mutex;
...
@@ -730,9 +730,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	unsigned int i, batch;
 	int err;
 
-	if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
-		return -EIO;
-
 	INIT_LIST_HEAD(&eb->relocs);
 	INIT_LIST_HEAD(&eb->unbound);
 
@@ -2175,7 +2172,7 @@ static int eb_submit(struct i915_execbuffer *eb)
 		return err;
 	}
 
-	if (i915_gem_context_nopreempt(eb->gem_context))
+	if (intel_context_nopreempt(eb->context))
 		eb->request->flags |= I915_REQUEST_NOPREEMPT;
 
 	return 0;
@@ -2261,6 +2258,9 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
 	if (err)
 		return err;
 
+	if (unlikely(intel_context_is_banned(ce)))
+		return -EIO;
+
 	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
...
@@ -281,7 +281,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
 		struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-		struct intel_context *ce = rq->hw_context;
+		struct intel_context *ce = rq->context;
 		struct list_head *pos;
 
 		spin_lock(&b->irq_lock);
@@ -338,7 +338,7 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
	 */
 	spin_lock(&b->irq_lock);
 	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
-		struct intel_context *ce = rq->hw_context;
+		struct intel_context *ce = rq->context;
 
 		list_del(&rq->signal_link);
 		if (list_empty(&ce->signals))
...
@@ -302,7 +302,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 	int err;
 
 	/* Only suitable for use in remotely modifying this context */
-	GEM_BUG_ON(rq->hw_context == ce);
+	GEM_BUG_ON(rq->context == ce);
 
 	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
 		/* Queue this switch after current activity by this context. */
...
@@ -7,7 +7,9 @@
 #ifndef __INTEL_CONTEXT_H__
 #define __INTEL_CONTEXT_H__
 
+#include <linux/bitops.h>
 #include <linux/lockdep.h>
+#include <linux/types.h>
 
 #include "i915_active.h"
 #include "intel_context_types.h"
@@ -160,4 +162,44 @@ static inline struct intel_ring *__intel_context_ring_size(u64 sz)
 	return u64_to_ptr(struct intel_ring, sz);
 }
 
+static inline bool intel_context_is_banned(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool intel_context_set_banned(struct intel_context *ce)
+{
+	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool
+intel_context_force_single_submission(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline void
+intel_context_set_single_submission(struct intel_context *ce)
+{
+	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline bool
+intel_context_nopreempt(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_set_nopreempt(struct intel_context *ce)
+{
+	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_clear_nopreempt(struct intel_context *ce)
+{
+	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
 #endif /* __INTEL_CONTEXT_H__ */
...
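
Note the semantic upgrade hidden in these helpers: the old i915_gem_context_set_banned() returned void, while intel_context_set_banned() is built on test_and_set_bit() and reports whether the context was already banned. That is what allows kill_context() above to drop its separate is_banned() check and ban-plus-detect in one atomic step. A sketch of the caller idiom, abridged from the kill_context() hunk:

	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
		/*
		 * test_and_set_bit() sets the ban and returns the old
		 * state; only the first caller performs the eviction.
		 */
		if (intel_context_set_banned(ce))
			continue;

		/* ... check activity and evict ce from the GPU ... */
	}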
@@ -54,8 +54,11 @@ struct intel_context {
 	struct intel_timeline *timeline;
 
 	unsigned long flags;
 #define CONTEXT_ALLOC_BIT	0
 #define CONTEXT_VALID_BIT	1
+#define CONTEXT_BANNED		2
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	3
+#define CONTEXT_NOPREEMPT	4
 
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
...
@@ -1523,9 +1523,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 			print_request_ring(m, rq);
 
-			if (rq->hw_context->lrc_reg_state) {
+			if (rq->context->lrc_reg_state) {
 				drm_printf(m, "Logical Ring Context:\n");
-				hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
+				hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
 			}
 		}
 		spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -1586,7 +1586,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 		for (port = execlists->pending; (rq = *port); port++) {
 			/* Exclude any contexts already counted in active */
-			if (!intel_context_inflight_count(rq->hw_context))
+			if (!intel_context_inflight_count(rq->context))
 				engine->stats.active++;
 		}
...
@@ -880,7 +880,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			list_move(&rq->sched.link, pl);
 			active = rq;
 		} else {
-			struct intel_engine_cs *owner = rq->hw_context->engine;
+			struct intel_engine_cs *owner = rq->context->engine;
 
 			/*
			 * Decouple the virtual breadcrumb before moving it
@@ -1051,7 +1051,7 @@ static void restore_default_state(struct intel_context *ce,
 static void reset_active(struct i915_request *rq,
			 struct intel_engine_cs *engine)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	u32 head;
 
 	/*
@@ -1092,11 +1092,11 @@ static inline struct intel_engine_cs *
 __execlists_schedule_in(struct i915_request *rq)
 {
 	struct intel_engine_cs * const engine = rq->engine;
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 
 	intel_context_get(ce);
 
-	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+	if (unlikely(intel_context_is_banned(ce)))
 		reset_active(rq, engine);
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -1124,7 +1124,7 @@ __execlists_schedule_in(struct i915_request *rq)
 static inline struct i915_request *
 execlists_schedule_in(struct i915_request *rq, int idx)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *old;
 
 	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
@@ -1155,7 +1155,7 @@ static inline void
 __execlists_schedule_out(struct i915_request *rq,
			 struct intel_engine_cs * const engine)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 
 	/*
	 * NB process_csb() is not under the engine->active.lock and hence
@@ -1193,7 +1193,7 @@ __execlists_schedule_out(struct i915_request *rq,
 static inline void
 execlists_schedule_out(struct i915_request *rq)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *cur, *old;
 
 	trace_i915_request_out(rq);
@@ -1210,7 +1210,7 @@ execlists_schedule_out(struct i915_request *rq)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-	struct intel_context *ce = rq->hw_context;
+	struct intel_context *ce = rq->context;
 	u64 desc = ce->lrc_desc;
 	u32 tail;
 
@@ -1311,13 +1311,13 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 		GEM_BUG_ON(!kref_read(&rq->fence.refcount));
 		GEM_BUG_ON(!i915_request_is_active(rq));
 
-		if (ce == rq->hw_context) {
+		if (ce == rq->context) {
 			GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n",
				      ce->timeline->fence_context,
				      port - execlists->pending);
 			return false;
 		}
-		ce = rq->hw_context;
+		ce = rq->context;
 
 		/* Hold tightly onto the lock to prevent concurrent retires! */
 		if (!spin_trylock_irqsave(&rq->lock, flags))
@@ -1326,8 +1326,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 		if (i915_request_completed(rq))
 			goto unlock;
 
-		if (i915_active_is_idle(&ce->active) &&
-		    !i915_gem_context_is_kernel(ce->gem_context)) {
+		if (i915_active_is_idle(&ce->active) && ce->gem_context) {
 			GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
				      ce->timeline->fence_context,
				      port - execlists->pending);
@@ -1399,7 +1398,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 static bool ctx_single_port_submission(const struct intel_context *ce)
 {
 	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-		i915_gem_context_force_single_submission(ce->gem_context));
+		intel_context_force_single_submission(ce));
 }
 
 static bool can_merge_ctx(const struct intel_context *prev,
@@ -1435,7 +1434,7 @@ static bool can_merge_rq(const struct i915_request *prev,
		     (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
 		return false;
 
-	if (!can_merge_ctx(prev->hw_context, next->hw_context))
+	if (!can_merge_ctx(prev->context, next->context))
 		return false;
 
 	return true;
@@ -1622,7 +1621,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
 		return 0;
 
 	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
-	if (unlikely(i915_gem_context_is_banned(rq->gem_context)))
+	if (unlikely(intel_context_is_banned(rq->context)))
 		return 1;
 
 	return READ_ONCE(engine->props.preempt_timeout_ms);
@@ -1730,7 +1729,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
			 * tendency to ignore us rewinding the TAIL to the
			 * end of an earlier request.
			 */
-			last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+			last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
			last = NULL;
		} else if (need_timeslice(engine, last) &&
			   timer_expired(&engine->execlists.timer)) {
@@ -1802,7 +1801,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
			GEM_BUG_ON(rq != ve->request);
			GEM_BUG_ON(rq->engine != &ve->base);
-			GEM_BUG_ON(rq->hw_context != &ve->context);
+			GEM_BUG_ON(rq->context != &ve->context);
 
			if (rq_prio(rq) >= queue_prio(execlists)) {
				if (!virtual_matches(ve, rq, engine)) {
@@ -1921,7 +1920,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
				 * same LRCA, i.e. we must submit 2 different
				 * contexts if we submit 2 ELSP.
				 */
-				if (last->hw_context == rq->hw_context)
+				if (last->context == rq->context)
					goto done;
 
				if (i915_request_has_sentinel(last))
@@ -1934,8 +1933,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
				 * the same context (even though a different
				 * request) to the second port.
				 */
-				if (ctx_single_port_submission(last->hw_context) ||
-				    ctx_single_port_submission(rq->hw_context))
+				if (ctx_single_port_submission(last->context) ||
+				    ctx_single_port_submission(rq->context))
					goto done;
 
				merge = false;
@@ -1949,8 +1948,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
			}
 
			GEM_BUG_ON(last &&
-				   !can_merge_ctx(last->hw_context,
-						  rq->hw_context));
+				   !can_merge_ctx(last->context,
+						  rq->context));
 
			submit = true;
			last = rq;
@@ -2564,7 +2563,7 @@ static int execlists_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
 
 	/*
	 * Flush enough space to reduce the likelihood of waiting after
@@ -3071,7 +3070,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	/* We still have requests in-flight; the engine should be active */
 	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 
-	ce = rq->hw_context;
+	ce = rq->context;
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	if (i915_request_completed(rq)) {
...
@@ -41,27 +41,30 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
 static void engine_skip_context(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	struct i915_gem_context *hung_ctx = rq->gem_context;
+	struct intel_context *hung_ctx = rq->context;
 
 	if (!i915_request_is_active(rq))
 		return;
 
 	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
-		if (rq->gem_context == hung_ctx)
+		if (rq->context == hung_ctx)
 			i915_request_skip(rq, -EIO);
 }
 
-static void client_mark_guilty(struct drm_i915_file_private *file_priv,
-			       const struct i915_gem_context *ctx)
+static void client_mark_guilty(struct i915_request *rq, bool banned)
 {
-	unsigned int score;
+	struct i915_gem_context *ctx = rq->context->gem_context;
+	struct drm_i915_file_private *file_priv = ctx->file_priv;
 	unsigned long prev_hang;
+	unsigned int score;
 
-	if (i915_gem_context_is_banned(ctx))
+	if (IS_ERR_OR_NULL(file_priv))
+		return;
+
+	score = 0;
+	if (banned)
 		score = I915_CLIENT_SCORE_CONTEXT_BAN;
-	else
-		score = 0;
 
 	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
 	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
@@ -76,14 +79,15 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
 	}
 }
 
-static bool context_mark_guilty(struct i915_gem_context *ctx)
+static bool mark_guilty(struct i915_request *rq)
 {
+	struct i915_gem_context *ctx = rq->context->gem_context;
 	unsigned long prev_hang;
 	bool banned;
 	int i;
 
 	if (i915_gem_context_is_closed(ctx)) {
-		i915_gem_context_set_banned(ctx);
+		intel_context_set_banned(rq->context);
 		return true;
 	}
 
@@ -110,18 +114,17 @@ static bool context_mark_guilty(struct i915_gem_context *ctx)
 	if (banned) {
 		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
-		i915_gem_context_set_banned(ctx);
+		intel_context_set_banned(rq->context);
 	}
 
-	if (!IS_ERR_OR_NULL(ctx->file_priv))
-		client_mark_guilty(ctx->file_priv, ctx);
+	client_mark_guilty(rq, banned);
 
 	return banned;
 }
 
-static void context_mark_innocent(struct i915_gem_context *ctx)
+static void mark_innocent(struct i915_request *rq)
 {
-	atomic_inc(&ctx->active_count);
+	atomic_inc(&rq->context->gem_context->active_count);
 }
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
@@ -137,11 +140,11 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
 	rcu_read_lock(); /* protect the GEM context */
 	if (guilty) {
 		i915_request_skip(rq, -EIO);
-		if (context_mark_guilty(rq->gem_context))
+		if (mark_guilty(rq))
 			engine_skip_context(rq);
 	} else {
 		dma_fence_set_error(&rq->fence, -EAGAIN);
-		context_mark_innocent(rq->gem_context);
+		mark_innocent(rq);
 	}
 	rcu_read_unlock();
 }
...
@@ -1480,7 +1480,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 	*cs++ = MI_NOOP;
 
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
+	*cs++ = i915_ggtt_offset(rq->context->state) | flags;
 	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1550,7 +1550,7 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
 
 static int remap_l3(struct i915_request *rq)
 {
-	struct i915_gem_context *ctx = rq->gem_context;
+	struct i915_gem_context *ctx = rq->context->gem_context;
 	int i, err;
 
 	if (!ctx->remap_slice)
@@ -1597,7 +1597,7 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
 
 static int switch_context(struct i915_request *rq)
 {
-	struct intel_context *ce = rq->hw_context;
+	struct intel_context *ce = rq->context;
 	int ret;
 
 	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
@@ -1631,7 +1631,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
 	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
 
 	/*
...
@@ -1195,13 +1195,13 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
			__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq = spinner_create_request(&arg->a.spin,
				    arg->a.ctx, arg->engine,
				    MI_ARB_CHECK);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
 	i915_request_get(rq);
 	i915_request_add(rq);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1209,7 +1209,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1244,13 +1244,13 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
			__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq[0] = spinner_create_request(&arg->a.spin,
				       arg->a.ctx, arg->engine,
				       MI_NOOP); /* no preemption */
 	if (IS_ERR(rq[0]))
 		return PTR_ERR(rq[0]);
 
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
 	i915_request_get(rq[0]);
 	i915_request_add(rq[0]);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1258,7 +1258,6 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
 	rq[1] = spinner_create_request(&arg->b.spin,
				       arg->b.ctx, arg->engine,
				       MI_ARB_CHECK);
@@ -1267,13 +1266,14 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
 	i915_request_get(rq[1]);
 	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 	i915_request_add(rq[1]);
 	if (err)
 		goto out;
 
-	i915_gem_context_set_banned(arg->b.ctx);
+	intel_context_set_banned(rq[1]->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1316,13 +1316,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
			__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq[0] = spinner_create_request(&arg->a.spin,
				       arg->a.ctx, arg->engine,
				       MI_ARB_CHECK);
 	if (IS_ERR(rq[0]))
 		return PTR_ERR(rq[0]);
 
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
 	i915_request_get(rq[0]);
 	i915_request_add(rq[0]);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1330,13 +1330,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
 	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
 	if (IS_ERR(rq[1])) {
 		err = PTR_ERR(rq[1]);
 		goto out;
 	}
 
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
 	i915_request_get(rq[1]);
 	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 	i915_request_add(rq[1]);
@@ -1357,7 +1357,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq[2]->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1404,13 +1404,13 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		return 0;
 
 	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq = spinner_create_request(&arg->a.spin,
				    arg->a.ctx, arg->engine,
				    MI_NOOP); /* preemption disabled */
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
 	i915_request_get(rq);
 	i915_request_add(rq);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1418,7 +1418,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq->context);
 	err = intel_engine_pulse(arg->engine); /* force reset */
 	if (err)
 		goto out;
...
@@ -217,7 +217,7 @@ static void guc_wq_item_append(struct intel_guc *guc,
 static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
+	u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
 	guc_wq_item_append(guc, engine->guc_id, ctx_desc,
@@ -315,7 +315,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			if (last && rq->hw_context != last->hw_context) {
+			if (last && rq->context != last->context) {
 				if (port == last_port)
 					goto done;
 
@@ -420,7 +420,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 		stalled = false;
 
 	__i915_request_reset(rq, stalled);
-	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+	intel_lr_context_reset(engine, rq->context, rq->head, stalled);
 
 out_unlock:
 	spin_unlock_irqrestore(&engine->active.lock, flags);
...
@@ -59,7 +59,7 @@ static void set_context_pdp_root_pointer(
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -205,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
 {
-	return i915_gem_context_force_single_submission(req->gem_context);
+	return intel_context_force_single_submission(rq->context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -307,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	u32 *cs;
 	int err;
 
-	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
+	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/*
@@ -363,11 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 }
 
 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-					  struct i915_gem_context *ctx)
+					  struct intel_context *ce)
 {
 	struct intel_vgpu_mm *mm = workload->shadow_mm;
-	struct i915_ppgtt *ppgtt =
-		i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
 	int i = 0;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -380,8 +379,6 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
			px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
 		}
 	}
-
-	i915_vm_put(&ppgtt->vm);
 }
 
 static int
@@ -529,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
 	struct i915_request *rq = workload->req;
 	struct execlist_ring_context *shadow_ring_context =
-		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+		(struct execlist_ring_context *)rq->context->lrc_reg_state;
 
 	shadow_ring_context->bb_per_ctx_ptr.val =
 		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -628,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 
 	update_shadow_pdps(workload);
 
-	set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+	set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
 
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
@@ -787,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
+	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
@@ -1232,8 +1229,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	i915_gem_context_set_force_single_submission(ctx);
-
 	ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
 	i915_context_ppgtt_root_save(s, ppgtt);
 
@@ -1249,6 +1244,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
			goto out_shadow_ctx;
 		}
 
+		intel_context_set_single_submission(ce);
+
 		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
			const unsigned int ring_size = 512 * SZ_4K;
...
@@ -1168,19 +1168,18 @@ static int __intel_engines_record_defaults(struct intel_gt *gt)
 		if (!rq)
			continue;
 
-		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT,
-				     &rq->hw_context->flags));
-		state = rq->hw_context->state;
+		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
+		state = rq->context->state;
 		if (!state)
			continue;
 
 		/* Serialise with retirement on another CPU */
-		err = __intel_context_flush_retire(rq->hw_context);
+		err = __intel_context_flush_retire(rq->context);
 		if (err)
			goto out;
 
 		/* We want to be able to unbind the state from the GGTT */
-		GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+		GEM_BUG_ON(intel_context_is_pinned(rq->context));
 
 		/*
		 * As we will hold a reference to the logical state, it will
@@ -1230,7 +1229,7 @@ static int __intel_engines_record_defaults(struct intel_gt *gt)
 		if (!rq)
			continue;
 
-		ce = rq->hw_context;
+		ce = rq->context;
 		i915_request_put(rq);
 		intel_context_put(ce);
 	}
...
@@ -1221,7 +1221,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(const struct i915_request *request,
			   struct drm_i915_error_request *erq)
 {
-	const struct i915_gem_context *ctx = request->gem_context;
+	const struct i915_gem_context *ctx = request->context->gem_context;
 
 	erq->flags = request->fence.flags;
 	erq->context = request->fence.context;
@@ -1231,7 +1231,7 @@ static void record_request(const struct i915_request *request,
 	erq->start = i915_ggtt_offset(request->ring->vma);
 	erq->head = request->head;
 	erq->tail = request->tail;
-	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
+	erq->pid = ctx && ctx->pid ? pid_nr(ctx->pid) : 0;
 }
 
 static void engine_record_requests(struct intel_engine_cs *engine,
@@ -1298,7 +1298,10 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine,
 static bool record_context(struct drm_i915_error_context *e,
			   const struct i915_request *rq)
 {
-	const struct i915_gem_context *ctx = rq->gem_context;
+	const struct i915_gem_context *ctx = rq->context->gem_context;
+
+	if (!ctx)
+		return false;
 
 	if (ctx->pid) {
 		struct task_struct *task;
@@ -1452,7 +1455,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
 		capture = request_record_user_bo(request, ee, capture);
 
 		capture = capture_vma(capture,
-				      request->hw_context->state,
+				      request->context->state,
				      &ee->ctx);
 
 		capture = capture_vma(capture,
...
@@ -3112,7 +3112,7 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
 		stream->ops->enable(stream);
 
 	if (stream->hold_preemption)
-		i915_gem_context_set_nopreempt(stream->ctx);
+		intel_context_set_nopreempt(stream->pinned_ctx);
 }
 
 /**
@@ -3138,7 +3138,7 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
 	stream->enabled = false;
 
 	if (stream->hold_preemption)
-		i915_gem_context_clear_nopreempt(stream->ctx);
+		intel_context_clear_nopreempt(stream->pinned_ctx);
 
 	if (stream->ops->disable)
 		stream->ops->disable(stream);
...
@@ -62,6 +62,8 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+	const struct i915_gem_context *ctx;
+
 	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
@@ -74,7 +76,11 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return "signaled";
 
-	return to_request(fence)->gem_context->name ?: "[" DRIVER_NAME "]";
+	ctx = to_request(fence)->context->gem_context;
+	if (!ctx)
+		return "[" DRIVER_NAME "]";
+
+	return ctx->name;
 }
 
 static bool i915_fence_signaled(struct dma_fence *fence)
@@ -269,8 +275,8 @@ bool i915_request_retire(struct i915_request *rq)
 	remove_from_client(rq);
 	list_del(&rq->link);
 
-	intel_context_exit(rq->hw_context);
-	intel_context_unpin(rq->hw_context);
+	intel_context_exit(rq->context);
+	intel_context_unpin(rq->context);
 
 	free_capture_list(rq);
 	i915_sched_node_fini(&rq->sched);
@@ -369,7 +375,7 @@ bool __i915_request_submit(struct i915_request *request)
 	if (i915_request_completed(request))
		goto xfer;
 
-	if (i915_gem_context_is_banned(request->gem_context))
+	if (intel_context_is_banned(request->context))
 		i915_request_skip(request, -EIO);
 
 	/*
@@ -648,8 +654,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 		goto err_free;
 
 	rq->i915 = ce->engine->i915;
-	rq->hw_context = ce;
-	rq->gem_context = ce->gem_context;
+	rq->context = ce;
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
@@ -917,7 +922,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
						    &from->submit,
						    I915_FENCE_GFP);
 	} else if (intel_engine_has_semaphores(to->engine) &&
-		   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+		   to->context->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
 	} else {
 		ret = i915_sw_fence_await_dma_fence(&to->submit,
@@ -1298,7 +1303,7 @@ void __i915_request_queue(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq)
 {
-	struct i915_sched_attr attr = rq->gem_context->sched;
+	struct i915_sched_attr attr = rq->context->gem_context->sched;
 	struct intel_timeline * const tl = i915_request_timeline(rq);
 	struct i915_request *prev;
 
...
@@ -115,9 +115,8 @@ struct i915_request {
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
-	struct i915_gem_context *gem_context;
 	struct intel_engine_cs *engine;
-	struct intel_context *hw_context;
+	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
 	struct list_head signal_link;
...
@@ -213,7 +213,7 @@ static void kick_submission(struct intel_engine_cs *engine,
	 * If we are already the currently executing context, don't
	 * bother evaluating if we should preempt ourselves.
	 */
-	if (inflight->hw_context == rq->hw_context)
+	if (inflight->context == rq->context)
 		goto unlock;
 
 	engine->execlists.queue_priority_hint = prio;
...
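
One consequence threaded through the error-capture, fence-name and scheduling hunks above: ce->gem_context is now an optional backpointer that may be absent (e.g. for kernel-internal contexts), so consumers guard the dereference before touching client state. A hedged sketch of the pattern (the helper name is illustrative, not from the patch):

	/* Illustrative only: client information is optional after this patch. */
	static const char *request_client_name(const struct i915_request *rq)
	{
		const struct i915_gem_context *ctx = rq->context->gem_context;

		/* Kernel contexts carry no client/GEM state; fall back gracefully. */
		return ctx && ctx->name ? ctx->name : "[" DRIVER_NAME "]";
	}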