Commit 977253df authored by Chris Wilson

drm/i915/gt: Stop holding onto the pinned_default_state

As we only restore the default context state upon banning a context, we
only need enough of the state to run the ring and nothing more. That is,
we only need our bare protocontext.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Andi Shyti <andi.shyti@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200504180745.15645-1-chris@chris-wilson.co.uk
parent b68be5c6
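
[Note: the hunks below all follow one pattern. Here is a minimal sketch of
that pattern, using a hypothetical read_defaults() helper that is not part
of this patch: with engine->pinned_default_state gone, a reader pins the
shmem-backed engine->default_state only for the duration of the access and
unpins it immediately afterwards. shmem_pin_map(), shmem_unpin_map() and
LRC_STATE_OFFSET are taken from the diff itself.]

	/* Hypothetical helper, for illustration only. */
	static int read_defaults(struct intel_engine_cs *engine)
	{
		u32 *defaults, *hw;

		/* Pin the shmem-backed default image only while reading it. */
		defaults = shmem_pin_map(engine->default_state);
		if (!defaults)
			return -ENOMEM;

		/* The register state follows the per-process HWSP page. */
		hw = defaults + LRC_STATE_OFFSET / sizeof(*hw);
		(void)hw; /* ... inspect the default register state ... */

		/* Release the mapping as soon as we are done. */
		shmem_unpin_map(engine->default_state, defaults);
		return 0;
	}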
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -21,18 +21,11 @@ static int __engine_unpark(struct intel_wakeref *wf)
 	struct intel_engine_cs *engine =
 		container_of(wf, typeof(*engine), wakeref);
 	struct intel_context *ce;
-	void *map;
 
 	ENGINE_TRACE(engine, "\n");
 
 	intel_gt_pm_get(engine->gt);
 
-	/* Pin the default state for fast resets from atomic context. */
-	map = NULL;
-	if (engine->default_state)
-		map = shmem_pin_map(engine->default_state);
-	engine->pinned_default_state = map;
-
 	/* Discard stale context state from across idling */
 	ce = engine->kernel_context;
 	if (ce) {
@@ -42,6 +35,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
 		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
 			struct drm_i915_gem_object *obj = ce->state->obj;
 			int type = i915_coherent_map_type(engine->i915);
+			void *map;
 
 			map = i915_gem_object_pin_map(obj, type);
 			if (!IS_ERR(map)) {
@@ -260,12 +254,6 @@ static int __engine_park(struct intel_wakeref *wf)
 	if (engine->park)
 		engine->park(engine);
 
-	if (engine->pinned_default_state) {
-		shmem_unpin_map(engine->default_state,
-				engine->pinned_default_state);
-		engine->pinned_default_state = NULL;
-	}
-
 	engine->execlists.no_priolist = false;
 
 	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -344,7 +344,6 @@ struct intel_engine_cs {
 	unsigned long wakeref_serial;
 	struct intel_wakeref wakeref;
 	struct file *default_state;
-	void *pinned_default_state;
 
 	struct {
 		struct intel_ring *ring;
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1271,14 +1271,11 @@ execlists_check_context(const struct intel_context *ce,
 static void restore_default_state(struct intel_context *ce,
 				  struct intel_engine_cs *engine)
 {
-	u32 *regs = ce->lrc_reg_state;
+	u32 *regs;
 
-	if (engine->pinned_default_state)
-		memcpy(regs, /* skip restoring the vanilla PPHWSP */
-		       engine->pinned_default_state + LRC_STATE_OFFSET,
-		       engine->context_size - PAGE_SIZE);
-
-	execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+	regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
+	execlists_init_reg_state(regs, ce, engine, ce->ring, true);
+
 	ce->runtime.last = intel_context_get_runtime(ce);
 }
@@ -4168,8 +4165,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 * image back to the expected values to skip over the guilty request.
 	 */
 	__i915_request_reset(rq, stalled);
-	if (!stalled)
-		goto out_replay;
 
 	/*
 	 * We want a simple context + ring to execute the breadcrumb update.
@@ -4179,9 +4174,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 * future request will be after userspace has had the opportunity
 	 * to recreate its own state.
 	 */
-	GEM_BUG_ON(!intel_context_is_pinned(ce));
-	restore_default_state(ce, engine);
-
 out_replay:
 	ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
 		     head, ce->ring->tail);
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -154,10 +154,7 @@ static int live_context_size(void *arg)
 	 */
 	for_each_engine(engine, gt, id) {
-		struct {
-			struct file *state;
-			void *pinned;
-		} saved;
+		struct file *saved;
 
 		if (!engine->context_size)
 			continue;
@@ -171,8 +168,7 @@ static int live_context_size(void *arg)
 		 * active state is sufficient, we are only checking that we
 		 * don't use more than we planned.
 		 */
-		saved.state = fetch_and_zero(&engine->default_state);
-		saved.pinned = fetch_and_zero(&engine->pinned_default_state);
+		saved = fetch_and_zero(&engine->default_state);
 
 		/* Overlaps with the execlists redzone */
 		engine->context_size += I915_GTT_PAGE_SIZE;
@@ -181,8 +177,7 @@ static int live_context_size(void *arg)
 		engine->context_size -= I915_GTT_PAGE_SIZE;
 
-		engine->pinned_default_state = saved.pinned;
-		engine->default_state = saved.state;
+		engine->default_state = saved;
 
 		intel_engine_pm_put(engine);
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -5177,6 +5177,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 {
 	struct i915_vma *batch;
 	u32 dw, x, *cs, *hw;
+	u32 *defaults;
 
 	batch = create_user_vma(ce->vm, SZ_64K);
 	if (IS_ERR(batch))
@@ -5188,9 +5189,16 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 		return ERR_CAST(cs);
 	}
 
+	defaults = shmem_pin_map(ce->engine->default_state);
+	if (!defaults) {
+		i915_gem_object_unpin_map(batch->obj);
+		i915_vma_put(batch);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	x = 0;
 	dw = 0;
-	hw = ce->engine->pinned_default_state;
+	hw = defaults;
 	hw += LRC_STATE_OFFSET / sizeof(*hw);
 	do {
 		u32 len = hw[dw] & 0x7f;
@@ -5221,6 +5229,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 	*cs++ = MI_BATCH_BUFFER_END;
 
+	shmem_unpin_map(ce->engine->default_state, defaults);
+
 	i915_gem_object_flush_map(batch->obj);
 	i915_gem_object_unpin_map(batch->obj);
@@ -5331,6 +5341,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 {
 	struct i915_vma *batch;
 	u32 dw, *cs, *hw;
+	u32 *defaults;
 
 	batch = create_user_vma(ce->vm, SZ_64K);
 	if (IS_ERR(batch))
@@ -5342,8 +5353,15 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 		return ERR_CAST(cs);
 	}
 
+	defaults = shmem_pin_map(ce->engine->default_state);
+	if (!defaults) {
+		i915_gem_object_unpin_map(batch->obj);
+		i915_vma_put(batch);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	dw = 0;
-	hw = ce->engine->pinned_default_state;
+	hw = defaults;
 	hw += LRC_STATE_OFFSET / sizeof(*hw);
 	do {
 		u32 len = hw[dw] & 0x7f;
@@ -5371,6 +5389,8 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 	*cs++ = MI_BATCH_BUFFER_END;
 
+	shmem_unpin_map(ce->engine->default_state, defaults);
+
 	i915_gem_object_flush_map(batch->obj);
 	i915_gem_object_unpin_map(batch->obj);
@@ -5438,6 +5458,7 @@ static int compare_isolation(struct intel_engine_cs *engine,
 {
 	u32 x, dw, *hw, *lrc;
 	u32 *A[2], *B[2];
+	u32 *defaults;
 	int err = 0;
 
 	A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
@@ -5470,9 +5491,15 @@ static int compare_isolation(struct intel_engine_cs *engine,
 	}
 	lrc += LRC_STATE_OFFSET / sizeof(*hw);
 
+	defaults = shmem_pin_map(ce->engine->default_state);
+	if (!defaults) {
+		err = -ENOMEM;
+		goto err_lrc;
+	}
+
 	x = 0;
 	dw = 0;
-	hw = engine->pinned_default_state;
+	hw = defaults;
 	hw += LRC_STATE_OFFSET / sizeof(*hw);
 	do {
 		u32 len = hw[dw] & 0x7f;
@@ -5512,6 +5539,8 @@ static int compare_isolation(struct intel_engine_cs *engine,
 	} while (dw < PAGE_SIZE / sizeof(u32) &&
 		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
 
+	shmem_unpin_map(ce->engine->default_state, defaults);
+err_lrc:
 	i915_gem_object_unpin_map(ce->state->obj);
 err_B1:
 	i915_gem_object_unpin_map(result[1]->obj);
@@ -5661,18 +5690,16 @@ static int live_lrc_isolation(void *arg)
 			continue;
 
 		intel_engine_pm_get(engine);
-		if (engine->pinned_default_state) {
-			for (i = 0; i < ARRAY_SIZE(poison); i++) {
-				int result;
+		for (i = 0; i < ARRAY_SIZE(poison); i++) {
+			int result;
 
-				result = __lrc_isolation(engine, poison[i]);
-				if (result && !err)
-					err = result;
+			result = __lrc_isolation(engine, poison[i]);
+			if (result && !err)
+				err = result;
 
-				result = __lrc_isolation(engine, ~poison[i]);
-				if (result && !err)
-					err = result;
-			}
+			result = __lrc_isolation(engine, ~poison[i]);
+			if (result && !err)
+				err = result;
 		}
 		intel_engine_pm_put(engine);
 		if (igt_flush_test(gt->i915)) {