Commit e7af3116 authored by Chris Wilson

drm/i915: Introduce a preempt context

Add another perma-pinned context to use for preemption at any time.
We cannot just reuse the existing kernel context, as first and foremost
we need to ensure that we can preempt the kernel context itself, so
require a distinct context id. Similar to the kernel context, we may
want to interrupt execution and switch to the preempt context at any
time, and so it needs to be permanently pinned and available.

To compensate for yet another permanent allocation, we shrink the
existing kernel context and the new preempt context by reducing their
ringbuffers to the minimum.
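The injection mechanism itself is left to a later patch; this change only
guarantees that the context is always resident. As a rough sketch of the
intended use (inject_preempt_context() and elsp_submit() are invented
names for illustration, not code from this series):

/*
 * Hypothetical sketch: because preempt_context is pinned for the
 * lifetime of the engine, switching to it requires no allocation and
 * cannot fail, so it may be done from the submission path at any time.
 */
static void inject_preempt_context(struct intel_engine_cs *engine)
{
	struct i915_gem_context *preempt = engine->i915->preempt_context;

	/* Perma-pinned at engine init; always available. */
	GEM_BUG_ON(!preempt);

	/* Ask the hardware to switch off the running request (stub). */
	elsp_submit(engine, preempt);
}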

v2: Assert that we never allocate a request from the preemption context.
v3: Limit perma-pin to engines that may preempt.
v4: Onion cleanup for early driver death
v5: Onion ordering in main driver cleanup as well.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171003203453.15692-4-chris@chris-wilson.co.uk
parent d6c05113
drivers/gpu/drm/i915/i915_drv.h
@@ -783,6 +783,7 @@ struct intel_csr {
 	func(has_l3_dpf); \
 	func(has_llc); \
 	func(has_logical_ring_contexts); \
+	func(has_logical_ring_preemption); \
 	func(has_overlay); \
 	func(has_pipe_cxsr); \
 	func(has_pooled_eu); \
@@ -2251,8 +2252,11 @@ struct drm_i915_private {
 	wait_queue_head_t gmbus_wait_queue;

 	struct pci_dev *bridge_dev;
-	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
+	/* Context used internally to idle the GPU and setup initial state */
+	struct i915_gem_context *kernel_context;
+	/* Context only to be used for injecting preemption commands */
+	struct i915_gem_context *preempt_context;
 	struct i915_vma *semaphore;

 	struct drm_dma_handle *status_page_dmah;
drivers/gpu/drm/i915/i915_gem_context.c
@@ -416,14 +416,43 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	return ctx;
 }

+static struct i915_gem_context *
+create_kernel_context(struct drm_i915_private *i915, int prio)
+{
+	struct i915_gem_context *ctx;
+
+	ctx = i915_gem_create_context(i915, NULL);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	i915_gem_context_clear_bannable(ctx);
+	ctx->priority = prio;
+	ctx->ring_size = PAGE_SIZE;
+
+	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+	return ctx;
+}
+
+static void
+destroy_kernel_context(struct i915_gem_context **ctxp)
+{
+	struct i915_gem_context *ctx;
+
+	/* Keep the context ref so that we can free it immediately ourselves */
+	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
+	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+	context_close(ctx);
+	i915_gem_context_free(ctx);
+}
+
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
+	int err;

-	/* Init should only be called once per module load. Eventually the
-	 * restriction on the context_disabled check can be loosened. */
-	if (WARN_ON(dev_priv->kernel_context))
-		return 0;
+	GEM_BUG_ON(dev_priv->kernel_context);

 	INIT_LIST_HEAD(&dev_priv->contexts.list);
 	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -441,28 +470,38 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
 	ida_init(&dev_priv->contexts.hw_ida);

-	ctx = i915_gem_create_context(dev_priv, NULL);
+	/* lowest priority; idle task */
+	ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
 	if (IS_ERR(ctx)) {
-		DRM_ERROR("Failed to create default global context (error %ld)\n",
-			  PTR_ERR(ctx));
-		return PTR_ERR(ctx);
+		DRM_ERROR("Failed to create default global context\n");
+		err = PTR_ERR(ctx);
+		goto err;
 	}
-
-	/* For easy recognisablity, we want the kernel context to be 0 and then
+	/*
+	 * For easy recognisablity, we want the kernel context to be 0 and then
 	 * all user contexts will have non-zero hw_id.
 	 */
 	GEM_BUG_ON(ctx->hw_id);
-
-	i915_gem_context_clear_bannable(ctx);
-	ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
 	dev_priv->kernel_context = ctx;

-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+	/* highest priority; preempting task */
+	ctx = create_kernel_context(dev_priv, INT_MAX);
+	if (IS_ERR(ctx)) {
+		DRM_ERROR("Failed to create default preempt context\n");
+		err = PTR_ERR(ctx);
+		goto err_kernel_context;
+	}
+	dev_priv->preempt_context = ctx;

 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			 dev_priv->engine[RCS]->context_size ? "logical" :
 					 "fake");
 	return 0;
+
+err_kernel_context:
+	destroy_kernel_context(&dev_priv->kernel_context);
+err:
+	return err;
 }

 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -507,15 +546,10 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)

 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx;
-
 	lockdep_assert_held(&i915->drm.struct_mutex);

-	/* Keep the context so that we can free it immediately ourselves */
-	ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-	context_close(ctx);
-	i915_gem_context_free(ctx);
+	destroy_kernel_context(&i915->preempt_context);
+	destroy_kernel_context(&i915->kernel_context);

 	/* Must free all deferred contexts (via flush_workqueue) first */
 	ida_destroy(&i915->contexts.hw_ida);
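The v4/v5 notes refer to "onion" unwinding: resources acquired in order
A, B are released in reverse order B, A on both the error and teardown
paths, exactly as i915_gem_contexts_fini above now destroys the preempt
context before the kernel context. A generic illustration of the idiom
(init_a()/init_b()/fini_a() are placeholders, not driver functions):

int init_both(void)
{
	int err;

	err = init_a();
	if (err)
		goto err;

	err = init_b();
	if (err)
		goto err_a;	/* unwind only what already succeeded */

	return 0;

err_a:
	fini_a();	/* reverse order of acquisition */
err:
	return err;
}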
drivers/gpu/drm/i915/i915_gem_request.c
@@ -587,6 +587,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);

+	/*
+	 * Preempt contexts are reserved for exclusive use to inject a
+	 * preemption context switch. They are never to be used for any trivial
+	 * request!
+	 */
+	GEM_BUG_ON(ctx == dev_priv->preempt_context);
+
 	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged.
 	 */
drivers/gpu/drm/i915/intel_engine_cs.c
@@ -613,9 +613,22 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	if (IS_ERR(ring))
 		return PTR_ERR(ring);

+	/*
+	 * Similarly the preempt context must always be available so that
+	 * we can interrupt the engine at any time.
+	 */
+	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+		ring = engine->context_pin(engine,
+					   engine->i915->preempt_context);
+		if (IS_ERR(ring)) {
+			ret = PTR_ERR(ring);
+			goto err_unpin_kernel;
+		}
+	}
+
 	ret = intel_engine_init_breadcrumbs(engine);
 	if (ret)
-		goto err_unpin;
+		goto err_unpin_preempt;

 	ret = i915_gem_render_state_init(engine);
 	if (ret)
@@ -634,7 +647,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	i915_gem_render_state_fini(engine);
 err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(engine);
-err_unpin:
+err_unpin_preempt:
+	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+		engine->context_unpin(engine, engine->i915->preempt_context);
+err_unpin_kernel:
 	engine->context_unpin(engine, engine->i915->kernel_context);
 	return ret;
 }
@@ -660,6 +676,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	intel_engine_cleanup_cmd_parser(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);

+	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+		engine->context_unpin(engine, engine->i915->preempt_context);
 	engine->context_unpin(engine, engine->i915->kernel_context);
 }
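Note that nothing in this diff sets has_logical_ring_preemption; enabling
it per platform is presumably left to a follow-up change. Purely for
illustration (intel_example_info is a made-up entry, not from this
commit), a device-info table would grow the flag like any other feature
bit generated by the DEV_INFO_FOR_EACH_FLAG/func() macro shown earlier:

static const struct intel_device_info intel_example_info = {
	.gen = 9,
	/* Feature bits declared via DEV_INFO_FOR_EACH_FLAG/func() */
	.has_logical_ring_contexts = 1,
	.has_logical_ring_preemption = 1,
};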