Commit aa9b7810 authored by Chris Wilson

drm/i915: Late request cancellations are harmful

Conceptually, each request is a record of a hardware transaction - we
build up a list of pending commands and then either commit them to
hardware, or cancel them. However, whilst building up the list of
pending commands, we may modify state outside of the request and make
references to the pending request. If we do so and then cancel that
request, external objects are left pointing to the deleted request,
leading to both graphical and memory corruption.

The easiest example is to consider object/VMA tracking. When we mark an
object as active in a request, we store a pointer to this, the most
recent request, in the object. Then, when we want to free that object, we wait
for the most recent request to be idle before proceeding (otherwise the
hardware will write to pages now owned by the system, or we will attempt
to read from those pages before the hardware is finished writing). If
the request was cancelled instead, that wait completes immediately. As a
result, all requests must be committed and not cancelled if the external
state is unknown.
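
To make the hazard concrete, here is a small self-contained C model of the
failure mode described above; the types and helpers are invented stand-ins
for the driver's request tracking, not the real i915 API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model: a request records whether its breadcrumb has passed. */
    struct request {
            bool completed;
    };

    /* External state: the object remembers its most recent request. */
    struct object {
            struct request *last_req;
    };

    /* A late cancellation marks the request complete even though the
     * hardware never executed it -- every waiter is released at once. */
    static void cancel_request(struct request *req)
    {
            req->completed = true;
    }

    static void free_object_pages(struct object *obj)
    {
            while (!obj->last_req->completed)
                    ;       /* "wait" for the hardware */
            printf("pages released\n");     /* hardware may still write them */
    }

    int main(void)
    {
            struct request req = { false };
            struct object obj = { &req };

            cancel_request(&req);           /* the harmful late cancel */
            free_object_pages(&obj);        /* returns immediately */
            return 0;
    }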

All that remains of the i915_gem_request_cancel() users is just a couple of
extremely unlikely allocation failures, so remove the API entirely.

A consequence of committing all incomplete requests is that we generate
excess breadcrumbs and fill the ring much more often with dummy work. We
have completely undone the outstanding_last_seqno optimisation.
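
The replacement pattern runs throughout the diff below: on an error path the
request is committed with i915_add_request_no_flush() rather than cancelled,
so external references remain valid. Schematically, following the
i915_gpu_idle() hunk in this patch:

    req = i915_gem_request_alloc(engine, NULL);
    if (IS_ERR(req))
            return PTR_ERR(req);

    ret = i915_switch_context(req); /* may already be referenced externally */
    i915_add_request_no_flush(req); /* always commit, even on failure */
    if (ret)
            return ret;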

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93907
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: stable@vger.kernel.org
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1460565315-7748-16-git-send-email-chris@chris-wilson.co.uk
parent fcb5106d
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2331,7 +2331,6 @@ struct drm_i915_gem_request {
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
 		       struct intel_context *ctx);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 				   struct drm_file *file);
@@ -2888,7 +2887,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 					struct drm_i915_gem_request *req);
-void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 				   struct drm_i915_gem_execbuffer2 *args,
 				   struct list_head *vmas);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2791,7 +2791,8 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * fully prepared. Thus it can be cleaned up using the proper
 	 * free code.
 	 */
-	i915_gem_request_cancel(req);
+	intel_ring_reserved_space_cancel(req->ringbuf);
+	i915_gem_request_unreference(req);
 
 	return ret;
 }
@@ -2828,13 +2829,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	return err ? ERR_PTR(err) : req;
 }
 
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
-	intel_ring_reserved_space_cancel(req->ringbuf);
-	i915_gem_request_unreference(req);
-}
-
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
@@ -3444,12 +3438,9 @@ int i915_gpu_idle(struct drm_device *dev)
 			return PTR_ERR(req);
 
 		ret = i915_switch_context(req);
-		if (ret) {
-			i915_gem_request_cancel(req);
-			return ret;
-		}
-
 		i915_add_request_no_flush(req);
+		if (ret)
+			return ret;
 	}
 
 	ret = intel_engine_idle(engine);
@@ -4949,34 +4940,33 @@ i915_gem_init_hw(struct drm_device *dev)
 		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
-			i915_gem_cleanup_engines(dev);
-			goto out;
+			break;
 		}
 
 		if (engine->id == RCS) {
-			for (j = 0; j < NUM_L3_SLICES(dev); j++)
-				i915_gem_l3_remap(req, j);
+			for (j = 0; j < NUM_L3_SLICES(dev); j++) {
+				ret = i915_gem_l3_remap(req, j);
+				if (ret)
+					goto err_request;
+			}
 		}
 
 		ret = i915_ppgtt_init_ring(req);
-		if (ret && ret != -EIO) {
-			DRM_ERROR("PPGTT enable %s failed %d\n",
-				  engine->name, ret);
-			i915_gem_request_cancel(req);
-			i915_gem_cleanup_engines(dev);
-			goto out;
-		}
+		if (ret)
+			goto err_request;
 
 		ret = i915_gem_context_enable(req);
-		if (ret && ret != -EIO) {
-			DRM_ERROR("Context enable %s failed %d\n",
-				  engine->name, ret);
-			i915_gem_request_cancel(req);
-			i915_gem_cleanup_engines(dev);
-			goto out;
-		}
+		if (ret)
+			goto err_request;
 
-		i915_add_request_no_flush(req);
+err_request:
+		i915_add_request_no_flush(req);
+		if (ret) {
+			DRM_ERROR("Failed to enable %s, error=%d\n",
+				  engine->name, ret);
+			i915_gem_cleanup_engines(dev);
+			break;
+		}
 	}
 
 out:
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1137,7 +1137,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 	}
 }
 
-void
+static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
@@ -1322,7 +1322,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, params->request);
-	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
@@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
-		goto err_batch_unpin;
+		goto err_request;
 
 	/*
 	 * Save assorted stuff away to pass through to *_submission().
@@ -1641,6 +1640,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->request = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+err_request:
+	i915_gem_execbuffer_retire_commands(params);
 
 err_batch_unpin:
 	/*
@@ -1657,14 +1658,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
-	/*
-	 * If the request was created but not successfully submitted then it
-	 * must be freed again. If it was submitted then it is being tracked
-	 * on the active request list and no clean up is required here.
-	 */
-	if (ret && !IS_ERR_OR_NULL(req))
-		i915_gem_request_cancel(req);
-
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11664,7 +11664,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
 	if (!IS_ERR_OR_NULL(request))
-		i915_gem_request_cancel(request);
+		i915_add_request_no_flush(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1007,7 +1007,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, params->request);
-	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
@@ -2700,13 +2699,12 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 		}
 
 		ret = engine->init_context(req);
+		i915_add_request_no_flush(req);
 		if (ret) {
 			DRM_ERROR("ring init context: %d\n",
 				ret);
-			i915_gem_request_cancel(req);
 			goto error_ringbuf;
 		}
-		i915_add_request_no_flush(req);
 	}
 	return 0;
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	ret = intel_ring_begin(req, 4);
 	if (ret) {
-		i915_gem_request_cancel(req);
+		i915_add_request_no_flush(req);
 		return ret;
 	}
@@ -290,7 +290,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
 	ret = intel_ring_begin(req, 2);
 	if (ret) {
-		i915_gem_request_cancel(req);
+		i915_add_request_no_flush(req);
 		return ret;
 	}
@@ -356,7 +356,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 
 	ret = intel_ring_begin(req, 6);
 	if (ret) {
-		i915_gem_request_cancel(req);
+		i915_add_request_no_flush(req);
 		return ret;
 	}
@@ -431,7 +431,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
 	ret = intel_ring_begin(req, 2);
 	if (ret) {
-		i915_gem_request_cancel(req);
+		i915_add_request_no_flush(req);
 		return ret;
 	}