Commit 3fef5cda authored by Chris Wilson

drm/i915: Automatic i915_switch_context for legacy

During request construction, after pinning the context we know whether
or not we have to emit a context switch. So move this common operation
from every caller into i915_gem_request_alloc() itself.

v2: Always submit the request if we emitted some commands during request
construction, as typically it also involves changes in global state.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171120102002.22254-2-chris@chris-wilson.co.uk
parent 2113184c
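
In outline, the before/after shape of a legacy submission path looks like the sketch below. This is a simplified illustration drawn from the hunks that follow, not literal kernel source; the batch and flags variables are placeholders and error unwinding is elided.

    /* Before: every caller emitted the context switch by hand. */
    rq = i915_gem_request_alloc(engine, ctx);
    err = i915_switch_context(rq);
    if (err)
        goto err_request;
    err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, flags);

    /* After: ring_request_alloc(), reached via i915_gem_request_alloc(),
     * emits the switch during request construction, so the caller only
     * emits its own commands.
     */
    rq = i915_gem_request_alloc(engine, ctx);
    err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, flags);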
@@ -5045,7 +5045,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
             goto out_ctx;
         }
 
-        err = i915_switch_context(rq);
+        err = 0;
         if (engine->init_context)
             err = engine->init_context(rq);
...
@@ -842,8 +842,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
     struct intel_engine_cs *engine = req->engine;
 
     lockdep_assert_held(&req->i915->drm.struct_mutex);
-    if (i915_modparams.enable_execlists)
-        return 0;
+    GEM_BUG_ON(i915_modparams.enable_execlists);
 
     if (!req->ctx->engine[engine->id].state) {
         struct i915_gem_context *to = req->ctx;
@@ -899,7 +898,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
     for_each_engine(engine, dev_priv, id) {
         struct drm_i915_gem_request *req;
-        int ret;
 
         if (engine_has_idle_kernel_context(engine))
             continue;
@@ -922,10 +920,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
                          GFP_KERNEL);
         }
 
-        ret = i915_switch_context(req);
         i915_add_request(req);
-        if (ret)
-            return ret;
     }
 
     return 0;
...
@@ -1111,10 +1111,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
     if (err)
         goto err_request;
 
-    err = i915_switch_context(rq);
-    if (err)
-        goto err_request;
-
     err = eb->engine->emit_bb_start(rq,
                     batch->node.start, PAGE_SIZE,
                     cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
@@ -1960,10 +1956,6 @@ static int eb_submit(struct i915_execbuffer *eb)
     if (err)
         return err;
 
-    err = i915_switch_context(eb->request);
-    if (err)
-        return err;
-
     if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
         err = i915_reset_gen7_sol_offsets(eb->request);
         if (err)
...
@@ -624,6 +624,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
     if (ret)
         goto err_unpin;
 
+    ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+    if (ret)
+        goto err_unreserve;
+
     /* Move the oldest request to the slab-cache (if not in use!) */
     req = list_first_entry_or_null(&engine->timeline->requests,
                        typeof(*req), link);
...
@@ -1726,10 +1726,9 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
                          GFP_KERNEL);
     }
 
-    ret = i915_switch_context(req);
     i915_add_request(req);
 
-    return ret;
+    return 0;
 }
 
 /*
...
@@ -1592,6 +1592,10 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
     if (ret)
         return ret;
 
+    ret = i915_switch_context(request);
+    if (ret)
+        return ret;
+
     request->reserved_space -= LEGACY_REQUEST_SIZE;
     return 0;
 }
...
@@ -989,13 +989,9 @@ static int gpu_write(struct i915_vma *vma,
     i915_vma_unpin(batch);
     i915_vma_close(batch);
 
-    err = i915_switch_context(rq);
-    if (err)
-        goto err_request;
-
-    err = rq->engine->emit_bb_start(rq,
-                    batch->node.start, batch->node.size,
-                    flags);
+    err = engine->emit_bb_start(rq,
+                    batch->node.start, batch->node.size,
+                    flags);
     if (err)
         goto err_request;
...
@@ -158,10 +158,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
         goto err_batch;
     }
 
-    err = i915_switch_context(rq);
-    if (err)
-        goto err_request;
-
     flags = 0;
     if (INTEL_GEN(vm->i915) <= 5)
         flags |= I915_DISPATCH_SECURE;
...
@@ -459,10 +459,6 @@ empty_request(struct intel_engine_cs *engine,
     if (IS_ERR(request))
         return request;
 
-    err = i915_switch_context(request);
-    if (err)
-        goto out_request;
-
     err = engine->emit_bb_start(request,
                     batch->node.start,
                     batch->node.size,
@@ -671,9 +667,6 @@ static int live_all_engines(void *arg)
             goto out_request;
         }
 
-        err = i915_switch_context(request[id]);
-        GEM_BUG_ON(err);
-
         err = engine->emit_bb_start(request[id],
                         batch->node.start,
                         batch->node.size,
@@ -790,9 +783,6 @@ static int live_sequential_engines(void *arg)
             }
         }
 
-        err = i915_switch_context(request[id]);
-        GEM_BUG_ON(err);
-
         err = engine->emit_bb_start(request[id],
                         batch->node.start,
                         batch->node.size,
...
@@ -114,10 +114,6 @@ static int emit_recurse_batch(struct hang *h,
     if (err)
         goto unpin_vma;
 
-    err = i915_switch_context(rq);
-    if (err)
-        goto unpin_hws;
-
     i915_vma_move_to_active(vma, rq, 0);
     if (!i915_gem_object_has_active_reference(vma->obj)) {
         i915_gem_object_get(vma->obj);
@@ -169,7 +165,6 @@ static int emit_recurse_batch(struct hang *h,
 
     err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
-unpin_hws:
     i915_vma_unpin(hws);
 unpin_vma:
     i915_vma_unpin(vma);
...