Commit b0a997ae authored by Chris Wilson

drm/i915: Emit await(batch) before MI_BB_START

Be consistent and ensure that we always emit the asynchronous waits
prior to issuing instructions that use the address. This ensures that if
we do emit GPU commands to do the await, they are before our use!
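
For illustration, a minimal sketch of the ordering this patch enforces
(the helper name is hypothetical; i915_vma_lock/unlock,
i915_request_await_object, i915_vma_move_to_active and the engine's
emit_init_breadcrumb/emit_bb_start vfuncs are the interfaces used below):

	/* Hypothetical helper sketching the required submission order. */
	static int submit_batch_sketch(struct i915_request *rq,
				       struct i915_vma *batch)
	{
		int err;

		/* First queue the asynchronous waits on the batch object... */
		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			return err;

		/* ...then any GPU commands implementing those waits... */
		if (rq->engine->emit_init_breadcrumb) {
			err = rq->engine->emit_init_breadcrumb(rq);
			if (err)
				return err;
		}

		/* ...and only then the MI_BATCH_BUFFER_START that reads
		 * the batch address.
		 */
		return rq->engine->emit_bb_start(rq,
						 batch->node.start,
						 batch->node.size, 0);
	}

Previously emit_bb_start() was called first, so any semaphore or
breadcrumb commands generated on behalf of the await could land in the
ring after the MI_BATCH_BUFFER_START they were meant to order against.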
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200510102431.21959-1-chris@chris-wilson.co.uk
parent c7e8a3d6
@@ -972,12 +972,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 		goto err_batch;
 	}
 
-	err = rq->engine->emit_bb_start(rq,
-					batch->node.start, batch->node.size,
-					0);
-	if (err)
-		goto err_request;
-
 	i915_vma_lock(batch);
 	err = i915_request_await_object(rq, batch->obj, false);
 	if (err == 0)
@@ -994,6 +988,18 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	if (err)
 		goto skip_request;
 
+	if (rq->engine->emit_init_breadcrumb) {
+		err = rq->engine->emit_init_breadcrumb(rq);
+		if (err)
+			goto skip_request;
+	}
+
+	err = rq->engine->emit_bb_start(rq,
+					batch->node.start, batch->node.size,
+					0);
+	if (err)
+		goto skip_request;
+
 	i915_vma_unpin_and_release(&batch, 0);
 	i915_vma_unpin(vma);
@@ -1005,7 +1011,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 skip_request:
 	i915_request_set_error_once(rq, err);
-err_request:
 	i915_request_add(rq);
 err_batch:
 	i915_vma_unpin_and_release(&batch, 0);
@@ -1541,10 +1546,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 		goto err_unpin;
 	}
 
-	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
-	if (err)
-		goto err_request;
-
 	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, false);
 	if (err == 0)
@@ -1553,6 +1554,16 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	if (err)
 		goto skip_request;
 
+	if (rq->engine->emit_init_breadcrumb) {
+		err = rq->engine->emit_init_breadcrumb(rq);
+		if (err)
+			goto skip_request;
+	}
+
+	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+	if (err)
+		goto skip_request;
+
 	i915_vma_unpin(vma);
 
 	i915_request_add(rq);
@@ -1560,7 +1571,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	goto out_vm;
 skip_request:
 	i915_request_set_error_once(rq, err);
-err_request:
 	i915_request_add(rq);
 err_unpin:
 	i915_vma_unpin(vma);
@@ -1674,10 +1684,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 		goto err_unpin;
 	}
 
-	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
-	if (err)
-		goto err_request;
-
 	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, true);
 	if (err == 0)
@@ -1686,6 +1692,16 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	if (err)
 		goto skip_request;
 
+	if (rq->engine->emit_init_breadcrumb) {
+		err = rq->engine->emit_init_breadcrumb(rq);
+		if (err)
+			goto skip_request;
+	}
+
+	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+	if (err)
+		goto skip_request;
+
 	i915_vma_unpin(vma);
 
 	i915_request_add(rq);
@@ -1708,7 +1724,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	goto out_vm;
 skip_request:
 	i915_request_set_error_once(rq, err);
-err_request:
 	i915_request_add(rq);
 err_unpin:
 	i915_vma_unpin(vma);
...
@@ -83,6 +83,7 @@ igt_emit_store_dw(struct i915_vma *vma,
 		offset += PAGE_SIZE;
 	}
 	*cmd = MI_BATCH_BUFFER_END;
+
 	i915_gem_object_unpin_map(obj);
 
 	intel_gt_chipset_flush(vma->vm->gt);
@@ -126,16 +127,6 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 		goto err_batch;
 	}
 
-	flags = 0;
-	if (INTEL_GEN(ce->vm->i915) <= 5)
-		flags |= I915_DISPATCH_SECURE;
-
-	err = rq->engine->emit_bb_start(rq,
-					batch->node.start, batch->node.size,
-					flags);
-	if (err)
-		goto err_request;
-
 	i915_vma_lock(batch);
 	err = i915_request_await_object(rq, batch->obj, false);
 	if (err == 0)
@@ -152,15 +143,17 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 	if (err)
 		goto skip_request;
 
-	i915_request_add(rq);
+	flags = 0;
+	if (INTEL_GEN(ce->vm->i915) <= 5)
+		flags |= I915_DISPATCH_SECURE;
 
-	i915_vma_unpin_and_release(&batch, 0);
-
-	return 0;
+	err = rq->engine->emit_bb_start(rq,
+					batch->node.start, batch->node.size,
+					flags);
 
 skip_request:
-	i915_request_set_error_once(rq, err);
-err_request:
+	if (err)
+		i915_request_set_error_once(rq, err);
 	i915_request_add(rq);
 err_batch:
 	i915_vma_unpin_and_release(&batch, 0);
...
@@ -219,6 +219,14 @@ int intel_renderstate_emit(struct intel_renderstate *so,
 	if (!so->vma)
 		return 0;
 
+	i915_vma_lock(so->vma);
+	err = i915_request_await_object(rq, so->vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(so->vma, rq, 0);
+	i915_vma_unlock(so->vma);
+	if (err)
+		return err;
+
 	err = engine->emit_bb_start(rq,
 				    so->batch_offset, so->batch_size,
 				    I915_DISPATCH_SECURE);
@@ -233,13 +241,7 @@ int intel_renderstate_emit(struct intel_renderstate *so,
 			return err;
 	}
 
-	i915_vma_lock(so->vma);
-	err = i915_request_await_object(rq, so->vma->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(so->vma, rq, 0);
-	i915_vma_unlock(so->vma);
-
-	return err;
+	return 0;
 }
 
 void intel_renderstate_fini(struct intel_renderstate *so)
...
@@ -865,13 +865,6 @@ static int live_all_engines(void *arg)
 			goto out_request;
 		}
 
-		err = engine->emit_bb_start(request[idx],
-					    batch->node.start,
-					    batch->node.size,
-					    0);
-		GEM_BUG_ON(err);
-		request[idx]->batch = batch;
-
 		i915_vma_lock(batch);
 		err = i915_request_await_object(request[idx], batch->obj, 0);
 		if (err == 0)
@@ -879,6 +872,13 @@ static int live_all_engines(void *arg)
 		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
+		err = engine->emit_bb_start(request[idx],
+					    batch->node.start,
+					    batch->node.size,
+					    0);
+		GEM_BUG_ON(err);
+		request[idx]->batch = batch;
+
 		i915_request_get(request[idx]);
 		i915_request_add(request[idx]);
 		idx++;
@@ -993,13 +993,6 @@ static int live_sequential_engines(void *arg)
 			}
 		}
 
-		err = engine->emit_bb_start(request[idx],
-					    batch->node.start,
-					    batch->node.size,
-					    0);
-		GEM_BUG_ON(err);
-		request[idx]->batch = batch;
-
 		i915_vma_lock(batch);
 		err = i915_request_await_object(request[idx],
 						batch->obj, false);
@@ -1008,6 +1001,13 @@ static int live_sequential_engines(void *arg)
 		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
+		err = engine->emit_bb_start(request[idx],
+					    batch->node.start,
+					    batch->node.size,
+					    0);
+		GEM_BUG_ON(err);
+		request[idx]->batch = batch;
+
 		i915_request_get(request[idx]);
 		i915_request_add(request[idx]);
...