Commit 16f2941a authored by Chris Wilson

drm/i915/gt: Replace direct submit with direct call to tasklet

Rather than having special-case code that opportunistically calls
process_csb() and performs a direct submit while holding the engine
spinlock, simply call the tasklet directly.
This allows us to retain the direct submission path, including the CS
draining to allow fast/immediate submissions, without requiring any
duplicated code paths, and most importantly greatly simplifying the
control flow by removing reentrancy. This will enable us to close a few
races in the virtual engines in the next few patches.
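
As a rough sketch of the resulting path (condensed from the i915_request.c
hunks below, so the names are the driver's own, but illustrative rather than
a standalone build): the queue code commits the submit fences with bottom
halves disabled, and the execlists tasklet then runs as soon as
local_bh_enable() is reached, preserving immediate submission without
re-entering the submission code under engine->active.lock.

void __i915_request_queue_bh(struct i915_request *rq)
{
	/* Readying these fences hands the request to the submission tasklet */
	i915_sw_fence_commit(&rq->semaphore);
	i915_sw_fence_commit(&rq->submit);
}

void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr)
{
	if (attr && rq->engine->schedule)
		rq->engine->schedule(rq, attr);

	local_bh_disable();
	__i915_request_queue_bh(rq);
	local_bh_enable(); /* kick the submission tasklet */
}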

The trickiest part here is to ensure that paired operations (such as
schedule_in/schedule_out) remain under consistent locking domains,
e.g. when pulled outside of the engine->active.lock.

v2: Use bh kicking, see commit 3c53776e ("Mark HI and TASKLET
softirq synchronous").
v3: Update engine-reset to be tasklet aware
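
For the reset side (v3), the split mirrored from the intel_reset.c hunks
below is: __intel_engine_reset_bh() assumes the caller has already disabled
bottom halves (as the selftests now do around their reset calls), while
intel_engine_reset() stays as the wrapper for everyone else; a minimal
sketch:

int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	int err;

	local_bh_disable(); /* serialise against the submission tasklet on this CPU */
	err = __intel_engine_reset_bh(engine, msg);
	local_bh_enable();

	return err;
}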
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201224135544.1713-1-chris@chris-wilson.co.uk
parent 6d393ef5
@@ -1019,32 +1019,39 @@ static unsigned long stop_timeout(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.stop_timeout_ms);
}
- int intel_engine_stop_cs(struct intel_engine_cs *engine)
+ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+ int fast_timeout_us,
+ int slow_timeout_ms)
{
struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
+ const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ err = __intel_wait_for_register_fw(engine->uncore, mode,
+ MODE_IDLE, MODE_IDLE,
+ fast_timeout_us,
+ slow_timeout_ms,
+ NULL);
+ /* A final mmio read to let GPU writes be hopefully flushed to memory */
+ intel_uncore_posting_read_fw(uncore, mode);
+ return err;
+ }
+ int intel_engine_stop_cs(struct intel_engine_cs *engine)
+ {
+ int err = 0;
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
ENGINE_TRACE(engine, "\n");
- intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
- err = 0;
- if (__intel_wait_for_register_fw(uncore,
- mode, MODE_IDLE, MODE_IDLE,
- 1000, stop_timeout(engine),
- NULL)) {
+ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
err = -ETIMEDOUT;
}
- /* A final mmio read to let GPU writes be hopefully flushed to memory */
- intel_uncore_posting_read_fw(uncore, mode);
return err;
}
@@ -144,7 +144,7 @@ __queue_and_release_pm(struct i915_request *rq,
list_add_tail(&tl->link, &timelines->active_list);
/* Hand the request over to HW and so engine_retire() */
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
/* Let new submissions commence (and maybe retire this timeline) */
__intel_wakeref_defer_park(&engine->wakeref);
@@ -184,7 +184,8 @@ struct intel_engine_execlists {
* Reserve the upper 16b for tracking internal errors.
*/
u32 error_interrupt;
- #define ERROR_CSB BIT(31)
+ #define ERROR_CSB BIT(31)
+ #define ERROR_PREEMPT BIT(30)
/**
* @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
@@ -40,20 +40,19 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
- static void engine_skip_context(struct i915_request *rq)
+ static void skip_context(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
struct intel_context *hung_ctx = rq->context;
- if (!i915_request_is_active(rq))
- return;
+ list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
+ if (!i915_request_is_active(rq))
+ return;
- lockdep_assert_held(&engine->active.lock);
- list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->context == hung_ctx) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
}
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -160,7 +159,7 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
if (mark_guilty(rq))
- engine_skip_context(rq);
+ skip_context(rq);
} else {
i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
@@ -753,8 +752,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
if (err)
return err;
+ local_bh_disable();
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
intel_ggtt_restore_fences(gt->ggtt);
@@ -832,9 +833,11 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
+ local_bh_disable();
for_each_engine(engine, gt, id)
if (engine->reset.cancel)
engine->reset.cancel(engine);
+ local_bh_enable();
reset_finish(gt, awake);
@@ -1109,20 +1112,7 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
return __intel_gt_reset(engine->gt, engine->mask);
}
- /**
- * intel_engine_reset - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no drm_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- * - identifies the request that caused the hang and it is dropped
- * - reset engine (which will force the engine to idle)
- * - re-init/configure engine
- */
- int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
bool uses_guc = intel_engine_in_guc_submission_mode(engine);
@@ -1172,6 +1162,30 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
return ret;
}
+ /**
+ * intel_engine_reset - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identifies the request that caused the hang and it is dropped
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+ {
+ int err;
+ local_bh_disable();
+ err = __intel_engine_reset_bh(engine, msg);
+ local_bh_enable();
+ return err;
+ }
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
@@ -1258,18 +1272,20 @@ void intel_gt_handle_error(struct intel_gt *gt,
* single reset fails.
*/
if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ local_bh_disable();
for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
continue;
- if (intel_engine_reset(engine, msg) == 0)
+ if (__intel_engine_reset_bh(engine, msg) == 0)
engine_mask &= ~engine->mask;
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
}
+ local_bh_enable();
}
if (!engine_mask)
@@ -34,6 +34,8 @@ void intel_gt_reset(struct intel_gt *gt,
const char *reason);
int intel_engine_reset(struct intel_engine_cs *engine,
const char *reason);
+ int __intel_engine_reset_bh(struct intel_engine_cs *engine,
+ const char *reason);
void __i915_request_reset(struct i915_request *rq, bool guilty);
@@ -25,7 +25,7 @@ static int request_sync(struct i915_request *rq)
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- __i915_request_queue(rq, NULL);
+ __i915_request_queue_bh(rq);
timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
@@ -599,8 +599,10 @@ static int live_hold_reset(void *arg)
/* We have our request executing, now remove it and reset */
+ local_bh_disable();
if (test_and_set_bit(I915_RESET_ENGINE + id,
&gt->reset.flags)) {
+ local_bh_enable();
intel_gt_set_wedged(gt);
err = -EBUSY;
goto out;
@@ -614,12 +616,13 @@ static int live_hold_reset(void *arg)
execlists_hold(engine, rq);
GEM_BUG_ON(!i915_request_on_hold(rq));
- intel_engine_reset(engine, NULL);
+ __intel_engine_reset_bh(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(I915_RESET_ENGINE + id,
&gt->reset.flags);
+ local_bh_enable();
/* Check that we do not resubmit the held request */
if (!i915_request_wait(rq, 0, HZ / 5)) {
@@ -4546,8 +4549,10 @@ static int reset_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(engine == ve->engine);
/* Take ownership of the reset and tasklet */
+ local_bh_disable();
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags)) {
+ local_bh_enable();
intel_gt_set_wedged(gt);
err = -EBUSY;
goto out_heartbeat;
@@ -4567,12 +4572,13 @@ static int reset_virtual_engine(struct intel_gt *gt,
execlists_hold(engine, rq);
GEM_BUG_ON(!i915_request_on_hold(rq));
- intel_engine_reset(engine, NULL);
+ __intel_engine_reset_bh(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
/* Release our grasp on the engine, letting CS flow again */
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+ local_bh_enable();
/* Check that we do not resubmit the held request */
i915_request_get(rq);
@@ -1576,12 +1576,17 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
engine->name, mode, p->name);
tasklet_disable(t);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
tasklet_enable(t);
+ tasklet_hi_schedule(t);
if (err)
pr_err("i915_reset_engine(%s:%s) failed under %s\n",
@@ -1607,16 +1607,17 @@ static void garbage_reset(struct intel_engine_cs *engine,
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- if (test_and_set_bit(bit, lock))
- return;
+ local_bh_disable();
+ if (!test_and_set_bit(bit, lock)) {
+ tasklet_disable(&engine->execlists.tasklet);
- tasklet_disable(&engine->execlists.tasklet);
+ if (!rq->fence.error)
+ __intel_engine_reset_bh(engine, NULL);
- if (!rq->fence.error)
- intel_engine_reset(engine, NULL);
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
- tasklet_enable(&engine->execlists.tasklet);
- clear_and_wake_up_bit(bit, lock);
+ }
+ local_bh_enable();
}
static struct i915_request *garbage(struct intel_context *ce,
@@ -327,11 +327,16 @@ static int igt_atomic_engine_reset(void *arg)
for (p = igt_atomic_phases; p->name; p++) {
GEM_TRACE("intel_engine_reset(%s) under %s\n",
engine->name, p->name);
+ if (strcmp(p->name, "softirq"))
+ local_bh_disable();
p->critical_section_begin();
- err = intel_engine_reset(engine, NULL);
+ err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
+ if (strcmp(p->name, "softirq"))
+ local_bh_enable();
if (err) {
pr_err("intel_engine_reset(%s) failed under %s\n",
engine->name, p->name);
@@ -341,6 +346,7 @@ static int igt_atomic_engine_reset(void *arg)
intel_engine_pm_put(engine);
tasklet_enable(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
if (err)
break;
}
@@ -1584,6 +1584,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
return __i915_request_add_to_timeline(rq);
}
+ void __i915_request_queue_bh(struct i915_request *rq)
+ {
+ i915_sw_fence_commit(&rq->semaphore);
+ i915_sw_fence_commit(&rq->submit);
+ }
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
@@ -1600,8 +1606,10 @@ void __i915_request_queue(struct i915_request *rq,
*/
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
- i915_sw_fence_commit(&rq->semaphore);
- i915_sw_fence_commit(&rq->submit);
+ local_bh_disable();
+ __i915_request_queue_bh(rq);
+ local_bh_enable(); /* kick tasklets */
}
void i915_request_add(struct i915_request *rq)
@@ -315,6 +315,7 @@ void __i915_request_skip(struct i915_request *rq);
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
+ void __i915_request_queue_bh(struct i915_request *rq);
bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
@@ -458,14 +458,10 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!dep)
return -ENOMEM;
- local_bh_disable();
if (!__i915_sched_node_add_dependency(node, signal, dep,
flags | I915_DEPENDENCY_ALLOC))
i915_dependency_free(dep);
- local_bh_enable(); /* kick submission tasklet */
return 0;
}
@@ -1933,9 +1933,7 @@ static int measure_inter_request(struct intel_context *ce)
intel_ring_advance(rq, cs);
i915_request_add(rq);
}
- local_bh_disable();
i915_sw_fence_commit(submit);
- local_bh_enable();
intel_engine_flush_submission(ce->engine);
heap_fence_put(submit);
@@ -2221,11 +2219,9 @@ static int measure_completion(struct intel_context *ce)
intel_ring_advance(rq, cs);
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
- local_bh_disable();
i915_request_add(rq);
- local_bh_enable();
intel_engine_flush_submission(ce->engine);
if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
err = -EIO;
goto err;
@@ -220,6 +220,9 @@ void igt_spinner_fini(struct igt_spinner *spin)
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
+ if (i915_request_is_ready(rq))
+ intel_engine_flush_submission(rq->engine);
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
100) &&