Commit 84383d2e authored by Chris Wilson

drm/i915: Refine i915_reset.lock_map

We already use a mutex to serialise i915_reset() and wedging, so all we
need is to link that into i915_request_wait() and we have our lock cycle
detection.

v2.5: Take error mutex for selftests
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614071023.17929-3-chris@chris-wilson.co.uk
parent 6e4e9708
......@@ -978,7 +978,7 @@ void i915_reset(struct drm_i915_private *i915,
might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
lock_map_acquire(&i915->gt.reset_lockmap);
mutex_lock(&error->wedge_mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
if (!__i915_gem_unset_wedged(i915))
......@@ -1031,7 +1031,7 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
unlock:
lock_map_release(&i915->gt.reset_lockmap);
mutex_unlock(&error->wedge_mutex);
return;
taint:
......@@ -1147,9 +1147,7 @@ static void i915_reset_device(struct drm_i915_private *i915,
/* Flush everyone using a resource about to be clobbered */
synchronize_srcu_expedited(&error->reset_backoff_srcu);
mutex_lock(&error->wedge_mutex);
i915_reset(i915, engine_mask, reason);
mutex_unlock(&error->wedge_mutex);
intel_finish_reset(i915);
}
......
......@@ -1901,14 +1901,6 @@ struct drm_i915_private {
ktime_t last_init_time;
struct i915_vma *scratch;
/*
* We must never wait on the GPU while holding a lock as we
* may need to perform a GPU reset. So while we don't need to
* serialise wait/reset with an explicit lock, we do want
* lockdep to detect potential dependency cycles.
*/
struct lockdep_map reset_lockmap;
} gt;
struct {
......
......@@ -1746,7 +1746,6 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
static struct lock_class_key reset_key;
int err;
intel_gt_pm_init(dev_priv);
......@@ -1754,8 +1753,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
spin_lock_init(&dev_priv->gt.closed_lock);
lockdep_init_map(&dev_priv->gt.reset_lockmap,
"i915.reset", &reset_key, 0);
i915_gem_init__mm(dev_priv);
i915_gem_init__pm(dev_priv);
......
......@@ -1444,7 +1444,15 @@ long i915_request_wait(struct i915_request *rq,
return -ETIME;
trace_i915_request_wait_begin(rq, flags);
lock_map_acquire(&rq->i915->gt.reset_lockmap);
/*
* We must never wait on the GPU while holding a lock as we
* may need to perform a GPU reset. So while we don't need to
* serialise wait/reset with an explicit lock, we do want
* lockdep to detect potential dependency cycles.
*/
mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
0, 0, _THIS_IP_);
/*
* Optimistic spin before touching IRQs.
......@@ -1520,7 +1528,7 @@ long i915_request_wait(struct i915_request *rq,
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
lock_map_release(&rq->i915->gt.reset_lockmap);
mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
......
......@@ -130,7 +130,6 @@ static struct dev_pm_domain pm_domain = {
struct drm_i915_private *mock_gem_device(void)
{
static struct lock_class_key reset_key;
struct drm_i915_private *i915;
struct pci_dev *pdev;
int err;
......@@ -205,7 +204,6 @@ struct drm_i915_private *mock_gem_device(void)
INIT_LIST_HEAD(&i915->gt.active_rings);
INIT_LIST_HEAD(&i915->gt.closed_vma);
spin_lock_init(&i915->gt.closed_lock);
lockdep_init_map(&i915->gt.reset_lockmap, "i915.reset", &reset_key, 0);
mutex_lock(&i915->drm.struct_mutex);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment