Commit 221fe799 authored by Chris Wilson

drm/i915: Perform a direct reset of the GPU from the waiter

If a waiter is holding the struct_mutex, then the reset worker cannot
reset the GPU until the waiter returns. We do not want to return -EAGAIN
from i915_wait_request as that breaks delicate operations like
i915_vma_unbind() which often cannot be restarted easily, and returning
-EIO is just as useless (and has in the past proven dangerous). The
remaining WARN_ON(i915_wait_request) checks serve as a valuable reminder that
handling errors from an indefinite wait is tricky.

We can keep the current semantic — that once a reset is complete, so is the
request — by performing the reset ourselves if we hold the mutex.

uevent emission is still handled by the reset worker, so it may appear
slightly out of order with respect to the actual reset (and concurrent
use of the device).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-11-chris@chris-wilson.co.uk
parent 22dd3bb9
...@@ -1729,6 +1729,8 @@ int i915_resume_switcheroo(struct drm_device *dev) ...@@ -1729,6 +1729,8 @@ int i915_resume_switcheroo(struct drm_device *dev)
* Reset the chip. Useful if a hang is detected. Returns zero on successful * Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code. * reset or otherwise an error code.
* *
* Caller must hold the struct_mutex.
*
* Procedure is fairly simple: * Procedure is fairly simple:
* - reset the chip using the reset reg * - reset the chip using the reset reg
* - re-init context state * - re-init context state
...@@ -1743,7 +1745,10 @@ int i915_reset(struct drm_i915_private *dev_priv) ...@@ -1743,7 +1745,10 @@ int i915_reset(struct drm_i915_private *dev_priv)
struct i915_gpu_error *error = &dev_priv->gpu_error; struct i915_gpu_error *error = &dev_priv->gpu_error;
int ret; int ret;
mutex_lock(&dev->struct_mutex); lockdep_assert_held(&dev->struct_mutex);
if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
return test_bit(I915_WEDGED, &error->flags) ? -EIO : 0;
/* Clear any previous failed attempts at recovery. Time to try again. */ /* Clear any previous failed attempts at recovery. Time to try again. */
__clear_bit(I915_WEDGED, &error->flags); __clear_bit(I915_WEDGED, &error->flags);
...@@ -1784,9 +1789,6 @@ int i915_reset(struct drm_i915_private *dev_priv) ...@@ -1784,9 +1789,6 @@ int i915_reset(struct drm_i915_private *dev_priv)
goto error; goto error;
} }
clear_bit(I915_RESET_IN_PROGRESS, &error->flags);
mutex_unlock(&dev->struct_mutex);
/* /*
* rps/rc6 re-init is necessary to restore state lost after the * rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per * reset and the re-install of gt irqs. Skip for ironlake per
...@@ -1800,7 +1802,6 @@ int i915_reset(struct drm_i915_private *dev_priv) ...@@ -1800,7 +1802,6 @@ int i915_reset(struct drm_i915_private *dev_priv)
error: error:
set_bit(I915_WEDGED, &error->flags); set_bit(I915_WEDGED, &error->flags);
mutex_unlock(&dev->struct_mutex);
return ret; return ret;
} }
......
...@@ -3863,7 +3863,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) ...@@ -3863,7 +3863,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
schedule_timeout_uninterruptible(remaining_jiffies); schedule_timeout_uninterruptible(remaining_jiffies);
} }
} }
static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
static inline bool
__i915_request_irq_complete(struct drm_i915_gem_request *req)
{ {
struct intel_engine_cs *engine = req->engine; struct intel_engine_cs *engine = req->engine;
...@@ -3925,17 +3927,6 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) ...@@ -3925,17 +3927,6 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
return true; return true;
} }
/* We need to check whether any gpu reset happened in between
* the request being submitted and now. If a reset has occurred,
* the seqno will have been advance past ours and our request
* is complete. If we are in the process of handling a reset,
* the request is effectively complete as the rendering will
* be discarded, but we need to return in order to drop the
* struct_mutex.
*/
if (i915_reset_in_progress(&req->i915->gpu_error))
return true;
return false; return false;
} }
......
...@@ -533,6 +533,16 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) ...@@ -533,6 +533,16 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
engine->submit_request(request); engine->submit_request(request);
} }
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
spin_unlock_irqrestore(&q->lock, flags);
}
static unsigned long local_clock_us(unsigned int *cpu) static unsigned long local_clock_us(unsigned int *cpu)
{ {
unsigned long t; unsigned long t;
...@@ -710,6 +720,25 @@ int i915_wait_request(struct drm_i915_gem_request *req, ...@@ -710,6 +720,25 @@ int i915_wait_request(struct drm_i915_gem_request *req,
if (__i915_request_irq_complete(req)) if (__i915_request_irq_complete(req))
break; break;
/* If the GPU is hung, and we hold the lock, reset the GPU
* and then check for completion. On a full reset, the engine's
* HW seqno will be advanced past us and we are complete.
* If we do a partial reset, we have to wait for the GPU to
* resume and update the breadcrumb.
*
* If we don't hold the mutex, we can just wait for the worker
* to come along and update the breadcrumb (either directly
* itself, or indirectly by recovering the GPU).
*/
if (flags & I915_WAIT_LOCKED &&
i915_reset_in_progress(&req->i915->gpu_error)) {
__set_current_state(TASK_RUNNING);
i915_reset(req->i915);
reset_wait_queue(&req->i915->gpu_error.wait_queue,
&reset);
continue;
}
/* Only spin if we know the GPU is processing this request */ /* Only spin if we know the GPU is processing this request */
if (i915_spin_request(req, state, 2)) if (i915_spin_request(req, state, 2))
break; break;
......
...@@ -2521,7 +2521,9 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) ...@@ -2521,7 +2521,9 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
* pending state and not properly drop locks, resulting in * pending state and not properly drop locks, resulting in
* deadlocks with the reset work. * deadlocks with the reset work.
*/ */
mutex_lock(&dev_priv->drm.struct_mutex);
ret = i915_reset(dev_priv); ret = i915_reset(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_finish_reset(dev_priv); intel_finish_reset(dev_priv);
......
...@@ -2229,9 +2229,6 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) ...@@ -2229,9 +2229,6 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (ret) if (ret)
return ret; return ret;
if (i915_reset_in_progress(&target->i915->gpu_error))
return -EAGAIN;
i915_gem_request_retire_upto(target); i915_gem_request_retire_upto(target);
intel_ring_update_space(ring); intel_ring_update_space(ring);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment