Commit 737b1506 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Convert hangcheck from a timer into a delayed work item

When run as a timer, i915_hangcheck_elapsed() must adhere to all the
rules of running in a softirq context. This is advantageous to us as we
want to minimise the risk that a driver bug will prevent us from
detecting a hung GPU. However, that is irrelevant if the driver bug
prevents us from resetting and recovering. Still it is prudent not to
rely on mutexes inside the checker, but given the coarseness of
dev->struct_mutex doing so is extremely hard.

Give in and run from a work queue, i.e. outside of softirq.

v2: Use own workqueue to avoid deadlocks (Daniel)
    Cleanup commit msg and add comment to i915_queue_hangcheck() (Chris)

Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v1)
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
[danvet: Remove accidental kerneldoc comment starter, to appease the 0
day builder.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 983d308c
...@@ -790,6 +790,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -790,6 +790,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_freewq; goto out_freewq;
} }
dev_priv->gpu_error.hangcheck_wq =
alloc_ordered_workqueue("i915-hangcheck", 0);
if (dev_priv->gpu_error.hangcheck_wq == NULL) {
DRM_ERROR("Failed to create our hangcheck workqueue.\n");
ret = -ENOMEM;
goto out_freedpwq;
}
intel_irq_init(dev_priv); intel_irq_init(dev_priv);
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev);
...@@ -864,6 +872,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -864,6 +872,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_teardown_gmbus(dev); intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev); intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos); pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
destroy_workqueue(dev_priv->dp_wq); destroy_workqueue(dev_priv->dp_wq);
out_freewq: out_freewq:
destroy_workqueue(dev_priv->wq); destroy_workqueue(dev_priv->wq);
...@@ -934,7 +944,7 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -934,7 +944,7 @@ int i915_driver_unload(struct drm_device *dev)
} }
/* Free error state after interrupts are fully disabled. */ /* Free error state after interrupts are fully disabled. */
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_work_sync(&dev_priv->gpu_error.work); cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev); i915_destroy_error_state(dev);
...@@ -960,6 +970,7 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -960,6 +970,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->dp_wq); destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq); destroy_workqueue(dev_priv->wq);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos); pm_qos_remove_request(&dev_priv->pm_qos);
i915_global_gtt_cleanup(dev); i915_global_gtt_cleanup(dev);
......
...@@ -1402,7 +1402,7 @@ static int intel_runtime_suspend(struct device *device) ...@@ -1402,7 +1402,7 @@ static int intel_runtime_suspend(struct device *device)
return ret; return ret;
} }
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
intel_uncore_forcewake_reset(dev, false); intel_uncore_forcewake_reset(dev, false);
dev_priv->pm.suspended = true; dev_priv->pm.suspended = true;
......
...@@ -1345,7 +1345,8 @@ struct i915_gpu_error { ...@@ -1345,7 +1345,8 @@ struct i915_gpu_error {
/* Hang gpu twice in this window and your context gets banned */ /* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
struct timer_list hangcheck_timer; struct workqueue_struct *hangcheck_wq;
struct delayed_work hangcheck_work;
/* For reset and error_state handling. */ /* For reset and error_state handling. */
spinlock_t lock; spinlock_t lock;
......
...@@ -4608,7 +4608,7 @@ i915_gem_suspend(struct drm_device *dev) ...@@ -4608,7 +4608,7 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_stop_ringbuffers(dev); i915_gem_stop_ringbuffers(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->mm.retire_work); cancel_delayed_work_sync(&dev_priv->mm.retire_work);
flush_delayed_work(&dev_priv->mm.idle_work); flush_delayed_work(&dev_priv->mm.idle_work);
......
...@@ -2974,7 +2974,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) ...@@ -2974,7 +2974,7 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_HUNG; return HANGCHECK_HUNG;
} }
/** /*
* This is called when the chip hasn't reported back with completed * This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track per ring seqno progress and * batchbuffers in a long time. We keep track per ring seqno progress and
* if there are no progress, hangcheck score for that ring is increased. * if there are no progress, hangcheck score for that ring is increased.
...@@ -2982,10 +2982,12 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) ...@@ -2982,10 +2982,12 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
* we kick the ring. If we see no progress on three subsequent calls * we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip. * we assume chip is wedged and try to fix it by resetting the chip.
*/ */
static void i915_hangcheck_elapsed(unsigned long data) static void i915_hangcheck_elapsed(struct work_struct *work)
{ {
struct drm_device *dev = (struct drm_device *)data; struct drm_i915_private *dev_priv =
struct drm_i915_private *dev_priv = dev->dev_private; container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring; struct intel_engine_cs *ring;
int i; int i;
int busy_count = 0, rings_hung = 0; int busy_count = 0, rings_hung = 0;
...@@ -3099,17 +3101,18 @@ static void i915_hangcheck_elapsed(unsigned long data) ...@@ -3099,17 +3101,18 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev) void i915_queue_hangcheck(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
if (!i915.enable_hangcheck) if (!i915.enable_hangcheck)
return; return;
/* Don't continually defer the hangcheck, but make sure it is active */ /* Don't continually defer the hangcheck so that it is always run at
if (timer_pending(timer)) * least once after work has been scheduled on any ring. Otherwise,
return; * we will ignore a hung ring if a second ring is kept busy.
mod_timer(timer, */
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
} }
static void ibx_irq_reset(struct drm_device *dev) static void ibx_irq_reset(struct drm_device *dev)
...@@ -4353,9 +4356,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv) ...@@ -4353,9 +4356,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
setup_timer(&dev_priv->gpu_error.hangcheck_timer, INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed, i915_hangcheck_elapsed);
(unsigned long) dev);
INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
intel_hpd_irq_reenable_work); intel_hpd_irq_reenable_work);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment