Commit c19ae989 authored by Chris Wilson's avatar Chris Wilson

drm/i915: Hide the atomic_read(reset_counter) behind a helper

This is principally a little bit of syntactic sugar to hide the
atomic_read()s throughout the code to retrieve the current reset_counter.
It also provides the other utility functions to check the reset state on the
already read reset_counter, so that (in later patches) we can read it once
and do multiple tests rather than risk the value changing between tests.

v2: Be more strict on converting existing i915_reset_in_progress() over to
the more verbose i915_reset_in_progress_or_wedged().
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1460565315-7748-4-git-send-email-chris@chris-wilson.co.uk
parent d501b1d2
...@@ -4722,7 +4722,7 @@ i915_wedged_get(void *data, u64 *val) ...@@ -4722,7 +4722,7 @@ i915_wedged_get(void *data, u64 *val)
struct drm_device *dev = data; struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
*val = atomic_read(&dev_priv->gpu_error.reset_counter); *val = i915_reset_counter(&dev_priv->gpu_error);
return 0; return 0;
} }
...@@ -4741,7 +4741,7 @@ i915_wedged_set(void *data, u64 val) ...@@ -4741,7 +4741,7 @@ i915_wedged_set(void *data, u64 val)
* while it is writing to 'i915_wedged' * while it is writing to 'i915_wedged'
*/ */
if (i915_reset_in_progress(&dev_priv->gpu_error)) if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error))
return -EAGAIN; return -EAGAIN;
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
......
...@@ -3093,20 +3093,44 @@ void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); ...@@ -3093,20 +3093,44 @@ void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible); bool interruptible);
static inline u32 i915_reset_counter(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter);
}
static inline bool __i915_reset_in_progress(u32 reset)
{
return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
}
static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
{
return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}
static inline bool __i915_terminally_wedged(u32 reset)
{
return unlikely(reset & I915_WEDGED);
}
static inline bool i915_reset_in_progress(struct i915_gpu_error *error) static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{ {
return unlikely(atomic_read(&error->reset_counter) return __i915_reset_in_progress(i915_reset_counter(error));
& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); }
static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
{
return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
} }
static inline bool i915_terminally_wedged(struct i915_gpu_error *error) static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{ {
return atomic_read(&error->reset_counter) & I915_WEDGED; return __i915_terminally_wedged(i915_reset_counter(error));
} }
static inline u32 i915_reset_count(struct i915_gpu_error *error) static inline u32 i915_reset_count(struct i915_gpu_error *error)
{ {
return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
} }
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
......
...@@ -83,7 +83,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) ...@@ -83,7 +83,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{ {
int ret; int ret;
#define EXIT_COND (!i915_reset_in_progress(error) || \ #define EXIT_COND (!i915_reset_in_progress_or_wedged(error) || \
i915_terminally_wedged(error)) i915_terminally_wedged(error))
if (EXIT_COND) if (EXIT_COND)
return 0; return 0;
...@@ -1112,7 +1112,7 @@ int ...@@ -1112,7 +1112,7 @@ int
i915_gem_check_wedge(struct i915_gpu_error *error, i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible) bool interruptible)
{ {
if (i915_reset_in_progress(error)) { if (i915_reset_in_progress_or_wedged(error)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return /* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */ * -EIO unconditionally for these. */
if (!interruptible) if (!interruptible)
...@@ -1299,7 +1299,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, ...@@ -1299,7 +1299,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
/* We need to check whether any gpu reset happened in between /* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */ * the caller grabbing the seqno and now ... */
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) { if (reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
/* ... but upgrade the -EAGAIN to an -EIO if the gpu /* ... but upgrade the -EAGAIN to an -EIO if the gpu
* is truely gone. */ * is truely gone. */
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
...@@ -1474,7 +1474,7 @@ i915_wait_request(struct drm_i915_gem_request *req) ...@@ -1474,7 +1474,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
return ret; return ret;
ret = __i915_wait_request(req, ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter), i915_reset_counter(&dev_priv->gpu_error),
interruptible, NULL, NULL); interruptible, NULL, NULL);
if (ret) if (ret)
return ret; return ret;
...@@ -1563,7 +1563,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, ...@@ -1563,7 +1563,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); reset_counter = i915_reset_counter(&dev_priv->gpu_error);
if (readonly) { if (readonly) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
...@@ -3179,7 +3179,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ...@@ -3179,7 +3179,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
} }
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); reset_counter = i915_reset_counter(&dev_priv->gpu_error);
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL) if (obj->last_read_req[i] == NULL)
...@@ -3224,7 +3224,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, ...@@ -3224,7 +3224,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (!i915_semaphore_is_enabled(obj->base.dev)) { if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req, ret = __i915_wait_request(from_req,
atomic_read(&i915->gpu_error.reset_counter), i915_reset_counter(&i915->gpu_error),
i915->mm.interruptible, i915->mm.interruptible,
NULL, NULL,
&i915->rps.semaphores); &i915->rps.semaphores);
...@@ -4205,7 +4205,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) ...@@ -4205,7 +4205,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
target = request; target = request;
} }
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); reset_counter = i915_reset_counter(&dev_priv->gpu_error);
if (target) if (target)
i915_gem_request_reference(target); i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock); spin_unlock(&file_priv->mm.lock);
......
...@@ -2501,7 +2501,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) ...@@ -2501,7 +2501,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
* the reset in-progress bit is only ever set by code outside of this * the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races. * work we don't need to worry about any other races.
*/ */
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { if (i915_reset_in_progress_or_wedged(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n"); DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
reset_event); reset_event);
......
...@@ -3200,10 +3200,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) ...@@ -3200,10 +3200,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned reset_counter;
bool pending; bool pending;
if (i915_reset_in_progress(&dev_priv->gpu_error) || reset_counter = i915_reset_counter(&dev_priv->gpu_error);
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) if (intel_crtc->reset_counter != reset_counter ||
__i915_reset_in_progress_or_wedged(reset_counter))
return false; return false;
spin_lock_irq(&dev->event_lock); spin_lock_irq(&dev->event_lock);
...@@ -10908,9 +10910,11 @@ static bool page_flip_finished(struct intel_crtc *crtc) ...@@ -10908,9 +10910,11 @@ static bool page_flip_finished(struct intel_crtc *crtc)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned reset_counter;
if (i915_reset_in_progress(&dev_priv->gpu_error) || reset_counter = i915_reset_counter(&dev_priv->gpu_error);
crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) if (crtc->reset_counter != reset_counter ||
__i915_reset_in_progress_or_wedged(reset_counter))
return true; return true;
/* /*
...@@ -11573,7 +11577,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -11573,7 +11577,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup; goto cleanup;
atomic_inc(&intel_crtc->unpin_work_count); atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
...@@ -13419,10 +13423,10 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, ...@@ -13419,10 +13423,10 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret; return ret;
ret = drm_atomic_helper_prepare_planes(dev, state); ret = drm_atomic_helper_prepare_planes(dev, state);
if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) { if (!ret && !async && !i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
u32 reset_counter; u32 reset_counter;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); reset_counter = i915_reset_counter(&dev_priv->gpu_error);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
for_each_plane_in_state(state, plane, plane_state, i) { for_each_plane_in_state(state, plane, plane_state, i) {
......
...@@ -1055,7 +1055,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine) ...@@ -1055,7 +1055,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
return; return;
ret = intel_engine_idle(engine); ret = intel_engine_idle(engine);
if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error)) if (ret && !i915_reset_in_progress_or_wedged(&dev_priv->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret); engine->name, ret);
......
...@@ -2364,8 +2364,8 @@ int intel_engine_idle(struct intel_engine_cs *engine) ...@@ -2364,8 +2364,8 @@ int intel_engine_idle(struct intel_engine_cs *engine)
/* Make sure we do not trigger any retires */ /* Make sure we do not trigger any retires */
return __i915_wait_request(req, return __i915_wait_request(req,
atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter), i915_reset_counter(&req->i915->gpu_error),
to_i915(engine->dev)->mm.interruptible, req->i915->mm.interruptible,
NULL, NULL); NULL, NULL);
} }
...@@ -3190,7 +3190,8 @@ intel_stop_engine(struct intel_engine_cs *engine) ...@@ -3190,7 +3190,8 @@ intel_stop_engine(struct intel_engine_cs *engine)
return; return;
ret = intel_engine_idle(engine); ret = intel_engine_idle(engine);
if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error)) if (ret &&
!i915_reset_in_progress_or_wedged(&to_i915(engine->dev)->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret); engine->name, ret);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment