Commit 22dd3bb9 authored by Chris Wilson

drm/i915: Mark up all locked waiters

In the next patch we want to handle reset directly by a locked waiter in
order to avoid issues with returning before the reset is handled. To
handle the reset, we must first know whether we hold the struct_mutex.
If we do not hold the struct_mutex we cannot perform the reset, but we do
not block the reset worker either (and so we can just continue to wait for
request completion) - otherwise we must relinquish the mutex.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-10-chris@chris-wilson.co.uk
parent ea746f36
...@@ -4794,7 +4794,9 @@ i915_drop_caches_set(void *data, u64 val) ...@@ -4794,7 +4794,9 @@ i915_drop_caches_set(void *data, u64 val)
return ret; return ret;
if (val & DROP_ACTIVE) { if (val & DROP_ACTIVE) {
ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret) if (ret)
goto unlock; goto unlock;
} }
......
...@@ -2802,7 +2802,8 @@ __i915_gem_object_sync(struct drm_i915_gem_request *to, ...@@ -2802,7 +2802,8 @@ __i915_gem_object_sync(struct drm_i915_gem_request *to,
if (!i915.semaphores) { if (!i915.semaphores) {
ret = i915_wait_request(from, ret = i915_wait_request(from,
from->i915->mm.interruptible, from->i915->mm.interruptible |
I915_WAIT_LOCKED,
NULL, NULL,
NO_WAITBOOST); NO_WAITBOOST);
if (ret) if (ret)
...@@ -4304,7 +4305,9 @@ int i915_gem_suspend(struct drm_device *dev) ...@@ -4304,7 +4305,9 @@ int i915_gem_suspend(struct drm_device *dev)
if (ret) if (ret)
goto err; goto err;
ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret) if (ret)
goto err; goto err;
......
...@@ -170,7 +170,9 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -170,7 +170,9 @@ i915_gem_evict_something(struct i915_address_space *vm,
if (ret) if (ret)
return ret; return ret;
ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret) if (ret)
return ret; return ret;
...@@ -275,7 +277,9 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) ...@@ -275,7 +277,9 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
return ret; return ret;
} }
ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret) if (ret)
return ret; return ret;
......
...@@ -2683,7 +2683,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) ...@@ -2683,7 +2683,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) { if (unlikely(ggtt->do_idle_maps)) {
if (i915_gem_wait_for_idle(dev_priv, 0)) { if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */ /* Wait a bit, in hopes it avoids the hang */
udelay(10); udelay(10);
......
...@@ -260,7 +260,9 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) ...@@ -260,7 +260,9 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
/* Carefully retire all requests without writing to the rings */ /* Carefully retire all requests without writing to the rings */
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
ret = intel_engine_idle(engine, I915_WAIT_INTERRUPTIBLE); ret = intel_engine_idle(engine,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -625,6 +627,10 @@ int i915_wait_request(struct drm_i915_gem_request *req, ...@@ -625,6 +627,10 @@ int i915_wait_request(struct drm_i915_gem_request *req,
int ret = 0; int ret = 0;
might_sleep(); might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
!!(flags & I915_WAIT_LOCKED));
#endif
if (i915_gem_request_completed(req)) if (i915_gem_request_completed(req))
return 0; return 0;
...@@ -667,7 +673,8 @@ int i915_wait_request(struct drm_i915_gem_request *req, ...@@ -667,7 +673,8 @@ int i915_wait_request(struct drm_i915_gem_request *req,
goto complete; goto complete;
set_current_state(state); set_current_state(state);
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset); if (flags & I915_WAIT_LOCKED)
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
intel_wait_init(&wait, req->fence.seqno); intel_wait_init(&wait, req->fence.seqno);
if (intel_engine_add_wait(req->engine, &wait)) if (intel_engine_add_wait(req->engine, &wait))
...@@ -707,10 +714,12 @@ int i915_wait_request(struct drm_i915_gem_request *req, ...@@ -707,10 +714,12 @@ int i915_wait_request(struct drm_i915_gem_request *req,
if (i915_spin_request(req, state, 2)) if (i915_spin_request(req, state, 2))
break; break;
} }
remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
intel_engine_remove_wait(req->engine, &wait); intel_engine_remove_wait(req->engine, &wait);
if (flags & I915_WAIT_LOCKED)
remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
complete: complete:
trace_i915_gem_request_wait_end(req); trace_i915_gem_request_wait_end(req);
......
...@@ -222,7 +222,8 @@ int i915_wait_request(struct drm_i915_gem_request *req, ...@@ -222,7 +222,8 @@ int i915_wait_request(struct drm_i915_gem_request *req,
s64 *timeout, s64 *timeout,
struct intel_rps_client *rps) struct intel_rps_client *rps)
__attribute__((nonnull(1))); __attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0) #define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
...@@ -576,7 +577,9 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex) ...@@ -576,7 +577,9 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
if (!request) if (!request)
return 0; return 0;
return i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL); return i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NULL);
} }
/** /**
...@@ -639,7 +642,9 @@ i915_gem_active_retire(struct i915_gem_active *active, ...@@ -639,7 +642,9 @@ i915_gem_active_retire(struct i915_gem_active *active,
if (!request) if (!request)
return 0; return 0;
ret = i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL); ret = i915_wait_request(request,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NULL);
if (ret) if (ret)
return ret; return ret;
......
...@@ -414,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr ...@@ -414,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
return NOTIFY_DONE; return NOTIFY_DONE;
/* Force everything onto the inactive lists */ /* Force everything onto the inactive lists */
ret = i915_gem_wait_for_idle(dev_priv, 0); ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
if (ret) if (ret)
goto out; goto out;
......
...@@ -2223,7 +2223,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) ...@@ -2223,7 +2223,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
if (WARN_ON(&target->ring_link == &ring->request_list)) if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC; return -ENOSPC;
ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, ret = i915_wait_request(target,
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
NULL, NO_WAITBOOST); NULL, NO_WAITBOOST);
if (ret) if (ret)
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment