Commit 22dd3bb9 authored by Chris Wilson

drm/i915: Mark up all locked waiters

In the next patch we want to handle reset directly by a locked waiter in
order to avoid issues with returning before the reset is handled. To
handle the reset, we must first know whether we hold the struct_mutex.
If we do not hold the struct_mutex we cannot perform the reset, but neither
do we block the reset worker (and so we can simply continue to wait for
request completion); otherwise we must relinquish the mutex.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160909131201.16673-10-chris@chris-wilson.co.uk
parent ea746f36
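Illustration (not part of the patch): the sketch below models, in plain user-space C, the rule this series adds to i915_wait_request(): pass I915_WAIT_LOCKED exactly when struct_mutex is held, so that only a locked waiter takes on GPU reset handling while an unlocked waiter simply keeps waiting for completion. The demo_dev, fake_wait_request and WAIT_* names are invented for the example; only the flag-versus-lock pairing mirrors the patch.

/* Illustrative user-space sketch only; build with: cc demo.c -lpthread */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define WAIT_INTERRUPTIBLE (1u << 0)
#define WAIT_LOCKED        (1u << 1)  /* caller holds the mutex, handle reset here */

struct demo_dev {
	pthread_mutex_t struct_mutex;
	bool holds_mutex;             /* stand-in for lockdep_is_held() */
};

static int fake_wait_request(struct demo_dev *dev, unsigned int flags)
{
	/* Same shape as the patch's lockdep check: flag and lock must agree. */
	assert(!!dev->holds_mutex == !!(flags & WAIT_LOCKED));

	if (flags & WAIT_LOCKED)
		printf("locked waiter: may relinquish the mutex and handle the reset\n");
	else
		printf("unlocked waiter: keeps waiting; the reset worker is not blocked\n");
	return 0;
}

int main(void)
{
	struct demo_dev dev = { .holds_mutex = false };

	pthread_mutex_init(&dev.struct_mutex, NULL);

	/* Unlocked caller: no WAIT_LOCKED, it just waits for request completion. */
	fake_wait_request(&dev, WAIT_INTERRUPTIBLE);

	/* Locked caller: must mark itself so the wait path knows it owns the lock. */
	pthread_mutex_lock(&dev.struct_mutex);
	dev.holds_mutex = true;
	fake_wait_request(&dev, WAIT_INTERRUPTIBLE | WAIT_LOCKED);
	dev.holds_mutex = false;
	pthread_mutex_unlock(&dev.struct_mutex);

	pthread_mutex_destroy(&dev.struct_mutex);
	return 0;
}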
@@ -4794,7 +4794,9 @@ i915_drop_caches_set(void *data, u64 val)
 		return ret;
 
 	if (val & DROP_ACTIVE) {
-		ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+		ret = i915_gem_wait_for_idle(dev_priv,
+					     I915_WAIT_INTERRUPTIBLE |
+					     I915_WAIT_LOCKED);
 		if (ret)
 			goto unlock;
 	}
@@ -2802,7 +2802,8 @@ __i915_gem_object_sync(struct drm_i915_gem_request *to,
 
 	if (!i915.semaphores) {
 		ret = i915_wait_request(from,
-					from->i915->mm.interruptible,
+					from->i915->mm.interruptible |
+					I915_WAIT_LOCKED,
 					NULL,
 					NO_WAITBOOST);
 		if (ret)
@@ -4304,7 +4305,9 @@ int i915_gem_suspend(struct drm_device *dev)
 	if (ret)
 		goto err;
 
-	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+	ret = i915_gem_wait_for_idle(dev_priv,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
 	if (ret)
 		goto err;
@@ -170,7 +170,9 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	if (ret)
 		return ret;
 
-	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+	ret = i915_gem_wait_for_idle(dev_priv,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
 	if (ret)
 		return ret;
@@ -275,7 +277,9 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 				return ret;
 		}
 
-		ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+		ret = i915_gem_wait_for_idle(dev_priv,
+					     I915_WAIT_INTERRUPTIBLE |
+					     I915_WAIT_LOCKED);
 		if (ret)
 			return ret;
@@ -2683,7 +2683,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, 0)) {
+		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
@@ -260,7 +260,9 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
 	/* Carefully retire all requests without writing to the rings */
 	for_each_engine(engine, dev_priv) {
-		ret = intel_engine_idle(engine, I915_WAIT_INTERRUPTIBLE);
+		ret = intel_engine_idle(engine,
+					I915_WAIT_INTERRUPTIBLE |
+					I915_WAIT_LOCKED);
 		if (ret)
 			return ret;
 	}
@@ -625,6 +627,10 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 	int ret = 0;
 
 	might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+	GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+		   !!(flags & I915_WAIT_LOCKED));
+#endif
 
 	if (i915_gem_request_completed(req))
 		return 0;
@@ -667,7 +673,8 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 		goto complete;
 
 	set_current_state(state);
-	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+	if (flags & I915_WAIT_LOCKED)
+		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
 	intel_wait_init(&wait, req->fence.seqno);
 	if (intel_engine_add_wait(req->engine, &wait))
@@ -707,10 +714,12 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 		if (i915_spin_request(req, state, 2))
 			break;
 	}
-	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
 	intel_engine_remove_wait(req->engine, &wait);
+	if (flags & I915_WAIT_LOCKED)
+		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 	__set_current_state(TASK_RUNNING);
 
 complete:
 	trace_i915_gem_request_wait_end(req);
@@ -222,7 +222,8 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 		      s64 *timeout,
 		      struct intel_rps_client *rps)
 	__attribute__((nonnull(1)));
-#define I915_WAIT_INTERRUPTIBLE BIT(0)
+#define I915_WAIT_INTERRUPTIBLE	BIT(0)
+#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -576,7 +577,9 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
 	if (!request)
 		return 0;
 
-	return i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+	return i915_wait_request(request,
+				 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				 NULL, NULL);
 }
 
 /**
@@ -639,7 +642,9 @@ i915_gem_active_retire(struct i915_gem_active *active,
 	if (!request)
 		return 0;
 
-	ret = i915_wait_request(request, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+	ret = i915_wait_request(request,
+				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				NULL, NULL);
 	if (ret)
 		return ret;
@@ -414,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
-	ret = i915_gem_wait_for_idle(dev_priv, 0);
+	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
 	if (ret)
 		goto out;
@@ -2223,7 +2223,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
-	ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE,
+	ret = i915_wait_request(target,
+				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
 				NULL, NO_WAITBOOST);
 	if (ret)
 		return ret;