Commit ff865885 authored by John Harrison, committed by Daniel Vetter

drm/i915: Ensure requests stick around during waits

Added reference counting of the request structure around __wait_seqno() calls.
This is a precursor to updating the wait code itself to take the request rather
than a seqno. At that point, it would be a Bad Idea for a request object to be
retired and freed while the wait code is still using it.
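To make the hazard concrete, here is a standalone userspace sketch of the pattern this patch applies: pin the request with a reference before dropping the lock to wait, so a concurrent retire cannot free it mid-wait. This is illustrative only, not driver code; every name in it (sim_request and friends) is made up.

/*
 * Userspace illustration of the lifetime bug. Without the waiter's
 * extra reference, the retire step would drop the last reference and
 * free the request while the waiter is still using it.
 */
#include <stdio.h>
#include <stdlib.h>

struct sim_request {
	int refcount;
	unsigned int seqno;
};

static void sim_request_reference(struct sim_request *req)
{
	req->refcount++;
}

static void sim_request_unreference(struct sim_request *req)
{
	if (--req->refcount == 0) {
		printf("request %u freed\n", req->seqno);
		free(req);
	}
}

int main(void)
{
	struct sim_request *req = malloc(sizeof(*req));

	req->refcount = 1;		/* the "active list" reference */
	req->seqno = 1;

	sim_request_reference(req);	/* waiter pins, then drops the lock */
	sim_request_unreference(req);	/* retire runs during the wait */
	printf("waiter reads seqno %u\n", req->seqno);	/* still valid */
	sim_request_unreference(req);	/* waiter drops its pin: freed here */

	return 0;
}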

v3:

Note that even though the mutex lock is held during a call to i915_wait_seqno(),
it is still necessary to explicitly bump the reference count. It appears that
the shrinker can asynchronously retire items even though the mutex is locked.
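In code terms, even the seemingly redundant pairing below is required on the locked path. This is an abbreviated sketch of that pattern, with context elided and comments ours; only the identifiers already named in this patch are used.

	/*
	 * struct_mutex is held across the wait here, yet the request
	 * must still be pinned: the shrinker can retire requests
	 * asynchronously, so the lock alone does not keep req alive.
	 */
	i915_gem_request_reference(req);
	ret = i915_wait_seqno(ring, seqno);
	i915_gem_request_unreference(req);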

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
[danvet: Remove wrongly squashed hunk which breaks the build.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 54fb2411
@@ -1417,10 +1417,12 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		return ret;
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
 				file_priv);
 	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(req);
 	if (ret)
 		return ret;
 
@@ -2920,6 +2922,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_request *req;
 	struct intel_engine_cs *ring = NULL;
 	unsigned reset_counter;
 	u32 seqno = 0;
@@ -2946,7 +2949,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (!obj->active || !obj->last_read_req)
 		goto out;
 
-	seqno = i915_gem_request_get_seqno(obj->last_read_req);
+	req = obj->last_read_req;
+	seqno = i915_gem_request_get_seqno(req);
 	WARN_ON(seqno == 0);
 	ring = obj->ring;
 
@@ -2960,10 +2964,15 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	drm_gem_object_unreference(&obj->base);
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 
-	return __i915_wait_seqno(ring, seqno, reset_counter, true,
-				 &args->timeout_ns, file->driver_priv);
+	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+				file->driver_priv);
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(req);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -4118,6 +4127,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		target = request;
 	}
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	if (target)
+		i915_gem_request_reference(target);
 	spin_unlock(&file_priv->mm.lock);
 
 	if (target == NULL)
@@ -4129,6 +4140,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(target);
+	mutex_unlock(&dev->struct_mutex);
+
 	return ret;
 }