Commit a5ac0f90 authored by John Harrison, committed by Daniel Vetter

drm/i915: Remove the now obsolete 'i915_gem_check_olr()'

As there is no OLR to check, the check_olr() function is now a no-op and can be
removed.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ae70797d
...@@ -2852,7 +2852,6 @@ bool i915_gem_retire_requests(struct drm_device *dev); ...@@ -2852,7 +2852,6 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible); bool interruptible);
int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error) static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{ {
......
...@@ -1149,17 +1149,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error, ...@@ -1149,17 +1149,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
return 0; return 0;
} }
/*
* Compare arbitrary request against outstanding lazy request. Emit on match.
*/
int
i915_gem_check_olr(struct drm_i915_gem_request *req)
{
WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
return 0;
}
static void fake_irq(unsigned long data) static void fake_irq(unsigned long data)
{ {
wake_up_process((struct task_struct *)data); wake_up_process((struct task_struct *)data);
...@@ -1440,10 +1429,6 @@ i915_wait_request(struct drm_i915_gem_request *req) ...@@ -1440,10 +1429,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
if (ret) if (ret)
return ret; return ret;
ret = i915_gem_check_olr(req);
if (ret)
return ret;
ret = __i915_wait_request(req, ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter), atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL); interruptible, NULL, NULL);
...@@ -1543,10 +1528,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, ...@@ -1543,10 +1528,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (req == NULL) if (req == NULL)
return 0; return 0;
ret = i915_gem_check_olr(req);
if (ret)
goto err;
requests[n++] = i915_gem_request_reference(req); requests[n++] = i915_gem_request_reference(req);
} else { } else {
for (i = 0; i < I915_NUM_RINGS; i++) { for (i = 0; i < I915_NUM_RINGS; i++) {
...@@ -1556,10 +1537,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, ...@@ -1556,10 +1537,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (req == NULL) if (req == NULL)
continue; continue;
ret = i915_gem_check_olr(req);
if (ret)
goto err;
requests[n++] = i915_gem_request_reference(req); requests[n++] = i915_gem_request_reference(req);
} }
} }
...@@ -1570,7 +1547,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, ...@@ -1570,7 +1547,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
NULL, rps); NULL, rps);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
err:
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
if (ret == 0) if (ret == 0)
i915_gem_object_retire_request(obj, requests[i]); i915_gem_object_retire_request(obj, requests[i]);
...@@ -2983,7 +2959,7 @@ i915_gem_idle_work_handler(struct work_struct *work) ...@@ -2983,7 +2959,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
static int static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj) i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{ {
int ret, i; int i;
if (!obj->active) if (!obj->active)
return 0; return 0;
...@@ -2998,10 +2974,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) ...@@ -2998,10 +2974,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (list_empty(&req->list)) if (list_empty(&req->list))
goto retire; goto retire;
ret = i915_gem_check_olr(req);
if (ret)
return ret;
if (i915_gem_request_completed(req, true)) { if (i915_gem_request_completed(req, true)) {
__i915_gem_request_retire__upto(req); __i915_gem_request_retire__upto(req);
retire: retire:
...@@ -3117,10 +3089,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, ...@@ -3117,10 +3089,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (i915_gem_request_completed(from_req, true)) if (i915_gem_request_completed(from_req, true))
return 0; return 0;
ret = i915_gem_check_olr(from_req);
if (ret)
return ret;
if (!i915_semaphore_is_enabled(obj->base.dev)) { if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req, ret = __i915_wait_request(from_req,
......
...@@ -11476,12 +11476,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -11476,12 +11476,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_gem_request_assign(&work->flip_queued_req, i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req); obj->last_write_req);
} else { } else {
if (obj->last_write_req) {
ret = i915_gem_check_olr(obj->last_write_req);
if (ret)
goto cleanup_unpin;
}
if (!request) { if (!request) {
ret = i915_gem_request_alloc(ring, ring->default_context, &request); ret = i915_gem_request_alloc(ring, ring->default_context, &request);
if (ret) if (ret)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment