Commit 58b4d720 authored by Maarten Lankhorst

drm/ttm: add interruptible parameter to ttm_eu_reserve_buffers

It seems some drivers really want this as a parameter,
like vmwgfx.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
parent dd7cfd64
...@@ -159,7 +159,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) ...@@ -159,7 +159,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos)) if (list_is_singular(&release->bos))
return 0; return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
if (ret) if (ret)
return ret; return ret;
......
...@@ -482,7 +482,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, ...@@ -482,7 +482,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
r = ttm_eu_reserve_buffers(ticket, head); r = ttm_eu_reserve_buffers(ticket, head, true);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
return r; return r;
} }
......
...@@ -399,7 +399,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, ...@@ -399,7 +399,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
INIT_LIST_HEAD(&head); INIT_LIST_HEAD(&head);
list_add(&tv.head, &head); list_add(&tv.head, &head);
r = ttm_eu_reserve_buffers(&ticket, &head); r = ttm_eu_reserve_buffers(&ticket, &head, true);
if (r) if (r)
return r; return r;
......
...@@ -112,7 +112,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); ...@@ -112,7 +112,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
*/ */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list) struct list_head *list, bool intr)
{ {
struct ttm_bo_global *glob; struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry; struct ttm_validate_buffer *entry;
...@@ -140,7 +140,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ...@@ -140,7 +140,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (entry->reserved) if (entry->reserved)
continue; continue;
ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true, ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
ticket); ticket);
if (ret == -EDEADLK) { if (ret == -EDEADLK) {
...@@ -153,13 +153,17 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ...@@ -153,13 +153,17 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ttm_eu_backoff_reservation_locked(list); ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list); ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
ticket); if (intr) {
if (unlikely(ret != 0)) { ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
if (ret == -EINTR) ticket);
ret = -ERESTARTSYS; if (unlikely(ret != 0)) {
goto err_fini; if (ret == -EINTR)
} ret = -ERESTARTSYS;
goto err_fini;
}
} else
ww_mutex_lock_slow(&bo->resv->lock, ticket);
entry->reserved = true; entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
......
...@@ -2496,7 +2496,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -2496,7 +2496,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err_nores; goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; goto out_err;
...@@ -2684,10 +2684,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ...@@ -2684,10 +2684,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
list_add_tail(&query_val.head, &validate_list); list_add_tail(&query_val.head, &validate_list);
do { ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
} while (ret == -ERESTARTSYS);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv); vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve; goto out_no_reserve;
......
...@@ -1216,7 +1216,7 @@ vmw_resource_check_buffer(struct vmw_resource *res, ...@@ -1216,7 +1216,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
INIT_LIST_HEAD(&val_list); INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base); val_buf->bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf->head, &val_list); list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(NULL, &val_list); ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_reserve; goto out_no_reserve;
......
...@@ -73,6 +73,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, ...@@ -73,6 +73,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
* non-blocking reserves should be tried. * non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs. * @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
* *
* Tries to reserve bos pointed to by the list entries for validation. * Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced", * If the function returns 0, all buffers are marked as "unfenced",
...@@ -84,9 +85,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, ...@@ -84,9 +85,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* CPU write reservations to be cleared, and for other threads to * CPU write reservations to be cleared, and for other threads to
* unreserve their buffers. * unreserve their buffers.
* *
* This function may return -ERESTART or -EAGAIN if the calling process * If intr is set to true, this function may return -ERESTARTSYS if the
* receives a signal while waiting. In that case, no buffers on the list * calling process receives a signal while waiting. In that case, no
* will be reserved upon return. * buffers on the list will be reserved upon return.
* *
* Buffers reserved by this function should be unreserved by * Buffers reserved by this function should be unreserved by
* a call to either ttm_eu_backoff_reservation() or * a call to either ttm_eu_backoff_reservation() or
...@@ -95,7 +96,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, ...@@ -95,7 +96,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
*/ */
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list); struct list_head *list, bool intr);
/** /**
* function ttm_eu_fence_buffer_objects. * function ttm_eu_fence_buffer_objects.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment