Commit ecff665f authored by Maarten Lankhorst, committed by Dave Airlie

drm/ttm: make ttm reservation calls behave like reservation calls

This commit converts the source of the val_seq counter to
the ww_mutex api. The reservation objects are converted later,
because there is still a lockdep splat in nouveau that has to
be resolved first.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 786d7257
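For orientation, here is a minimal sketch of the per-submission locking pattern a driver follows after this change, assuming the new ttm_eu_* signatures introduced in the diff below; submit_with_ticket() and do_validation_and_submit() are illustrative placeholders, not functions from this patch.

/*
 * Minimal sketch of the ticket-based reservation flow, under the
 * assumption that the new ttm_eu_* signatures below are in place.
 * submit_with_ticket() and do_validation_and_submit() are hypothetical
 * placeholders, not part of this patch.
 */
#include <linux/list.h>
#include <linux/reservation.h>
#include <drm/ttm/ttm_execbuf_util.h>

int do_validation_and_submit(struct list_head *list);	/* placeholder */

static int submit_with_ticket(struct list_head *validate_list, void *fence)
{
	struct ww_acquire_ctx ticket;	/* one ticket per reserve/unreserve cycle */
	int ret;

	/* ww_acquire_init() now happens inside ttm_eu_reserve_buffers();
	 * on failure the ticket has already been finished. */
	ret = ttm_eu_reserve_buffers(&ticket, validate_list);
	if (ret)
		return ret;

	ret = do_validation_and_submit(validate_list);
	if (ret) {
		/* error after reserving: unreserve the buffers and end the ticket */
		ttm_eu_backoff_reservation(&ticket, validate_list);
		return ret;
	}

	/* success: attach the fence, unreserve and end the ticket */
	ttm_eu_fence_buffer_objects(&ticket, validate_list, fence);
	return 0;
}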
@@ -277,10 +277,12 @@ struct validate_op {
 	struct list_head vram_list;
 	struct list_head gart_list;
 	struct list_head both_list;
+	struct ww_acquire_ctx ticket;
 };
 
 static void
-validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
+		   struct ww_acquire_ctx *ticket)
 {
 	struct list_head *entry, *tmp;
 	struct nouveau_bo *nvbo;
@@ -297,17 +299,24 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
-		ttm_bo_unreserve(&nvbo->bo);
+		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
 		drm_gem_object_unreference_unlocked(nvbo->gem);
 	}
 }
 
 static void
-validate_fini(struct validate_op *op, struct nouveau_fence* fence)
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
 {
-	validate_fini_list(&op->vram_list, fence);
-	validate_fini_list(&op->gart_list, fence);
-	validate_fini_list(&op->both_list, fence);
+	validate_fini_list(&op->vram_list, fence, &op->ticket);
+	validate_fini_list(&op->gart_list, fence, &op->ticket);
+	validate_fini_list(&op->both_list, fence, &op->ticket);
+}
+
+static void
+validate_fini(struct validate_op *op, struct nouveau_fence *fence)
+{
+	validate_fini_no_ticket(op, fence);
+	ww_acquire_fini(&op->ticket);
 }
 
 static int
@@ -317,13 +326,11 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 {
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct drm_device *dev = chan->drm->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	uint32_t sequence;
 	int trycnt = 0;
 	int ret, i;
 	struct nouveau_bo *res_bo = NULL;
 
-	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+	ww_acquire_init(&op->ticket, &reservation_ww_class);
 retry:
 	if (++trycnt > 100000) {
 		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
@@ -338,6 +345,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
 			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
 			return -ENOENT;
 		}
@@ -352,21 +360,23 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 			NV_ERROR(cli, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
 			drm_gem_object_unreference_unlocked(gem);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
 
-		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
 		if (ret) {
-			validate_fini(op, NULL);
+			validate_fini_no_ticket(op, NULL);
 			if (unlikely(ret == -EAGAIN)) {
-				sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
-							      sequence);
+							      &op->ticket);
 				if (!ret)
 					res_bo = nvbo;
 			}
 			if (unlikely(ret)) {
+				ww_acquire_done(&op->ticket);
+				ww_acquire_fini(&op->ticket);
 				drm_gem_object_unreference_unlocked(gem);
 				if (ret != -ERESTARTSYS)
 					NV_ERROR(cli, "fail reserve\n");
@@ -390,6 +400,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
 			list_add_tail(&nvbo->entry, &op->both_list);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
@@ -397,6 +408,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 			goto retry;
 		}
+	ww_acquire_done(&op->ticket);
 	return 0;
 }
...
@@ -979,6 +979,7 @@ struct radeon_cs_parser {
 	u32 cs_flags;
 	u32 ring;
 	s32 priority;
+	struct ww_acquire_ctx ticket;
 };
 
 extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
...
@@ -106,7 +106,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		radeon_bo_list_add_object(&p->relocs[i].lobj,
 					  &p->validated);
 	}
-	return radeon_bo_list_validate(&p->validated, p->ring);
+	return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring);
 }
 
 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -314,15 +314,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
  * If error is set than unvalidate buffer, otherwise just free memory
  * used by parsing context.
  **/
-static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
 {
 	unsigned i;
 
 	if (!error) {
-		ttm_eu_fence_buffer_objects(&parser->validated,
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+					    &parser->validated,
 					    parser->ib.fence);
-	} else {
-		ttm_eu_backoff_reservation(&parser->validated);
+	} else if (backoff) {
+		ttm_eu_backoff_reservation(&parser->ticket,
+					   &parser->validated);
 	}
 
 	if (parser->relocs != NULL) {
@@ -535,7 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	r = radeon_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		radeon_cs_parser_fini(&parser, r);
+		radeon_cs_parser_fini(&parser, r, false);
 		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
@@ -544,7 +546,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
-		radeon_cs_parser_fini(&parser, r);
+		radeon_cs_parser_fini(&parser, r, false);
 		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
@@ -563,7 +565,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		goto out;
 	}
 out:
-	radeon_cs_parser_fini(&parser, r);
+	radeon_cs_parser_fini(&parser, r, true);
 	up_read(&rdev->exclusive_lock);
 	r = radeon_cs_handle_lockup(rdev, r);
 	return r;
...
@@ -349,14 +349,15 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, int ring)
+int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+			    struct list_head *head, int ring)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
 	u32 domain;
 	int r;
 
-	r = ttm_eu_reserve_buffers(head);
+	r = ttm_eu_reserve_buffers(ticket, head);
 	if (unlikely(r != 0)) {
 		return r;
 	}
...
@@ -128,7 +128,8 @@ extern int radeon_bo_init(struct radeon_device *rdev);
 extern void radeon_bo_fini(struct radeon_device *rdev);
 extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, int ring);
+extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+				   struct list_head *head, int ring);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
...
@@ -550,6 +550,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 			       struct radeon_fence **fence)
 {
 	struct ttm_validate_buffer tv;
+	struct ww_acquire_ctx ticket;
 	struct list_head head;
 	struct radeon_ib ib;
 	uint64_t addr;
@@ -561,7 +562,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 	INIT_LIST_HEAD(&head);
 	list_add(&tv.head, &head);
 
-	r = ttm_eu_reserve_buffers(&head);
+	r = ttm_eu_reserve_buffers(&ticket, &head);
 	if (r)
 		return r;
@@ -569,16 +570,12 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 	radeon_uvd_force_into_uvd_segment(bo);
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	if (r) {
-		ttm_eu_backoff_reservation(&head);
-		return r;
-	}
+	if (r)
+		goto err;
 
 	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
-	if (r) {
-		ttm_eu_backoff_reservation(&head);
-		return r;
-	}
+	if (r)
+		goto err;
 
 	addr = radeon_bo_gpu_offset(bo);
 	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
@@ -592,11 +589,9 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 	ib.length_dw = 16;
 
 	r = radeon_ib_schedule(rdev, &ib, NULL);
-	if (r) {
-		ttm_eu_backoff_reservation(&head);
-		return r;
-	}
-	ttm_eu_fence_buffer_objects(&head, ib.fence);
+	if (r)
+		goto err;
+	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
 
 	if (fence)
 		*fence = radeon_fence_ref(ib.fence);
@@ -604,6 +599,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 	radeon_ib_free(rdev, &ib);
 	radeon_bo_unref(&bo);
 	return 0;
+
+err:
+	ttm_eu_backoff_reservation(&ticket, &head);
+	return r;
 }
 
 /* multiple fence commands without any stream commands in between can
...
@@ -215,7 +215,8 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 
 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			 bool interruptible,
-			 bool no_wait, bool use_sequence, uint32_t sequence)
+			 bool no_wait, bool use_ticket,
+			 struct ww_acquire_ctx *ticket)
 {
 	int ret;
@@ -223,17 +224,17 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
-		if (use_sequence && bo->seq_valid) {
+		if (use_ticket && bo->seq_valid) {
 			/**
 			 * We've already reserved this one.
 			 */
-			if (unlikely(sequence == bo->val_seq))
+			if (unlikely(ticket->stamp == bo->val_seq))
 				return -EDEADLK;
 			/**
 			 * Already reserved by a thread that will not back
 			 * off for us. We need to back off.
 			 */
-			if (unlikely(sequence - bo->val_seq < (1 << 31)))
+			if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
 				return -EAGAIN;
 		}
@@ -246,13 +247,14 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
-	if (use_sequence) {
+	if (use_ticket) {
 		bool wake_up = false;
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
-		if (unlikely((bo->val_seq - sequence < (1 << 31))
+		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
			     || !bo->seq_valid))
 			wake_up = true;
@@ -266,7 +268,7 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 		 * written before val_seq was, and just means some slightly
 		 * increased cpu usage
 		 */
-		bo->val_seq = sequence;
+		bo->val_seq = ticket->stamp;
 		bo->seq_valid = true;
 		if (wake_up)
 			wake_up_all(&bo->event_queue);
@@ -292,14 +294,15 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
 
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
-		   bool no_wait, bool use_sequence, uint32_t sequence)
+		   bool no_wait, bool use_ticket,
+		   struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count = 0;
 	int ret;
 
-	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
-				   sequence);
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
+				   ticket);
 	if (likely(ret == 0)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
@@ -311,13 +314,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 }
 
 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
-				  bool interruptible, uint32_t sequence)
+				  bool interruptible,
+				  struct ww_acquire_ctx *ticket)
 {
 	bool wake_up = false;
 	int ret;
 
 	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
 
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
@@ -325,14 +329,14 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 			return ret;
 	}
 
-	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
 		wake_up = true;
 
 	/**
 	 * Wake up waiters that may need to recheck for deadlock,
 	 * if we decreased the sequence number.
 	 */
-	bo->val_seq = sequence;
+	bo->val_seq = ticket->stamp;
 	bo->seq_valid = true;
 	if (wake_up)
 		wake_up_all(&bo->event_queue);
@@ -341,12 +345,12 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 }
 
 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
-			    bool interruptible, uint32_t sequence)
+			    bool interruptible, struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count, ret;
 
-	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
 	if (likely(!ret)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
@@ -357,7 +361,7 @@ int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 
-void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
 {
 	ttm_bo_add_to_lru(bo);
 	atomic_set(&bo->reserved, 0);
@@ -369,11 +373,21 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 	struct ttm_bo_global *glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	ttm_bo_unreserve_locked(bo);
+	ttm_bo_unreserve_ticket_locked(bo, NULL);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
+void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	spin_lock(&glob->lru_lock);
+	ttm_bo_unreserve_ticket_locked(bo, ticket);
+	spin_unlock(&glob->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unreserve_ticket);
+
 /*
  * Call bo->mutex locked.
 */
...
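The ttm_bo_reserve_nolru() changes above keep TTM's own deadlock avoidance working on top of the ticket's stamp: a thread whose ticket is younger than the current holder's backs off with -EAGAIN, an equal stamp means it already holds the buffer (-EDEADLK), and an older ticket keeps waiting. Below is a small user-space sketch of that wraparound-safe comparison, with made-up stamp values; ticket_must_back_off() is a hypothetical helper for illustration, not a TTM symbol.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Same test as the new code above: with val_seq now an unsigned long,
 * "my_stamp - holder_stamp <= LONG_MAX" asks, in wraparound-safe unsigned
 * arithmetic, whether my ticket is the same age or younger than the
 * current holder's, in which case this thread must back off. */
static bool ticket_must_back_off(unsigned long my_stamp, unsigned long holder_stamp)
{
	return my_stamp - holder_stamp <= LONG_MAX;
}

int main(void)
{
	/* younger ticket (larger stamp) backs off for the older holder */
	printf("%d\n", ticket_must_back_off(101, 100));		/* 1: back off */
	/* older ticket keeps waiting; the holder will back off instead */
	printf("%d\n", ticket_must_back_off(100, 101));		/* 0: keep going */
	/* still correct when the stamp counter has wrapped around */
	printf("%d\n", ticket_must_back_off(1, ULONG_MAX));	/* 1: back off */
	return 0;
}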
@@ -32,7 +32,8 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list,
+					      struct ww_acquire_ctx *ticket)
 {
 	struct ttm_validate_buffer *entry;
@@ -41,15 +42,16 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 		if (!entry->reserved)
 			continue;
 
+		entry->reserved = false;
 		if (entry->removed) {
-			ttm_bo_add_to_lru(bo);
+			ttm_bo_unreserve_ticket_locked(bo, ticket);
 			entry->removed = false;
-		}
-		entry->reserved = false;
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		} else {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
+		}
 	}
 }
 
 static void ttm_eu_del_from_lru_locked(struct list_head *list)
@@ -82,7 +84,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+				struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_bo_global *glob;
@@ -93,7 +96,8 @@ void ttm_eu_backoff_reservation(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
+	ttm_eu_backoff_reservation_locked(list, ticket);
+	ww_acquire_fini(ticket);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -110,12 +114,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
 */
 
-int ttm_eu_reserve_buffers(struct list_head *list)
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+			   struct list_head *list)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
-	uint32_t val_seq;
 
 	if (list_empty(list))
 		return 0;
@@ -129,8 +133,8 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
+	ww_acquire_init(ticket, &reservation_ww_class);
 	spin_lock(&glob->lru_lock);
-	val_seq = entry->bo->bdev->val_seq++;
 
retry:
 	list_for_each_entry(entry, list, head) {
@@ -140,7 +144,7 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 		if (entry->reserved)
 			continue;
 
-		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
 		switch (ret) {
 		case 0:
 			break;
@@ -148,8 +152,9 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 			ttm_eu_del_from_lru_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ret = ttm_bo_reserve_nolru(bo, true, false,
-						   true, val_seq);
+						   true, ticket);
 			spin_lock(&glob->lru_lock);
 			if (!ret)
 				break;
@@ -158,21 +163,13 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 			/* fallthrough */
 		case -EAGAIN:
-			ttm_eu_backoff_reservation_locked(list);
+			ttm_eu_backoff_reservation_locked(list, ticket);
-			/*
-			 * temporarily increase sequence number every retry,
-			 * to prevent us from seeing our old reservation
-			 * sequence when someone else reserved the buffer,
-			 * but hasn't updated the seq_valid/seqno members yet.
-			 */
-			val_seq = entry->bo->bdev->val_seq++;
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
 			if (unlikely(ret != 0))
-				return ret;
+				goto err_fini;
 			spin_lock(&glob->lru_lock);
 			entry->reserved = true;
 			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -191,21 +188,25 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 		}
 	}
 
+	ww_acquire_done(ticket);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
 	return 0;
 
err:
-	ttm_eu_backoff_reservation_locked(list);
+	ttm_eu_backoff_reservation_locked(list, ticket);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
+err_fini:
+	ww_acquire_done(ticket);
+	ww_acquire_fini(ticket);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
-void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+				 struct list_head *list, void *sync_obj)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
@@ -228,11 +229,12 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 		bo = entry->bo;
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		ttm_bo_unreserve_locked(bo);
+		ttm_bo_unreserve_ticket_locked(bo, ticket);
 		entry->reserved = false;
 	}
 	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
+	ww_acquire_fini(ticket);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)
...
@@ -1432,6 +1432,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_resource *error_resource;
 	struct list_head resource_list;
+	struct ww_acquire_ctx ticket;
 	uint32_t handle;
 	void *cmd;
 	int ret;
@@ -1488,7 +1489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
+	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
@@ -1537,7 +1538,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
-	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);
 
 	if (unlikely(dev_priv->pinned_bo != NULL &&
@@ -1570,7 +1571,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
out_err:
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
-	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
 	vmw_clear_validations(sw_context);
 	if (unlikely(dev_priv->pinned_bo != NULL &&
@@ -1644,6 +1645,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	struct list_head validate_list;
 	struct ttm_validate_buffer pinned_val, query_val;
 	struct vmw_fence_obj *lfence = NULL;
+	struct ww_acquire_ctx ticket;
 
 	if (dev_priv->pinned_bo == NULL)
 		goto out_unlock;
@@ -1657,7 +1659,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	list_add_tail(&query_val.head, &validate_list);
 
 	do {
-		ret = ttm_eu_reserve_buffers(&validate_list);
+		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
 	} while (ret == -ERESTARTSYS);
 
 	if (unlikely(ret != 0)) {
@@ -1684,7 +1686,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
						  NULL);
 		fence = lfence;
 	}
-	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
 	if (lfence != NULL)
 		vmw_fence_obj_unreference(&lfence);
@@ -1696,7 +1698,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	return;
 
out_no_emit:
-	ttm_eu_backoff_reservation(&validate_list);
+	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
...
@@ -990,7 +990,9 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  * @val_buf:        On successful return contains data about the
  *                  reserved and validated backup buffer.
 */
-int vmw_resource_check_buffer(struct vmw_resource *res,
+static int
+vmw_resource_check_buffer(struct vmw_resource *res,
+			  struct ww_acquire_ctx *ticket,
			      bool interruptible,
			      struct ttm_validate_buffer *val_buf)
 {
@@ -1007,7 +1009,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res,
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(&val_list);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
@@ -1025,7 +1027,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res,
 	return 0;
 
out_no_validate:
-	ttm_eu_backoff_reservation(&val_list);
+	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
 	if (backup_dirty)
@@ -1069,7 +1071,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
 *.
 * @val_buf:        Backup buffer information.
 */
-void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+static void
+vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+				 struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
@@ -1078,7 +1082,7 @@ void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 	INIT_LIST_HEAD(&val_list);
 	list_add_tail(&val_buf->head, &val_list);
-	ttm_eu_backoff_reservation(&val_list);
+	ttm_eu_backoff_reservation(ticket, &val_list);
 	ttm_bo_unref(&val_buf->bo);
 }
@@ -1092,12 +1096,13 @@ int vmw_resource_do_evict(struct vmw_resource *res)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
+	struct ww_acquire_ctx ticket;
 	int ret;
 
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
-	ret = vmw_resource_check_buffer(res, true, &val_buf);
+	ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1112,7 +1117,7 @@ int vmw_resource_do_evict(struct vmw_resource *res)
 	res->backup_dirty = true;
 	res->res_dirty = false;
out_no_unbind:
-	vmw_resource_backoff_reservation(&val_buf);
+	vmw_resource_backoff_reservation(&ticket, &val_buf);
 
 	return ret;
 }
...
@@ -234,7 +234,7 @@ struct ttm_buffer_object {
 	struct list_head ddestroy;
 	struct list_head swap;
 	struct list_head io_reserve_lru;
-	uint32_t val_seq;
+	unsigned long val_seq;
 	bool seq_valid;
 
 	/**
...
@@ -38,6 +38,7 @@
 #include <linux/workqueue.h>
 #include <linux/fs.h>
 #include <linux/spinlock.h>
+#include <linux/reservation.h>
 
 struct ttm_backend_func {
 	/**
@@ -778,7 +779,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_sequence: If @bo is already reserved, Only sleep waiting for
+ * @use_ticket: If @bo is already reserved, Only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
@@ -819,7 +820,8 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
			  bool interruptible,
-			  bool no_wait, bool use_sequence, uint32_t sequence);
+			  bool no_wait, bool use_ticket,
+			  struct ww_acquire_ctx *ticket);
 
/**
 * ttm_bo_reserve_slowpath_nolru:
@@ -836,7 +838,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 */
extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
					 bool interruptible,
-					 uint32_t sequence);
+					 struct ww_acquire_ctx *ticket);
 
/**
@@ -850,7 +852,8 @@ extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
 * held by us, this function cannot deadlock any more.
 */
extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
-				   bool interruptible, uint32_t sequence);
+				   bool interruptible,
+				   struct ww_acquire_ctx *ticket);
 
/**
 * ttm_bo_reserve_nolru:
@@ -876,8 +879,8 @@ extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 */
extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
				bool interruptible,
-				bool no_wait, bool use_sequence,
-				uint32_t sequence);
+				bool no_wait, bool use_ticket,
+				struct ww_acquire_ctx *ticket);
 
/**
 * ttm_bo_unreserve
@@ -889,14 +892,25 @@ extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
 
/**
- * ttm_bo_unreserve_locked
+ * ttm_bo_unreserve_ticket
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @ticket: ww_acquire_ctx used for reserving
+ *
+ * Unreserve a previous reservation of @bo made with @ticket.
+ */
+extern void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
+				    struct ww_acquire_ctx *ticket);
+
+/**
+ * ttm_bo_unreserve_locked
 * @bo: A pointer to a struct ttm_buffer_object.
+ * @ticket: ww_acquire_ctx used for reserving, or NULL
 *
- * Unreserve a previous reservation of @bo.
+ * Unreserve a previous reservation of @bo made with @ticket.
 * Needs to be called with struct ttm_bo_global::lru_lock held.
 */
-extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+extern void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo,
+					   struct ww_acquire_ctx *ticket);
 
/*
 * ttm_bo_util.c
 */
...
@@ -33,6 +33,7 @@
 #include <ttm/ttm_bo_api.h>
 #include <linux/list.h>
+#include <linux/reservation.h>
 
/**
 * struct ttm_validate_buffer
@@ -57,17 +58,20 @@ struct ttm_validate_buffer {
/**
 * function ttm_eu_backoff_reservation
 *
+ * @ticket:   ww_acquire_ctx from reserve call
 * @list:     thread private list of ttm_validate_buffer structs.
 *
 * Undoes all buffer validation reservations for bos pointed to by
 * the list entries.
 */
-extern void ttm_eu_backoff_reservation(struct list_head *list);
+extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+				       struct list_head *list);
 
/**
 * function ttm_eu_reserve_buffers
 *
+ * @ticket:  [out] ww_acquire_ctx returned by call.
 * @list:    thread private list of ttm_validate_buffer structs.
 *
 * Tries to reserve bos pointed to by the list entries for validation.
@@ -90,11 +94,13 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
 * has failed.
 */
-extern int ttm_eu_reserve_buffers(struct list_head *list);
+extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+				  struct list_head *list);
 
/**
 * function ttm_eu_fence_buffer_objects.
 *
+ * @ticket:      ww_acquire_ctx from reserve call
 * @list:        thread private list of ttm_validate_buffer structs.
 * @sync_obj:    The new sync object for the buffers.
 *
@@ -104,6 +110,7 @@ extern int ttm_eu_reserve_buffers(struct list_head *list);
 *
 */
-extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+					struct list_head *list, void *sync_obj);
 
#endif