Commit 3756310e authored by Thomas Hellström, committed by Christian König

drm/ttm: Use the LRU walker for eviction

Use the LRU walker for eviction. This helps
remove a lot of code with weird locking
semantics.

The functionality is slightly changed so that
when trylocked buffer objects are exhausted, we
continue to interleave walks with ticket-locks
while progress is still being made (see the
sketch below). The list walks are not restarted
in between evictions.
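
The resulting retry pattern looks like this (a condensed
sketch of ttm_bo_evict_alloc() from the diff below; setup and
error handling trimmed):

	/* First pass: only trylock objects, to avoid blocking. */
	evict_walk.walk.trylock_only = true;
	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	if (lret || !ticket)
		goto out;

	/* With a ticket, keep walking while evictions make progress. */
	evict_walk.walk.trylock_only = false;
	do {
		/* The walk may clear the ticket field; restore it. */
		evict_walk.walk.ticket = ticket;
		evict_walk.evicted = 0;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	} while (!lret && evict_walk.evicted);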

Also provide a separate ttm_bo_evict_first()
function for its single user. The context of that
user allows sleeping dma_resv locks.
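
Its single caller, ttm_resource_manager_evict_all(), can then
drain the manager with a plain loop (condensed from the
ttm_resource_manager_evict_all() hunk below):

	do {
		/* Sleeping dma_resv locks are fine in this context. */
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);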

v6:
- Various cleanups suggested by Matthew Brost.
- Fix error return code of ttm_bo_evict_first(). (Matthew Brost)
- Fix an error check that was inverted. (Matthew Brost)
v7:
- Use s64 rather than long (Christian König)
- Early ttm_resource_cursor_fini() in ttm_bo_evict_first().
- Simplify check for bo_moved in ttm_bo_evict_first().
  (Christian König)
- Don't evict pinned bos.

Cc: Christian König <christian.koenig@amd.com>
Cc: Somalapuram Amaranath <Amaranath.Somalapuram@amd.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com> #v6
Link: https://patchwork.freedesktop.org/patch/msgid/20240705153206.68526-8-thomas.hellstrom@linux.intel.com
Signed-off-by: Christian König <christian.koenig@amd.com>
parent 10efe34d
...
@@ -224,80 +224,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 	dma_resv_iter_end(&cursor);
 }
 
-/**
- * ttm_bo_cleanup_refs
- * If bo idle, remove from lru lists, and unref.
- * If not idle, block if possible.
- *
- * Must be called with lru_lock and reservation held, this function
- * will drop the lru lock and optionally the reservation lock before returning.
- *
- * @bo: The buffer object to clean-up
- * @interruptible: Any sleeps should occur interruptibly.
- * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
- * @unlock_resv: Unlock the reservation lock as well.
- */
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
-			       bool interruptible, bool no_wait_gpu,
-			       bool unlock_resv)
-{
-	struct dma_resv *resv = &bo->base._resv;
-	int ret;
-
-	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
-		ret = 0;
-	else
-		ret = -EBUSY;
-
-	if (ret && !no_wait_gpu) {
-		long lret;
-
-		if (unlock_resv)
-			dma_resv_unlock(bo->base.resv);
-		spin_unlock(&bo->bdev->lru_lock);
-
-		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
-					     interruptible,
-					     30 * HZ);
-
-		if (lret < 0)
-			return lret;
-		else if (lret == 0)
-			return -EBUSY;
-
-		spin_lock(&bo->bdev->lru_lock);
-		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
-			/*
-			 * We raced, and lost, someone else holds the reservation now,
-			 * and is probably busy in ttm_bo_cleanup_memtype_use.
-			 *
-			 * Even if it's not the case, because we finished waiting any
-			 * delayed destruction would succeed, so just return success
-			 * here.
-			 */
-			spin_unlock(&bo->bdev->lru_lock);
-			return 0;
-		}
-
-		ret = 0;
-	}
-
-	if (ret) {
-		if (unlock_resv)
-			dma_resv_unlock(bo->base.resv);
-		spin_unlock(&bo->bdev->lru_lock);
-		return ret;
-	}
-
-	spin_unlock(&bo->bdev->lru_lock);
-	ttm_bo_cleanup_memtype_use(bo);
-
-	if (unlock_resv)
-		dma_resv_unlock(bo->base.resv);
-
-	return 0;
-}
-
 /*
  * Block for the dma_resv object to become idle, lock the buffer and clean up
  * the resource and tt object.
...
@@ -505,151 +431,152 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
 
-/*
- * Check the target bo is allowable to be evicted or swapout, including cases:
- *
- * a. if share same reservation object with ctx->resv, have assumption
- * reservation objects should already be locked, so not lock again and
- * return true directly when either the opreation allow_reserved_eviction
- * or the target bo already is in delayed free list;
- *
- * b. Otherwise, trylock it.
- */
-static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
-					   struct ttm_operation_ctx *ctx,
-					   const struct ttm_place *place,
-					   bool *locked, bool *busy)
-{
-	bool ret = false;
-
-	if (bo->pin_count) {
-		*locked = false;
-		if (busy)
-			*busy = false;
-		return false;
-	}
-
-	if (bo->base.resv == ctx->resv) {
-		dma_resv_assert_held(bo->base.resv);
-		if (ctx->allow_res_evict)
-			ret = true;
-		*locked = false;
-		if (busy)
-			*busy = false;
-	} else {
-		ret = dma_resv_trylock(bo->base.resv);
-		*locked = ret;
-		if (busy)
-			*busy = !ret;
-	}
-
-	if (ret && place && (bo->resource->mem_type != place->mem_type ||
-		!bo->bdev->funcs->eviction_valuable(bo, place))) {
-		ret = false;
-		if (*locked) {
-			dma_resv_unlock(bo->base.resv);
-			*locked = false;
-		}
-	}
-
-	return ret;
-}
-
-/**
- * ttm_mem_evict_wait_busy - wait for a busy BO to become available
- *
- * @busy_bo: BO which couldn't be locked with trylock
- * @ctx: operation context
- * @ticket: acquire ticket
- *
- * Try to lock a busy buffer object to avoid failing eviction.
- */
-static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
-				   struct ttm_operation_ctx *ctx,
-				   struct ww_acquire_ctx *ticket)
-{
-	int r;
-
-	if (!busy_bo || !ticket)
-		return -EBUSY;
-
-	if (ctx->interruptible)
-		r = dma_resv_lock_interruptible(busy_bo->base.resv,
-						ticket);
-	else
-		r = dma_resv_lock(busy_bo->base.resv, ticket);
-
-	/*
-	 * TODO: It would be better to keep the BO locked until allocation is at
-	 * least tried one more time, but that would mean a much larger rework
-	 * of TTM.
-	 */
-	if (!r)
-		dma_resv_unlock(busy_bo->base.resv);
-
-	return r == -EDEADLK ? -EBUSY : r;
-}
-
-int ttm_mem_evict_first(struct ttm_device *bdev,
-			struct ttm_resource_manager *man,
-			const struct ttm_place *place,
-			struct ttm_operation_ctx *ctx,
-			struct ww_acquire_ctx *ticket)
-{
-	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
-	struct ttm_resource_cursor cursor;
-	struct ttm_resource *res;
-	bool locked = false;
-	int ret;
-
-	spin_lock(&bdev->lru_lock);
-	ttm_resource_manager_for_each_res(man, &cursor, res) {
-		bool busy;
-
-		if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
-						    &locked, &busy)) {
-			if (busy && !busy_bo && ticket !=
-			    dma_resv_locking_ctx(res->bo->base.resv))
-				busy_bo = res->bo;
-			continue;
-		}
-
-		if (ttm_bo_get_unless_zero(res->bo)) {
-			bo = res->bo;
-			break;
-		}
-		if (locked)
-			dma_resv_unlock(res->bo->base.resv);
-	}
-	ttm_resource_cursor_fini(&cursor);
-
-	if (!bo) {
-		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
-			busy_bo = NULL;
-		spin_unlock(&bdev->lru_lock);
-		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
-		if (busy_bo)
-			ttm_bo_put(busy_bo);
-		return ret;
-	}
-
-	if (bo->deleted) {
-		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
-					  ctx->no_wait_gpu, locked);
-		ttm_bo_put(bo);
-		return ret;
-	}
-
-	spin_unlock(&bdev->lru_lock);
-
-	ret = ttm_bo_evict(bo, ctx);
-	if (locked)
-		ttm_bo_unreserve(bo);
-	else
-		ttm_bo_move_to_lru_tail_unlocked(bo);
-
-	ttm_bo_put(bo);
-	return ret;
-}
+/**
+ * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
+ * @bdev: The ttm device.
+ * @man: The manager whose bo to evict.
+ * @ctx: The TTM operation ctx governing the eviction.
+ *
+ * Return: 0 if successful or the resource disappeared. Negative error code on error.
+ */
+int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
+		       struct ttm_operation_ctx *ctx)
+{
+	struct ttm_resource_cursor cursor;
+	struct ttm_buffer_object *bo;
+	struct ttm_resource *res;
+	unsigned int mem_type;
+	int ret = 0;
+
+	spin_lock(&bdev->lru_lock);
+	res = ttm_resource_manager_first(man, &cursor);
+	ttm_resource_cursor_fini(&cursor);
+	if (!res) {
+		ret = -ENOENT;
+		goto out_no_ref;
+	}
+	bo = res->bo;
+	if (!ttm_bo_get_unless_zero(bo))
+		goto out_no_ref;
+	mem_type = res->mem_type;
+	spin_unlock(&bdev->lru_lock);
+	ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
+	if (ret)
+		goto out_no_lock;
+	if (!bo->resource || bo->resource->mem_type != mem_type)
+		goto out_bo_moved;
+
+	if (bo->deleted) {
+		ret = ttm_bo_wait_ctx(bo, ctx);
+		if (!ret)
+			ttm_bo_cleanup_memtype_use(bo);
+	} else {
+		ret = ttm_bo_evict(bo, ctx);
+	}
+out_bo_moved:
+	dma_resv_unlock(bo->base.resv);
+out_no_lock:
+	ttm_bo_put(bo);
+	return ret;
+
+out_no_ref:
+	spin_unlock(&bdev->lru_lock);
+	return ret;
+}
+
+/**
+ * struct ttm_bo_evict_walk - Parameters for the evict walk.
+ */
+struct ttm_bo_evict_walk {
+	/** @walk: The walk base parameters. */
+	struct ttm_lru_walk walk;
+	/** @place: The place passed to the resource allocation. */
+	const struct ttm_place *place;
+	/** @evictor: The buffer object we're trying to make room for. */
+	struct ttm_buffer_object *evictor;
+	/** @res: The allocated resource if any. */
+	struct ttm_resource **res;
+	/** @evicted: Number of successful evictions. */
+	unsigned long evicted;
+};
+
+static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_evict_walk *evict_walk =
+		container_of(walk, typeof(*evict_walk), walk);
+	s64 lret;
+
+	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
+		return 0;
+
+	if (bo->deleted) {
+		lret = ttm_bo_wait_ctx(bo, walk->ctx);
+		if (!lret)
+			ttm_bo_cleanup_memtype_use(bo);
+	} else {
+		lret = ttm_bo_evict(bo, walk->ctx);
+	}
+
+	if (lret)
+		goto out;
+
+	evict_walk->evicted++;
+	if (evict_walk->res)
+		lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
+					  evict_walk->res);
+	if (lret == 0)
+		return 1;
+out:
+	/* Errors that should terminate the walk. */
+	if (lret == -ENOSPC)
+		return -EBUSY;
+
+	return lret;
+}
+
+static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
+	.process_bo = ttm_bo_evict_cb,
+};
+
+static int ttm_bo_evict_alloc(struct ttm_device *bdev,
+			      struct ttm_resource_manager *man,
+			      const struct ttm_place *place,
+			      struct ttm_buffer_object *evictor,
+			      struct ttm_operation_ctx *ctx,
+			      struct ww_acquire_ctx *ticket,
+			      struct ttm_resource **res)
+{
+	struct ttm_bo_evict_walk evict_walk = {
+		.walk = {
+			.ops = &ttm_evict_walk_ops,
+			.ctx = ctx,
+			.ticket = ticket,
+		},
+		.place = place,
+		.evictor = evictor,
+		.res = res,
+	};
+	s64 lret;
+
+	evict_walk.walk.trylock_only = true;
+	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+	if (lret || !ticket)
+		goto out;
+
+	/* If ticket-locking, repeat while making progress. */
+	evict_walk.walk.trylock_only = false;
+	do {
+		/* The walk may clear the evict_walk.walk.ticket field */
+		evict_walk.walk.ticket = ticket;
+		evict_walk.evicted = 0;
+		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+	} while (!lret && evict_walk.evicted);
+out:
+	if (lret < 0)
+		return lret;
+	if (lret == 0)
+		return -EBUSY;
+	return 0;
+}
 
 /**
...
@@ -760,6 +687,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
 	for (i = 0; i < placement->num_placement; ++i) {
 		const struct ttm_place *place = &placement->placement[i];
 		struct ttm_resource_manager *man;
+		bool may_evict;
 
 		man = ttm_manager_type(bdev, place->mem_type);
 		if (!man || !ttm_resource_manager_used(man))
...
@@ -769,22 +697,21 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
 				    TTM_PL_FLAG_FALLBACK))
 			continue;
 
-		do {
-			ret = ttm_resource_alloc(bo, place, res);
-			if (unlikely(ret && ret != -ENOSPC))
+		may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
+		ret = ttm_resource_alloc(bo, place, res);
+		if (ret) {
+			if (ret != -ENOSPC)
 				return ret;
-			if (likely(!ret) || !force_space)
-				break;
-
-			ret = ttm_mem_evict_first(bdev, man, place, ctx,
-						  ticket);
-			if (unlikely(ret == -EBUSY))
-				break;
-			if (unlikely(ret))
+			if (!may_evict)
+				continue;
+
+			ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
+						 ticket, res);
+			if (ret == -EBUSY)
+				continue;
+			if (ret)
 				return ret;
-		} while (1);
-		if (ret)
-			continue;
+		}
 
 		ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
 		if (unlikely(ret)) {
...
...
@@ -495,24 +495,11 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
 	};
 	struct dma_fence *fence;
 	int ret;
-	unsigned i;
 
-	/*
-	 * Can't use standard list traversal since we're unlocking.
-	 */
-
-	spin_lock(&bdev->lru_lock);
-	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-		while (!list_empty(&man->lru[i])) {
-			spin_unlock(&bdev->lru_lock);
-			ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
-						  NULL);
-			if (ret)
-				return ret;
-			spin_lock(&bdev->lru_lock);
-		}
-	}
-	spin_unlock(&bdev->lru_lock);
+	do {
+		ret = ttm_bo_evict_first(bdev, man, &ctx);
+		cond_resched();
+	} while (!ret);
 
 	spin_lock(&man->move_lock);
 	fence = dma_fence_get(man->move);
...
...
@@ -422,11 +422,9 @@ s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 		   s64 target);
 void ttm_bo_pin(struct ttm_buffer_object *bo);
 void ttm_bo_unpin(struct ttm_buffer_object *bo);
-int ttm_mem_evict_first(struct ttm_device *bdev,
-			struct ttm_resource_manager *man,
-			const struct ttm_place *place,
-			struct ttm_operation_ctx *ctx,
-			struct ww_acquire_ctx *ticket);
+int ttm_bo_evict_first(struct ttm_device *bdev,
+		       struct ttm_resource_manager *man,
+		       struct ttm_operation_ctx *ctx);
 vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 			     struct vm_fault *vmf);
 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
...