Commit 6d820003 authored by Dave Airlie

drm/ttm: drop move notify around move.

The drivers now do this in their move callbacks.

move_notify is still needed in the destroy path.
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201020010319.1692445-7-airlied@gmail.com
parent 28ee846e
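
For context, every driver move callback in the diff below adopts the same pattern: notify the driver of the new placement up front, and if the move fails, swap bo->mem with the target resource so move_notify can be replayed with the original placement, then swap back. A minimal sketch of that pattern follows; foo_bo_move() and foo_bo_move_notify() are illustrative placeholders, not functions added by this patch.

/*
 * Illustrative sketch only: foo_bo_move()/foo_bo_move_notify() stand in for
 * the per-driver functions touched by this patch (amdgpu, nouveau, qxl,
 * radeon, vmwgfx, the VRAM helper).
 */
static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem)
{
        int ret;

        /* Tell the driver about the new placement before moving. */
        foo_bo_move_notify(bo, evict, new_mem);

        ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
        if (ret) {
                /*
                 * The move failed and the BO keeps its old placement, so
                 * replay the notification with the original resource:
                 * swap in the old placement, notify, then swap back so
                 * the caller's state is left untouched.
                 */
                swap(*new_mem, bo->mem);
                foo_bo_move_notify(bo, false, new_mem);
                swap(*new_mem, bo->mem);
        }
        return ret;
}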
@@ -666,6 +666,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
         struct ttm_resource *old_mem = &bo->mem;
         int r;

+        amdgpu_bo_move_notify(bo, evict, new_mem);
+
         /* Can't move a pinned BO */
         abo = ttm_to_amdgpu_bo(bo);
         if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
@@ -687,7 +689,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
             new_mem->mem_type == TTM_PL_SYSTEM) {
                 r = ttm_bo_wait_ctx(bo, ctx);
                 if (r)
-                        return r;
+                        goto fail;

                 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
                 ttm_resource_free(bo, &bo->mem);
@@ -728,12 +730,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                 if (!amdgpu_mem_visible(adev, old_mem) ||
                     !amdgpu_mem_visible(adev, new_mem)) {
                         pr_err("Move buffer fallback to memcpy unavailable\n");
-                        return r;
+                        goto fail;
                 }

                 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
                 if (r)
-                        return r;
+                        goto fail;
         }
@@ -748,6 +750,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
         /* update statistics */
         atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
         return 0;
+
+fail:
+        swap(*new_mem, bo->mem);
+        amdgpu_bo_move_notify(bo, false, new_mem);
+        swap(*new_mem, bo->mem);
+        return r;
 }

 /**
......
@@ -590,7 +590,16 @@ static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
                                        struct ttm_operation_ctx *ctx,
                                        struct ttm_resource *new_mem)
 {
-        return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
+        int ret;
+
+        drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+        ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
+        if (ret) {
+                swap(*new_mem, gbo->bo.mem);
+                drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem);
+                swap(*new_mem, gbo->bo.mem);
+        }
+        return ret;
 }

 /*
......
@@ -1032,9 +1032,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
         struct nouveau_drm_tile *new_tile = NULL;
         int ret = 0;

+        nouveau_bo_move_ntfy(bo, evict, new_reg);
         ret = ttm_bo_wait_ctx(bo, ctx);
         if (ret)
-                return ret;
+                goto out_ntfy;

         if (nvbo->bo.pin_count)
                 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
@@ -1042,7 +1043,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
         if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
                 if (ret)
-                        return ret;
+                        goto out_ntfy;
         }

         /* Fake bo copy. */
@@ -1090,7 +1091,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
                 else
                         nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
         }
+out_ntfy:
+        if (ret) {
+                swap(*new_reg, bo->mem);
+                nouveau_bo_move_ntfy(bo, false, new_reg);
+                swap(*new_reg, bo->mem);
+        }
         return ret;
 }
......
@@ -136,24 +136,6 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
         return ttm;
 }

-static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
-                       struct ttm_operation_ctx *ctx,
-                       struct ttm_resource *new_mem)
-{
-        struct ttm_resource *old_mem = &bo->mem;
-        int ret;
-
-        ret = ttm_bo_wait_ctx(bo, ctx);
-        if (ret)
-                return ret;
-
-        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
-                ttm_bo_move_null(bo, new_mem);
-                return 0;
-        }
-        return ttm_bo_move_memcpy(bo, ctx, new_mem);
-}
-
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
                                bool evict,
                                struct ttm_resource *new_mem)
@@ -170,6 +152,33 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
         qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }

+static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
+                       struct ttm_operation_ctx *ctx,
+                       struct ttm_resource *new_mem)
+{
+        struct ttm_resource *old_mem = &bo->mem;
+        int ret;
+
+        qxl_bo_move_notify(bo, evict, new_mem);
+
+        ret = ttm_bo_wait_ctx(bo, ctx);
+        if (ret)
+                goto out;
+
+        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+                ttm_bo_move_null(bo, new_mem);
+                return 0;
+        }
+        ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+out:
+        if (ret) {
+                swap(*new_mem, bo->mem);
+                qxl_bo_move_notify(bo, false, new_mem);
+                swap(*new_mem, bo->mem);
+        }
+        return ret;
+}
+
 static struct ttm_bo_driver qxl_bo_driver = {
         .ttm_tt_create = &qxl_ttm_tt_create,
         .ttm_tt_bind = &qxl_ttm_backend_bind,
......
@@ -311,9 +311,11 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
         struct ttm_resource *old_mem = &bo->mem;
         int r;

+        radeon_bo_move_notify(bo, evict, new_mem);
+
         r = ttm_bo_wait_ctx(bo, ctx);
         if (r)
-                return r;
+                goto fail;

         /* Can't move a pinned BO */
         rbo = container_of(bo, struct radeon_bo, tbo);
@@ -359,13 +361,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 memcpy:
                 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
                 if (r) {
-                        return r;
+                        goto fail;
                 }
         }

         /* update statistics */
         atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
         return 0;
+
+fail:
+        swap(*new_mem, bo->mem);
+        radeon_bo_move_notify(bo, false, new_mem);
+        swap(*new_mem, bo->mem);
+        return r;
 }

 static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
......
@@ -263,19 +263,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                 }
         }

-        if (bdev->driver->move_notify)
-                bdev->driver->move_notify(bo, evict, mem);
-
         ret = bdev->driver->move(bo, evict, ctx, mem);
-        if (ret) {
-                if (bdev->driver->move_notify) {
-                        swap(*mem, bo->mem);
-                        bdev->driver->move_notify(bo, false, mem);
-                        swap(*mem, bo->mem);
-                }
+        if (ret)
                 goto out_err;
-        }

         ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
         return 0;
......
@@ -737,6 +737,8 @@ static int vmw_move(struct ttm_buffer_object *bo,
         struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
         int ret;

+        vmw_move_notify(bo, evict, new_mem);
+
         if (old_man->use_tt && new_man->use_tt) {
                 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                         ttm_bo_assign_mem(bo, new_mem);
@@ -744,15 +746,23 @@ static int vmw_move(struct ttm_buffer_object *bo,
                 }
                 ret = ttm_bo_wait_ctx(bo, ctx);
                 if (ret)
-                        return ret;
+                        goto fail;

                 vmw_ttm_unbind(bo->bdev, bo->ttm);
                 ttm_resource_free(bo, &bo->mem);
                 ttm_bo_assign_mem(bo, new_mem);
                 return 0;
         } else {
-                return ttm_bo_move_memcpy(bo, ctx, new_mem);
+                ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+                if (ret)
+                        goto fail;
         }
+        return 0;
+fail:
+        swap(*new_mem, bo->mem);
+        vmw_move_notify(bo, false, new_mem);
+        swap(*new_mem, bo->mem);
+        return ret;
 }

 struct ttm_bo_driver vmw_bo_driver = {
......