Commit 9eca33f4 authored by Dave Airlie

drm/ttm: add wrapper to get manager from bdev.

This will allow different abstractions later.
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-37-airlied@gmail.com
parent 0cf0a798
...@@ -108,7 +108,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, ...@@ -108,7 +108,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
return; return;
drm_printf(&p, " placement[%d]=0x%08X (%d)\n", drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
i, placement->placement[i].flags, mem_type); i, placement->placement[i].flags, mem_type);
man = &bo->bdev->man[mem_type]; man = ttm_manager_type(bo->bdev, mem_type);
ttm_mem_type_manager_debug(man, &p); ttm_mem_type_manager_debug(man, &p);
} }
} }
...@@ -157,7 +157,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, ...@@ -157,7 +157,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
if (mem->placement & TTM_PL_FLAG_NO_EVICT) if (mem->placement & TTM_PL_FLAG_NO_EVICT)
return; return;
man = &bdev->man[mem->mem_type]; man = ttm_manager_type(bdev, mem->mem_type);
list_add_tail(&bo->lru, &man->lru[bo->priority]); list_add_tail(&bo->lru, &man->lru[bo->priority]);
if (man->use_tt && bo->ttm && if (man->use_tt && bo->ttm &&
...@@ -232,7 +232,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) ...@@ -232,7 +232,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv); dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv); dma_resv_assert_held(pos->last->base.resv);
man = &pos->first->bdev->man[TTM_PL_TT]; man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
list_bulk_move_tail(&man->lru[i], &pos->first->lru, list_bulk_move_tail(&man->lru[i], &pos->first->lru,
&pos->last->lru); &pos->last->lru);
} }
...@@ -247,7 +247,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) ...@@ -247,7 +247,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv); dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv); dma_resv_assert_held(pos->last->base.resv);
man = &pos->first->bdev->man[TTM_PL_VRAM]; man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
list_bulk_move_tail(&man->lru[i], &pos->first->lru, list_bulk_move_tail(&man->lru[i], &pos->first->lru,
&pos->last->lru); &pos->last->lru);
} }
...@@ -273,8 +273,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ...@@ -273,8 +273,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx) struct ttm_operation_ctx *ctx)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; struct ttm_mem_type_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; struct ttm_mem_type_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
int ret; int ret;
ret = ttm_mem_io_lock(old_man, true); ret = ttm_mem_io_lock(old_man, true);
...@@ -340,7 +340,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ...@@ -340,7 +340,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
return 0; return 0;
out_err: out_err:
new_man = &bdev->man[bo->mem.mem_type]; new_man = ttm_manager_type(bdev, bo->mem.mem_type);
if (!new_man->use_tt) { if (!new_man->use_tt) {
ttm_tt_destroy(bo->ttm); ttm_tt_destroy(bo->ttm);
bo->ttm = NULL; bo->ttm = NULL;
...@@ -552,7 +552,7 @@ static void ttm_bo_release(struct kref *kref) ...@@ -552,7 +552,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_buffer_object *bo = struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref); container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
size_t acc_size = bo->acc_size; size_t acc_size = bo->acc_size;
int ret; int ret;
...@@ -844,7 +844,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, ...@@ -844,7 +844,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo,
const struct ttm_place *place, const struct ttm_place *place,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem)
{ {
struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type);
mem->mm_node = NULL; mem->mm_node = NULL;
if (!man->func || !man->func->get_node) if (!man->func || !man->func->get_node)
...@@ -855,7 +855,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo, ...@@ -855,7 +855,7 @@ static int ttm_bo_mem_get(struct ttm_buffer_object *bo,
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{ {
struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, mem->mem_type);
if (!man->func || !man->func->put_node) if (!man->func || !man->func->put_node)
return; return;
...@@ -910,7 +910,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, ...@@ -910,7 +910,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx) struct ttm_operation_ctx *ctx)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type);
struct ww_acquire_ctx *ticket; struct ww_acquire_ctx *ticket;
int ret; int ret;
...@@ -1000,7 +1000,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, ...@@ -1000,7 +1000,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
if (ret) if (ret)
return ret; return ret;
man = &bdev->man[mem_type]; man = ttm_manager_type(bdev, mem_type);
if (!man->has_type || !man->use_type) if (!man->has_type || !man->use_type)
return -EBUSY; return -EBUSY;
...@@ -1063,7 +1063,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -1063,7 +1063,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret)) if (unlikely(ret))
goto error; goto error;
man = &bdev->man[mem->mem_type]; man = ttm_manager_type(bdev, mem->mem_type);
ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
if (unlikely(ret)) { if (unlikely(ret)) {
ttm_bo_mem_put(bo, mem); ttm_bo_mem_put(bo, mem);
...@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(ttm_mem_type_manager_force_list_clean); ...@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(ttm_mem_type_manager_force_list_clean);
int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{ {
struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem_type);
if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
pr_err("Illegal memory manager memory type %u\n", mem_type); pr_err("Illegal memory manager memory type %u\n", mem_type);
...@@ -1556,7 +1556,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) ...@@ -1556,7 +1556,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
unsigned i; unsigned i;
struct ttm_mem_type_manager *man; struct ttm_mem_type_manager *man;
man = &bdev->man[TTM_PL_SYSTEM]; man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
ttm_mem_type_manager_disable(man); ttm_mem_type_manager_disable(man);
mutex_lock(&ttm_global_mutex); mutex_lock(&ttm_global_mutex);
...@@ -1583,7 +1583,7 @@ EXPORT_SYMBOL(ttm_bo_device_release); ...@@ -1583,7 +1583,7 @@ EXPORT_SYMBOL(ttm_bo_device_release);
static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
{ {
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_SYSTEM]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
/* /*
* Initialize the system memory buffer type. * Initialize the system memory buffer type.
...@@ -1647,7 +1647,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) ...@@ -1647,7 +1647,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, bo->mem.mem_type);
ttm_mem_io_lock(man, false); ttm_mem_io_lock(man, false);
ttm_bo_unmap_virtual_locked(bo); ttm_bo_unmap_virtual_locked(bo);
......
...@@ -129,7 +129,7 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) ...@@ -129,7 +129,7 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem)
{ {
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type);
int ret; int ret;
if (mem->bus.io_reserved_count++) if (mem->bus.io_reserved_count++)
...@@ -162,7 +162,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, ...@@ -162,7 +162,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{ {
struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
struct ttm_mem_reg *mem = &bo->mem; struct ttm_mem_reg *mem = &bo->mem;
int ret; int ret;
...@@ -195,7 +195,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, ...@@ -195,7 +195,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem, struct ttm_mem_reg *mem,
void **virtual) void **virtual)
{ {
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, mem->mem_type);
int ret; int ret;
void *addr; void *addr;
...@@ -232,7 +232,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, ...@@ -232,7 +232,7 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev,
{ {
struct ttm_mem_type_manager *man; struct ttm_mem_type_manager *man;
man = &bdev->man[mem->mem_type]; man = ttm_manager_type(bdev, mem->mem_type);
if (virtual && mem->bus.addr == NULL) if (virtual && mem->bus.addr == NULL)
iounmap(virtual); iounmap(virtual);
...@@ -303,7 +303,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, ...@@ -303,7 +303,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem) struct ttm_mem_reg *new_mem)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
struct ttm_tt *ttm = bo->ttm; struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg old_copy = *old_mem; struct ttm_mem_reg old_copy = *old_mem;
...@@ -571,7 +571,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, ...@@ -571,7 +571,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
struct ttm_bo_kmap_obj *map) struct ttm_bo_kmap_obj *map)
{ {
struct ttm_mem_type_manager *man = struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type]; ttm_manager_type(bo->bdev, bo->mem.mem_type);
unsigned long offset, size; unsigned long offset, size;
int ret; int ret;
...@@ -601,7 +601,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) ...@@ -601,7 +601,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{ {
struct ttm_buffer_object *bo = map->bo; struct ttm_buffer_object *bo = map->bo;
struct ttm_mem_type_manager *man = struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type]; ttm_manager_type(bo->bdev, bo->mem.mem_type);
if (!map->virtual) if (!map->virtual)
return; return;
...@@ -634,7 +634,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, ...@@ -634,7 +634,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem) struct ttm_mem_reg *new_mem)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; struct ttm_mem_type_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg *old_mem = &bo->mem;
int ret; int ret;
struct ttm_buffer_object *ghost_obj; struct ttm_buffer_object *ghost_obj;
...@@ -697,8 +697,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, ...@@ -697,8 +697,8 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type]; struct ttm_mem_type_manager *from = ttm_manager_type(bdev, old_mem->mem_type);
struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type]; struct ttm_mem_type_manager *to = ttm_manager_type(bdev, new_mem->mem_type);
int ret; int ret;
......
...@@ -282,7 +282,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, ...@@ -282,7 +282,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
vm_fault_t ret = VM_FAULT_NOPAGE; vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address; unsigned long address = vmf->address;
struct ttm_mem_type_manager *man = struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type]; ttm_manager_type(bdev, bo->mem.mem_type);
/* /*
* Refuse to fault imported pages. This should be handled * Refuse to fault imported pages. This should be handled
......
...@@ -444,6 +444,12 @@ struct ttm_bo_device { ...@@ -444,6 +444,12 @@ struct ttm_bo_device {
bool no_retry; bool no_retry;
}; };
/**
 * ttm_manager_type - look up the manager for a memory type on a device
 * @bdev: the buffer object device owning the managers
 * @mem_type: index of the memory type (e.g. TTM_PL_SYSTEM, TTM_PL_TT,
 *            TTM_PL_VRAM); caller must ensure it is a valid index
 *
 * Accessor wrapper around the device's manager array so callers do not
 * reach into @bdev->man directly; per the commit message this is intended
 * to allow different manager abstractions later.
 *
 * Return: pointer to the &struct ttm_mem_type_manager for @mem_type.
 */
static inline struct ttm_mem_type_manager *
ttm_manager_type(struct ttm_bo_device *bdev, int mem_type)
{
	return &bdev->man[mem_type];
}
/** /**
* struct ttm_lru_bulk_move_pos * struct ttm_lru_bulk_move_pos
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment