Commit c780b2ee authored by Felix Kuehling, committed by Alex Deucher

drm/amdgpu: Rename kfd_bo_va_list to kfd_mem_attachment

This name is more fitting, especially for the changes coming next to
support multi-GPU systems with proper DMA mappings. Cleaned up the code
and renamed some related functions and variables to improve readability.
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Oak Zeng <Oak.Zeng@amd.com>
Acked-by: Ramesh Errabolu <Ramesh.Errabolu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0a6fb502
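The substance of the patch is easiest to see in the first hunk below: the per-GPU-mapping bookkeeping struct is renamed from kfd_bo_va_list to kfd_mem_attachment, and its untyped void *kgd_dev back-pointer becomes a typed struct amdgpu_device *adev. For orientation, here is a minimal C sketch of the resulting model, one kgd_mem owning one attachment per GPU VM; the list_head and the stub types are simplified stand-ins for the kernel's definitions, an illustration rather than the kernel headers:

/* Illustration only: simplified stand-ins showing how one kgd_mem (a
 * buffer object) owns a list of kfd_mem_attachment entries, one per
 * GPU VM the BO is attached to.
 */
#include <stdbool.h>
#include <stdint.h>

struct list_head { struct list_head *next, *prev; };

struct amdgpu_device;   /* stub: one GPU */
struct amdgpu_bo_va;    /* stub: per-VM mapping state */

struct kfd_mem_attachment {             /* was: struct kfd_bo_va_list */
        struct list_head list;          /* was: bo_list */
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_device *adev;     /* was: void *kgd_dev */
        bool is_mapped;
        uint64_t va;
        uint64_t pte_flags;
};

struct kgd_mem {                        /* one exported buffer object */
        struct list_head attachments;   /* was: bo_va_list */
        /* ...BO, lock and eviction bookkeeping elided... */
};

Note that an AQL queue BO ends up with two attachments in the same VM (the second at va + bo_size), which is why kfd_mem_attach() below takes an is_aql flag.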
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -44,10 +44,10 @@ enum TLB_FLUSH_TYPE {
 
 struct amdgpu_device;
 
-struct kfd_bo_va_list {
-        struct list_head bo_list;
+struct kfd_mem_attachment {
+        struct list_head list;
         struct amdgpu_bo_va *bo_va;
-        void *kgd_dev;
+        struct amdgpu_device *adev;
         bool is_mapped;
         uint64_t va;
         uint64_t pte_flags;
@@ -56,7 +56,7 @@ struct kfd_bo_va_list {
 struct kgd_mem {
         struct mutex lock;
         struct amdgpu_bo *bo;
-        struct list_head bo_va_list;
+        struct list_head attachments;
         /* protected by amdkfd_process_info.lock */
         struct ttm_validate_buffer validate_list;
         struct ttm_validate_buffer resv_list;
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -72,16 +72,16 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
         return (struct amdgpu_device *)kgd;
 }
 
-static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
+static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
                 struct kgd_mem *mem)
 {
-        struct kfd_bo_va_list *entry;
+        struct kfd_mem_attachment *entry;
 
-        list_for_each_entry(entry, &mem->bo_va_list, bo_list)
+        list_for_each_entry(entry, &mem->attachments, list)
                 if (entry->bo_va->base.vm == avm)
-                        return false;
+                        return true;
 
-        return true;
+        return false;
 }
 
 /* Set memory usage limits. Current, limits are
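Two things change in the hunk above besides the name: the walk moves to the renamed attachments list, and the predicate's sense is inverted — check_if_add_bo_to_vm() returned true when no attachment existed, while kfd_mem_is_attached() returns true when one does, so call sites now test the negation. The walk itself is the kernel's intrusive-list idiom: the struct embeds the list node and container_of() recovers the container. A self-contained userspace sketch of how such a predicate works, with simplified macros and stand-in types rather than the real <linux/list.h>:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified intrusive list, standing in for <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                            \
        for (pos = container_of((head)->next, typeof(*pos), member);      \
             &pos->member != (head);                                      \
             pos = container_of(pos->member.next, typeof(*pos), member))

struct vm { int id; };                  /* stand-in for struct amdgpu_vm */
struct attachment {                     /* stand-in for kfd_mem_attachment */
        struct list_head list;
        struct vm *vm;
};

/* Same shape as the renamed predicate: true as soon as an attachment's
 * VM matches, false once the walk falls off the end of the list. */
static bool is_attached(struct list_head *attachments, struct vm *vm)
{
        struct attachment *entry;

        list_for_each_entry(entry, attachments, list)
                if (entry->vm == vm)
                        return true;

        return false;
}

int main(void)
{
        struct list_head attachments = { &attachments, &attachments };
        static struct vm vm0;
        struct attachment a = { .vm = &vm0 };

        /* Link the single attachment into the circular list by hand. */
        a.list.next = &attachments; a.list.prev = &attachments;
        attachments.next = &a.list; attachments.prev = &a.list;

        printf("attached? %d\n", is_attached(&attachments, &vm0));
        return 0;
}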
@@ -475,7 +475,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
         return pte_flags;
 }
 
-/* add_bo_to_vm - Add a BO to a VM
+/* kfd_mem_attach - Add a BO to a VM
  *
  * Everything that needs to bo done only once when a BO is first added
  * to a VM. It can later be mapped and unmapped many times without
@@ -487,15 +487,14 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
  * 4. Alloc page tables and directories if needed
  * 4a. Validate new page tables and directories
  */
-static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
+static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
                 struct amdgpu_vm *vm, bool is_aql,
-                struct kfd_bo_va_list **p_bo_va_entry)
+                struct kfd_mem_attachment **p_attachment)
 {
         int ret;
-        struct kfd_bo_va_list *bo_va_entry;
+        struct kfd_mem_attachment *attachment;
         struct amdgpu_bo *bo = mem->bo;
         uint64_t va = mem->va;
-        struct list_head *list_bo_va = &mem->bo_va_list;
         unsigned long bo_size = bo->tbo.base.size;
 
         if (!va) {
@@ -506,29 +505,29 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
         if (is_aql)
                 va += bo_size;
 
-        bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
-        if (!bo_va_entry)
+        attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+        if (!attachment)
                 return -ENOMEM;
 
         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
                         va + bo_size, vm);
 
         /* Add BO to VM internal data structures*/
-        bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
-        if (!bo_va_entry->bo_va) {
+        attachment->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+        if (!attachment->bo_va) {
                 ret = -EINVAL;
                 pr_err("Failed to add BO object to VM. ret == %d\n",
                                 ret);
                 goto err_vmadd;
         }
 
-        bo_va_entry->va = va;
-        bo_va_entry->pte_flags = get_pte_flags(adev, mem);
-        bo_va_entry->kgd_dev = (void *)adev;
-        list_add(&bo_va_entry->bo_list, list_bo_va);
+        attachment->va = va;
+        attachment->pte_flags = get_pte_flags(adev, mem);
+        attachment->adev = adev;
+        list_add(&attachment->list, &mem->attachments);
 
-        if (p_bo_va_entry)
-                *p_bo_va_entry = bo_va_entry;
+        if (p_attachment)
+                *p_attachment = attachment;
 
         /* Allocate validate page tables if needed */
         ret = vm_validate_pt_pd_bos(vm);
@@ -540,22 +539,20 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
         return 0;
 
 err_alloc_pts:
-        amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
-        list_del(&bo_va_entry->bo_list);
+        amdgpu_vm_bo_rmv(adev, attachment->bo_va);
+        list_del(&attachment->list);
 err_vmadd:
-        kfree(bo_va_entry);
+        kfree(attachment);
         return ret;
 }
 
-static void remove_bo_from_vm(struct amdgpu_device *adev,
-                struct kfd_bo_va_list *entry, unsigned long size)
+static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
 {
-        pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
-                        entry->va,
-                        entry->va + size, entry);
-        amdgpu_vm_bo_rmv(adev, entry->bo_va);
-        list_del(&entry->bo_list);
-        kfree(entry);
+        pr_debug("\t remove VA 0x%llx in entry %p\n",
+                        attachment->va, attachment);
+        amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
+        list_del(&attachment->list);
+        kfree(attachment);
 }
 
 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
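Because each attachment now records its own adev, kfd_mem_detach() needs only the attachment itself: callers no longer cast a void *kgd_dev, and the size parameter goes away since the debug print drops the end address. The error unwind in kfd_mem_attach() above is the usual kernel goto ladder; in isolation the pattern looks like this (a self-contained sketch with stubbed resources, not the driver code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stub "resources" so the unwind ladder can run standalone. */
static void *alloc_attachment(void) { return malloc(1); }
static int   add_bo_to_vm_stub(void) { return 0; }   /* 0 = success */

static int attach_sketch(void)
{
        void *attachment;
        int ret;

        attachment = alloc_attachment();        /* like kzalloc(...) */
        if (!attachment)
                return -ENOMEM;

        ret = add_bo_to_vm_stub();              /* like amdgpu_vm_bo_add(...) */
        if (ret)
                goto err_vmadd;

        /* ...link into mem->attachments, validate page tables... */
        return 0;

err_vmadd:
        free(attachment);                       /* like kfree(...) */
        return ret;
}

int main(void)
{
        printf("attach_sketch() = %d\n", attach_sketch());
        return 0;
}

Each label undoes exactly the steps that succeeded before the failure, in reverse order; the real function has a second label (err_alloc_pts) that additionally unlinks the attachment and removes the bo_va.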
@@ -730,7 +727,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
                 struct bo_vm_reservation_context *ctx)
 {
         struct amdgpu_bo *bo = mem->bo;
-        struct kfd_bo_va_list *entry;
+        struct kfd_mem_attachment *entry;
         unsigned int i;
         int ret;
@@ -742,7 +739,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
         INIT_LIST_HEAD(&ctx->list);
         INIT_LIST_HEAD(&ctx->duplicates);
 
-        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+        list_for_each_entry(entry, &mem->attachments, list) {
                 if ((vm && vm != entry->bo_va->base.vm) ||
                         (entry->is_mapped != map_type
                         && map_type != BO_VM_ALL))
@@ -764,7 +761,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
         i = 0;
-        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
+        list_for_each_entry(entry, &mem->attachments, list) {
                 if ((vm && vm != entry->bo_va->base.vm) ||
                         (entry->is_mapped != map_type
                         && map_type != BO_VM_ALL))
@@ -819,7 +816,7 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
 }
 
 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
-                                struct kfd_bo_va_list *entry,
+                                struct kfd_mem_attachment *entry,
                                 struct amdgpu_sync *sync)
 {
         struct amdgpu_bo_va *bo_va = entry->bo_va;
@@ -835,7 +832,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 }
 
 static int update_gpuvm_pte(struct amdgpu_device *adev,
-                struct kfd_bo_va_list *entry,
+                struct kfd_mem_attachment *entry,
                 struct amdgpu_sync *sync)
 {
         int ret;
@@ -852,7 +849,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 }
 
 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
-                struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
+                struct kfd_mem_attachment *entry, struct amdgpu_sync *sync,
                 bool no_update_pte)
 {
         int ret;
@@ -1196,7 +1193,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                 ret = -ENOMEM;
                 goto err;
         }
-        INIT_LIST_HEAD(&(*mem)->bo_va_list);
+        INIT_LIST_HEAD(&(*mem)->attachments);
         mutex_init(&(*mem)->lock);
         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1285,7 +1282,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 {
         struct amdkfd_process_info *process_info = mem->process_info;
         unsigned long bo_size = mem->bo->tbo.base.size;
-        struct kfd_bo_va_list *entry, *tmp;
+        struct kfd_mem_attachment *entry, *tmp;
         struct bo_vm_reservation_context ctx;
         struct ttm_validate_buffer *bo_list_entry;
         unsigned int mapped_to_gpu_memory;
@@ -1329,9 +1326,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                 mem->va + bo_size * (1 + mem->aql_queue));
 
         /* Remove from VM internal data structures */
-        list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
-                remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
-                                entry, bo_size);
+        list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+                kfd_mem_detach(entry);
 
         ret = unreserve_bo_and_vms(&ctx, false, false);
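Note the _safe iterator in the teardown loop above: kfd_mem_detach() frees the entry being visited, so a plain list_for_each_entry() would read the next pointer out of freed memory. The second cursor caches the successor before the loop body runs. A runnable sketch of the idiom, with simplified macros standing in for <linux/list.h>:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *n, struct list_head *head)
{
        n->next = head->next; n->prev = head;
        head->next->prev = n; head->next = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next; n->next->prev = n->prev;
}

/* 'tmp' caches the next node before the body may free 'pos'. */
#define list_for_each_entry_safe(pos, tmp, head, member)                   \
        for (pos = container_of((head)->next, typeof(*pos), member),       \
             tmp = container_of(pos->member.next, typeof(*pos), member);   \
             &pos->member != (head);                                       \
             pos = tmp,                                                    \
             tmp = container_of(tmp->member.next, typeof(*tmp), member))

struct attachment { struct list_head list; int id; };

int main(void)
{
        struct list_head head = { &head, &head };
        struct attachment *a, *tmp;

        for (int i = 0; i < 3; i++) {
                a = malloc(sizeof(*a));
                a->id = i;
                list_add(&a->list, &head);
        }
        /* Detach and free every entry while iterating, as the patch does. */
        list_for_each_entry_safe(a, tmp, &head, list) {
                printf("detach %d\n", a->id);
                list_del(&a->list);
                free(a);
        }
        return 0;
}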
@@ -1374,10 +1370,10 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
         int ret;
         struct amdgpu_bo *bo;
         uint32_t domain;
-        struct kfd_bo_va_list *entry;
+        struct kfd_mem_attachment *entry;
         struct bo_vm_reservation_context ctx;
-        struct kfd_bo_va_list *bo_va_entry = NULL;
-        struct kfd_bo_va_list *bo_va_entry_aql = NULL;
+        struct kfd_mem_attachment *attachment = NULL;
+        struct kfd_mem_attachment *attachment_aql = NULL;
         unsigned long bo_size;
         bool is_invalid_userptr = false;
@@ -1426,21 +1422,20 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
                 is_invalid_userptr = true;
 
-        if (check_if_add_bo_to_vm(avm, mem)) {
-                ret = add_bo_to_vm(adev, mem, avm, false,
-                                &bo_va_entry);
+        if (!kfd_mem_is_attached(avm, mem)) {
+                ret = kfd_mem_attach(adev, mem, avm, false, &attachment);
                 if (ret)
-                        goto add_bo_to_vm_failed;
+                        goto attach_failed;
                 if (mem->aql_queue) {
-                        ret = add_bo_to_vm(adev, mem, avm,
-                                        true, &bo_va_entry_aql);
+                        ret = kfd_mem_attach(adev, mem, avm, true,
+                                        &attachment_aql);
                         if (ret)
-                                goto add_bo_to_vm_failed_aql;
+                                goto attach_failed_aql;
                 }
         } else {
                 ret = vm_validate_pt_pd_bos(avm);
                 if (unlikely(ret))
-                        goto add_bo_to_vm_failed;
+                        goto attach_failed;
         }
 
         if (mem->mapped_to_gpu_memory == 0 &&
@@ -1456,11 +1451,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                 }
         }
 
-        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-                if (entry->bo_va->base.vm == avm && !entry->is_mapped) {
+        list_for_each_entry(entry, &mem->attachments, list) {
+                if (entry->bo_va->base.vm != avm || entry->is_mapped)
+                        continue;
+
                 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
-                                entry->va, entry->va + bo_size,
-                                entry);
+                                entry->va, entry->va + bo_size, entry);
 
                 ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
                                 is_invalid_userptr);
@@ -1480,7 +1476,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                 pr_debug("\t INC mapping count %d\n",
                                 mem->mapped_to_gpu_memory);
         }
-        }
 
         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
                 amdgpu_bo_fence(bo,
@@ -1491,12 +1486,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
         goto out;
 
 map_bo_to_gpuvm_failed:
-        if (bo_va_entry_aql)
-                remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
-add_bo_to_vm_failed_aql:
-        if (bo_va_entry)
-                remove_bo_from_vm(adev, bo_va_entry, bo_size);
-add_bo_to_vm_failed:
+        if (attachment_aql)
+                kfd_mem_detach(attachment_aql);
+attach_failed_aql:
+        if (attachment)
+                kfd_mem_detach(attachment);
+attach_failed:
         unreserve_bo_and_vms(&ctx, false, false);
 out:
         mutex_unlock(&mem->process_info->lock);
@@ -1511,7 +1506,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
         struct amdkfd_process_info *process_info = avm->process_info;
         unsigned long bo_size = mem->bo->tbo.base.size;
-        struct kfd_bo_va_list *entry;
+        struct kfd_mem_attachment *entry;
         struct bo_vm_reservation_context ctx;
         int ret;
@@ -1535,19 +1530,18 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                 mem->va + bo_size * (1 + mem->aql_queue),
                 avm);
 
-        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
-                if (entry->bo_va->base.vm == avm && entry->is_mapped) {
+        list_for_each_entry(entry, &mem->attachments, list) {
+                if (entry->bo_va->base.vm != avm || !entry->is_mapped)
+                        continue;
+
                 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
-                                entry->va,
-                                entry->va + bo_size,
-                                entry);
+                                entry->va, entry->va + bo_size, entry);
 
                 ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
                 if (ret == 0) {
                         entry->is_mapped = false;
                 } else {
-                        pr_err("failed to unmap VA 0x%llx\n",
-                                        mem->va);
+                        pr_err("failed to unmap VA 0x%llx\n", mem->va);
                         goto unreserve_out;
                 }
@@ -1555,7 +1549,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                 pr_debug("\t DEC mapping count %d\n",
                                 mem->mapped_to_gpu_memory);
         }
-        }
 
         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
          * required.
@@ -1703,7 +1696,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
         if (mmap_offset)
                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
 
-        INIT_LIST_HEAD(&(*mem)->bo_va_list);
+        INIT_LIST_HEAD(&(*mem)->attachments);
         mutex_init(&(*mem)->lock);
         (*mem)->alloc_flags =
@@ -1900,7 +1893,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
         list_for_each_entry_safe(mem, tmp_mem,
                                  &process_info->userptr_inval_list,
                                  validate_list.head) {
-                struct kfd_bo_va_list *bo_va_entry;
+                struct kfd_mem_attachment *attachment;
 
                 bo = mem->bo;
@@ -1923,13 +1916,13 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                  * VM faults if the GPU tries to access the invalid
                  * memory.
                  */
-                list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
-                        if (!bo_va_entry->is_mapped)
+                list_for_each_entry(attachment, &mem->attachments, list) {
+                        if (!attachment->is_mapped)
                                 continue;
 
-                        ret = update_gpuvm_pte((struct amdgpu_device *)
-                                        bo_va_entry->kgd_dev,
-                                        bo_va_entry, &sync);
+                        ret = update_gpuvm_pte(attachment->adev,
+                                        attachment, &sync);
                         if (ret) {
                                 pr_err("%s: update PTE failed\n", __func__);
                                 /* make sure this gets validated again */
@@ -2110,7 +2103,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 
                 struct amdgpu_bo *bo = mem->bo;
                 uint32_t domain = mem->domain;
-                struct kfd_bo_va_list *bo_va_entry;
+                struct kfd_mem_attachment *attachment;
 
                 total_size += amdgpu_bo_size(bo);
@@ -2130,11 +2123,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
                         goto validate_map_fail;
                 }
-                list_for_each_entry(bo_va_entry, &mem->bo_va_list,
-                                    bo_list) {
-                        ret = update_gpuvm_pte((struct amdgpu_device *)
-                                        bo_va_entry->kgd_dev,
-                                        bo_va_entry,
+                list_for_each_entry(attachment, &mem->attachments, list) {
+                        ret = update_gpuvm_pte(attachment->adev, attachment,
                                         &sync_obj);
                         if (ret) {
                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
@@ -2210,7 +2201,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
                 return -ENOMEM;
 
         mutex_init(&(*mem)->lock);
-        INIT_LIST_HEAD(&(*mem)->bo_va_list);
+        INIT_LIST_HEAD(&(*mem)->attachments);
         (*mem)->bo = amdgpu_bo_ref(gws_bo);
         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
         (*mem)->process_info = process_info;
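A final payoff of the rename shows up in every update_gpuvm_pte() caller above: with a typed struct amdgpu_device *adev stored in the attachment, the (struct amdgpu_device *) casts disappear and the compiler can check the argument. A toy illustration with invented types, not the driver's:

/* Toy types standing in for the driver's. */
struct device_sketch { int id; };

struct attachment_before { void *kgd_dev; };              /* untyped */
struct attachment_after  { struct device_sketch *adev; }; /* typed */

static void update_pte(struct device_sketch *dev) { (void)dev; }

int main(void)
{
        struct device_sketch dev = { 0 };
        struct attachment_before before = { &dev };
        struct attachment_after  after  = { &dev };

        /* Before: a cast is required, and a wrong cast still compiles. */
        update_pte((struct device_sketch *)before.kgd_dev);
        /* After: no cast; passing the wrong pointer type is a compile error. */
        update_pte(after.adev);
        return 0;
}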