Commit 7f74d4d3 authored by Omer Shpigelman, committed by Oded Gabbay

habanalabs: re-factor memory module code

Some of the functions in the memory module code were too long and/or
contained multiple operations that are not always done together. Re-factor
the code by dividing those functions to smaller functions which are more
readable and maintainable.
Signed-off-by: Omer Shpigelman <oshpigelman@habana.ai>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 5d101257
...@@ -3935,7 +3935,7 @@ static int goya_parse_cb_no_ext_queue(struct hl_device *hdev, ...@@ -3935,7 +3935,7 @@ static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
return 0; return 0;
dev_err(hdev->dev, dev_err(hdev->dev,
"Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n", "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
parser->user_cb, parser->user_cb_size); parser->user_cb, parser->user_cb_size);
return -EFAULT; return -EFAULT;
......
...@@ -692,7 +692,7 @@ struct hl_ctx_mgr { ...@@ -692,7 +692,7 @@ struct hl_ctx_mgr {
* @sgt: pointer to the scatter-gather table that holds the pages. * @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it. * @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions. * @debugfs_list: node in debugfs list of command submissions.
* @addr: user-space virtual pointer to the start of the memory area. * @addr: user-space virtual address of the start of the memory area.
* @size: size of the memory area to pin & map. * @size: size of the memory area to pin & map.
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise. * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/ */
...@@ -1527,7 +1527,7 @@ void hl_vm_fini(struct hl_device *hdev); ...@@ -1527,7 +1527,7 @@ void hl_vm_fini(struct hl_device *hdev);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr); struct hl_userptr *userptr);
int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr); void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_userptr_delete_list(struct hl_device *hdev, void hl_userptr_delete_list(struct hl_device *hdev,
struct list_head *userptr_list); struct list_head *userptr_list);
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size, bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
......
...@@ -159,20 +159,19 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, ...@@ -159,20 +159,19 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
} }
/* /*
* get_userptr_from_host_va - initialize userptr structure from given host * dma_map_host_va - DMA mapping of the given host virtual address.
* virtual address * @hdev: habanalabs device structure
* * @addr: the host virtual address of the memory area
* @hdev : habanalabs device structure * @size: the size of the memory area
* @args : parameters containing the virtual address and size * @p_userptr: pointer to result userptr structure
* @p_userptr : pointer to result userptr structure
* *
* This function does the following: * This function does the following:
* - Allocate userptr structure * - Allocate userptr structure
* - Pin the given host memory using the userptr structure * - Pin the given host memory using the userptr structure
* - Perform DMA mapping to have the DMA addresses of the pages * - Perform DMA mapping to have the DMA addresses of the pages
*/ */
static int get_userptr_from_host_va(struct hl_device *hdev, static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
struct hl_mem_in *args, struct hl_userptr **p_userptr) struct hl_userptr **p_userptr)
{ {
struct hl_userptr *userptr; struct hl_userptr *userptr;
int rc; int rc;
...@@ -183,8 +182,7 @@ static int get_userptr_from_host_va(struct hl_device *hdev, ...@@ -183,8 +182,7 @@ static int get_userptr_from_host_va(struct hl_device *hdev,
goto userptr_err; goto userptr_err;
} }
rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr, rc = hl_pin_host_memory(hdev, addr, size, userptr);
args->map_host.mem_size, userptr);
if (rc) { if (rc) {
dev_err(hdev->dev, "Failed to pin host memory\n"); dev_err(hdev->dev, "Failed to pin host memory\n");
goto pin_err; goto pin_err;
...@@ -215,16 +213,16 @@ static int get_userptr_from_host_va(struct hl_device *hdev, ...@@ -215,16 +213,16 @@ static int get_userptr_from_host_va(struct hl_device *hdev,
} }
/* /*
* free_userptr - free userptr structure * dma_unmap_host_va - DMA unmapping of the given host virtual address.
* * @hdev: habanalabs device structure
* @hdev : habanalabs device structure * @userptr: userptr to free
* @userptr : userptr to free
* *
* This function does the following: * This function does the following:
* - Unpins the physical pages * - Unpins the physical pages
* - Frees the userptr structure * - Frees the userptr structure
*/ */
static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr) static void dma_unmap_host_va(struct hl_device *hdev,
struct hl_userptr *userptr)
{ {
hl_unpin_host_memory(hdev, userptr); hl_unpin_host_memory(hdev, userptr);
kfree(userptr); kfree(userptr);
...@@ -254,9 +252,8 @@ static void dram_pg_pool_do_release(struct kref *ref) ...@@ -254,9 +252,8 @@ static void dram_pg_pool_do_release(struct kref *ref)
/* /*
* free_phys_pg_pack - free physical page pack * free_phys_pg_pack - free physical page pack
* * @hdev: habanalabs device structure
* @hdev : habanalabs device structure * @phys_pg_pack: physical page pack to free
* @phys_pg_pack : physical page pack to free
* *
* This function does the following: * This function does the following:
* - For DRAM memory only, iterate over the pack and free each physical block * - For DRAM memory only, iterate over the pack and free each physical block
...@@ -632,18 +629,16 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr) ...@@ -632,18 +629,16 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
/* /*
* init_phys_pg_pack_from_userptr - initialize physical page pack from host * init_phys_pg_pack_from_userptr - initialize physical page pack from host
* memory * memory
* * @asid: current context ASID
* @ctx : current context * @userptr: userptr to initialize from
* @userptr : userptr to initialize from * @pphys_pg_pack: result pointer
* @pphys_pg_pack : res pointer
* *
* This function does the following: * This function does the following:
* - Pin the physical pages related to the given virtual block * - Pin the physical pages related to the given virtual block
* - Create a physical page pack from the physical pages related to the given * - Create a physical page pack from the physical pages related to the given
* virtual block * virtual block
*/ */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, static int init_phys_pg_pack_from_userptr(u32 asid, struct hl_userptr *userptr,
struct hl_userptr *userptr,
struct hl_vm_phys_pg_pack **pphys_pg_pack) struct hl_vm_phys_pg_pack **pphys_pg_pack)
{ {
struct hl_vm_phys_pg_pack *phys_pg_pack; struct hl_vm_phys_pg_pack *phys_pg_pack;
...@@ -660,7 +655,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, ...@@ -660,7 +655,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->vm_type = userptr->vm_type; phys_pg_pack->vm_type = userptr->vm_type;
phys_pg_pack->created_from_userptr = true; phys_pg_pack->created_from_userptr = true;
phys_pg_pack->asid = ctx->asid; phys_pg_pack->asid = asid;
atomic_set(&phys_pg_pack->mapping_cnt, 1); atomic_set(&phys_pg_pack->mapping_cnt, 1);
/* Only if all dma_addrs are aligned to 2MB and their /* Only if all dma_addrs are aligned to 2MB and their
...@@ -731,18 +726,17 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, ...@@ -731,18 +726,17 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
} }
/* /*
* map_phys_page_pack - maps the physical page pack * map_phys_pg_pack - maps the physical page pack.
* * @ctx: current context
* @ctx : current context * @vaddr: start address of the virtual area to map from
* @vaddr : start address of the virtual area to map from * @phys_pg_pack: the pack of physical pages to map to
* @phys_pg_pack : the pack of physical pages to map to
* *
* This function does the following: * This function does the following:
* - Maps each chunk of virtual memory to matching physical chunk * - Maps each chunk of virtual memory to matching physical chunk
* - Stores number of successful mappings in the given argument * - Stores number of successful mappings in the given argument
* - Returns 0 on success, error code otherwise. * - Returns 0 on success, error code otherwise
*/ */
static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack) struct hl_vm_phys_pg_pack *phys_pg_pack)
{ {
struct hl_device *hdev = ctx->hdev; struct hl_device *hdev = ctx->hdev;
...@@ -783,6 +777,36 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, ...@@ -783,6 +777,36 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
return rc; return rc;
} }
/*
 * unmap_phys_pg_pack - unmaps the physical page pack
 * @ctx: current context
 * @vaddr: start address of the virtual area to unmap
 * @phys_pg_pack: the pack of physical pages to unmap
 *
 * Walks the pack page by page and tears down each MMU translation.
 * Failures are logged (rate-limited) but do not abort the walk, so every
 * page gets an unmap attempt.
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
		struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u32 pg_size = phys_pg_pack->page_size;
	u64 curr_va = vaddr;
	u64 idx;

	for (idx = 0 ; idx < phys_pg_pack->npages ; idx++) {
		if (hl_mmu_unmap(ctx, curr_va, pg_size))
			dev_warn_ratelimited(hdev->dev,
				"unmap failed for vaddr: 0x%llx\n", curr_va);

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 */
		if (hdev->pldm)
			usleep_range(500, 1000);

		curr_va += pg_size;
	}
}
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
u64 *paddr) u64 *paddr)
{ {
...@@ -839,18 +863,21 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, ...@@ -839,18 +863,21 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
*device_addr = 0; *device_addr = 0;
if (is_userptr) { if (is_userptr) {
rc = get_userptr_from_host_va(hdev, args, &userptr); u64 addr = args->map_host.host_virt_addr,
size = args->map_host.mem_size;
rc = dma_map_host_va(hdev, addr, size, &userptr);
if (rc) { if (rc) {
dev_err(hdev->dev, "failed to get userptr from va\n"); dev_err(hdev->dev, "failed to get userptr from va\n");
return rc; return rc;
} }
rc = init_phys_pg_pack_from_userptr(ctx, userptr, rc = init_phys_pg_pack_from_userptr(ctx->asid, userptr,
&phys_pg_pack); &phys_pg_pack);
if (rc) { if (rc) {
dev_err(hdev->dev, dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n", "unable to init page pack for vaddr 0x%llx\n",
args->map_host.host_virt_addr); addr);
goto init_page_pack_err; goto init_page_pack_err;
} }
...@@ -909,7 +936,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, ...@@ -909,7 +936,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
mutex_lock(&ctx->mmu_lock); mutex_lock(&ctx->mmu_lock);
rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack); rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) { if (rc) {
mutex_unlock(&ctx->mmu_lock); mutex_unlock(&ctx->mmu_lock);
dev_err(hdev->dev, "mapping page pack failed for handle %u\n", dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
...@@ -955,7 +982,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, ...@@ -955,7 +982,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
free_phys_pg_pack(hdev, phys_pg_pack); free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err: init_page_pack_err:
if (is_userptr) if (is_userptr)
free_userptr(hdev, userptr); dma_unmap_host_va(hdev, userptr);
return rc; return rc;
} }
...@@ -977,8 +1004,6 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) ...@@ -977,8 +1004,6 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
struct hl_vm_hash_node *hnode = NULL; struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL; struct hl_userptr *userptr = NULL;
enum vm_type_t *vm_type; enum vm_type_t *vm_type;
u64 next_vaddr, i;
u32 page_size;
bool is_userptr; bool is_userptr;
int rc; int rc;
...@@ -1004,7 +1029,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) ...@@ -1004,7 +1029,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
if (*vm_type == VM_TYPE_USERPTR) { if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true; is_userptr = true;
userptr = hnode->ptr; userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr, rc = init_phys_pg_pack_from_userptr(ctx->asid, userptr,
&phys_pg_pack); &phys_pg_pack);
if (rc) { if (rc) {
dev_err(hdev->dev, dev_err(hdev->dev,
...@@ -1029,24 +1054,11 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) ...@@ -1029,24 +1054,11 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
goto mapping_cnt_err; goto mapping_cnt_err;
} }
page_size = phys_pg_pack->page_size; vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
vaddr &= ~(((u64) page_size) - 1);
next_vaddr = vaddr;
mutex_lock(&ctx->mmu_lock); mutex_lock(&ctx->mmu_lock);
for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) { unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
if (hl_mmu_unmap(ctx, next_vaddr, page_size))
dev_warn_ratelimited(hdev->dev,
"unmap failed for vaddr: 0x%llx\n", next_vaddr);
/* unmapping on Palladium can be really long, so avoid a CPU
* soft lockup bug by sleeping a little between unmapping pages
*/
if (hdev->pldm)
usleep_range(500, 1000);
}
hdev->asic_funcs->mmu_invalidate_cache(hdev, true); hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
...@@ -1064,7 +1076,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) ...@@ -1064,7 +1076,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
if (is_userptr) { if (is_userptr) {
free_phys_pg_pack(hdev, phys_pg_pack); free_phys_pg_pack(hdev, phys_pg_pack);
free_userptr(hdev, userptr); dma_unmap_host_va(hdev, userptr);
} }
return 0; return 0;
...@@ -1203,17 +1215,69 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) ...@@ -1203,17 +1215,69 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
return rc; return rc;
} }
/*
 * get_user_memory - pins the given host memory and builds an SG table for it
 * @hdev: habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @npages: number of pages the area spans
 * @start: page-aligned start address of the area
 * @offset: offset of @addr within its first page
 * @userptr: userptr structure whose vec and sgt fields are filled in
 *
 * Validates the user pointer, pins its pages via a frame vector and creates
 * an SG table (in userptr->sgt, which the caller must have allocated) from
 * the pinned pages. On any failure the pages are unpinned and the frame
 * vector is destroyed before a negative errno is returned.
 */
static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
				u32 npages, u64 start, u32 offset,
				struct hl_userptr *userptr)
{
	int rc;
	/* reject ranges the user is not allowed to access before pinning */
	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
		return -EFAULT;
	}
	userptr->vec = frame_vector_create(npages);
	if (!userptr->vec) {
		dev_err(hdev->dev, "Failed to create frame vector\n");
		return -ENOMEM;
	}
	rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
				userptr->vec);
	/* partial pin (rc >= 0 but != npages) still holds references, so it
	 * must go through put_vaddr_frames; rc < 0 means nothing was pinned
	 */
	if (rc != npages) {
		dev_err(hdev->dev,
			"Failed to map host memory, user ptr probably wrong\n");
		if (rc < 0)
			goto destroy_framevec;
		rc = -EFAULT;
		goto put_framevec;
	}
	if (frame_vector_to_pages(userptr->vec) < 0) {
		dev_err(hdev->dev,
			"Failed to translate frame vector to pages\n");
		rc = -EFAULT;
		goto put_framevec;
	}
	/* GFP_ATOMIC because this path may be reached from the data path;
	 * userptr->sgt itself is allocated by the caller
	 */
	rc = sg_alloc_table_from_pages(userptr->sgt,
					frame_vector_pages(userptr->vec),
					npages, offset, size, GFP_ATOMIC);
	if (rc < 0) {
		dev_err(hdev->dev, "failed to create SG table from pages\n");
		goto put_framevec;
	}
	return 0;
put_framevec:
	put_vaddr_frames(userptr->vec);
destroy_framevec:
	frame_vector_destroy(userptr->vec);
	return rc;
}
/* /*
* hl_pin_host_memory - pins a chunk of host memory * hl_pin_host_memory - pins a chunk of host memory.
* * @hdev: pointer to the habanalabs device structure
* @hdev : pointer to the habanalabs device structure * @addr: the host virtual address of the memory area
* @addr : the user-space virtual address of the memory area * @size: the size of the memory area
* @size : the size of the memory area * @userptr: pointer to hl_userptr structure
* @userptr : pointer to hl_userptr structure
* *
* This function does the following: * This function does the following:
* - Pins the physical pages * - Pins the physical pages
* - Create a SG list from those pages * - Create an SG list from those pages
*/ */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr) struct hl_userptr *userptr)
...@@ -1227,11 +1291,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, ...@@ -1227,11 +1291,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL; return -EINVAL;
} }
if (!access_ok((void __user *) (uintptr_t) addr, size)) {
dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
return -EFAULT;
}
/* /*
* If the combination of the address and size requested for this memory * If the combination of the address and size requested for this memory
* region causes an integer overflow, return error. * region causes an integer overflow, return error.
...@@ -1244,6 +1303,14 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, ...@@ -1244,6 +1303,14 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL; return -EINVAL;
} }
/*
* This function can be called also from data path, hence use atomic
* always as it is not a big allocation.
*/
userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
if (!userptr->sgt)
return -ENOMEM;
start = addr & PAGE_MASK; start = addr & PAGE_MASK;
offset = addr & ~PAGE_MASK; offset = addr & ~PAGE_MASK;
end = PAGE_ALIGN(addr + size); end = PAGE_ALIGN(addr + size);
...@@ -1254,42 +1321,12 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, ...@@ -1254,42 +1321,12 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
userptr->dma_mapped = false; userptr->dma_mapped = false;
INIT_LIST_HEAD(&userptr->job_node); INIT_LIST_HEAD(&userptr->job_node);
userptr->vec = frame_vector_create(npages); rc = get_user_memory(hdev, addr, size, npages, start, offset,
if (!userptr->vec) { userptr);
dev_err(hdev->dev, "Failed to create frame vector\n"); if (rc) {
return -ENOMEM;
}
rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
userptr->vec);
if (rc != npages) {
dev_err(hdev->dev,
"Failed to map host memory, user ptr probably wrong\n");
if (rc < 0)
goto destroy_framevec;
rc = -EFAULT;
goto put_framevec;
}
if (frame_vector_to_pages(userptr->vec) < 0) {
dev_err(hdev->dev, dev_err(hdev->dev,
"Failed to translate frame vector to pages\n"); "failed to get user memory for address 0x%llx\n",
rc = -EFAULT; addr);
goto put_framevec;
}
userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
if (!userptr->sgt) {
rc = -ENOMEM;
goto put_framevec;
}
rc = sg_alloc_table_from_pages(userptr->sgt,
frame_vector_pages(userptr->vec),
npages, offset, size, GFP_ATOMIC);
if (rc < 0) {
dev_err(hdev->dev, "failed to create SG table from pages\n");
goto free_sgt; goto free_sgt;
} }
...@@ -1299,32 +1336,26 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, ...@@ -1299,32 +1336,26 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
free_sgt: free_sgt:
kfree(userptr->sgt); kfree(userptr->sgt);
put_framevec:
put_vaddr_frames(userptr->vec);
destroy_framevec:
frame_vector_destroy(userptr->vec);
return rc; return rc;
} }
/* /*
* hl_unpin_host_memory - unpins a chunk of host memory * hl_unpin_host_memory - unpins a chunk of host memory.
* * @hdev: pointer to the habanalabs device structure
* @hdev : pointer to the habanalabs device structure * @userptr: pointer to hl_userptr structure
* @userptr : pointer to hl_userptr structure
* *
* This function does the following: * This function does the following:
* - Unpins the physical pages related to the host memory * - Unpins the physical pages related to the host memory
* - Free the SG list * - Free the SG list
*/ */
int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{ {
struct page **pages; struct page **pages;
hl_debugfs_remove_userptr(hdev, userptr); hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped) if (userptr->dma_mapped)
hdev->asic_funcs->hl_dma_unmap_sg(hdev, hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
userptr->sgt->sgl,
userptr->sgt->nents, userptr->sgt->nents,
userptr->dir); userptr->dir);
...@@ -1342,8 +1373,6 @@ int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) ...@@ -1342,8 +1373,6 @@ int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
sg_free_table(userptr->sgt); sg_free_table(userptr->sgt);
kfree(userptr->sgt); kfree(userptr->sgt);
return 0;
} }
/* /*
...@@ -1627,7 +1656,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) ...@@ -1627,7 +1656,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i) idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) { if (phys_pg_list->asid == ctx->asid) {
dev_dbg(hdev->dev, dev_dbg(hdev->dev,
"page list 0x%p of asid %d is still alive\n", "page list 0x%px of asid %d is still alive\n",
phys_pg_list, ctx->asid); phys_pg_list, ctx->asid);
atomic64_sub(phys_pg_list->total_size, atomic64_sub(phys_pg_list->total_size,
&hdev->dram_used_mem); &hdev->dram_used_mem);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment