Commit 0188006d authored by Felix Kuehling, committed by Alex Deucher

drm/amdkfd: Import DMABufs for interop through DRM

Use drm_gem_prime_fd_to_handle to import DMABufs for interop. This
ensures that a GEM handle is created on import and that obj->dma_buf
will be set and remain set as long as the object is imported into KFD.
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Ramesh Errabolu <Ramesh.Errabolu@amd.com>
Reviewed-by: Xiaogang.Chen <Xiaogang.Chen@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 18192001
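For context, a minimal userspace sketch of the interop path this commit serves: a dma-buf fd (for example one exported from a DRM render node via PRIME) is handed to KFD through the AMDKFD_IOC_IMPORT_DMABUF ioctl. This is an illustrative sketch, not part of the patch; the helper name, gpu_id and VA below are placeholders that real runtimes obtain from the KFD topology in sysfs and their own VA management.

/* Hypothetical helper, not part of this patch: import a dma-buf fd into KFD. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int kfd_import_dmabuf_fd(int kfd_fd, int dmabuf_fd, uint32_t gpu_id,
				uint64_t va_addr, uint64_t *handle)
{
	struct kfd_ioctl_import_dmabuf_args args = {
		.va_addr = va_addr,	/* GPUVM address chosen by the runtime */
		.gpu_id = gpu_id,	/* from the KFD topology in sysfs */
		.dmabuf_fd = (uint32_t)dmabuf_fd, /* e.g. from a DRM PRIME export */
	};
	int ret = ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &args);

	if (ret)
		return ret;
	*handle = args.handle;	/* KFD buffer handle for later map/free ioctls */
	return 0;
}

On the kernel side, the same ioctl now routes the fd through drm_gem_prime_fd_to_handle on the KFD client's DRM file, so the imported BO keeps a GEM handle and a valid obj->dma_buf for as long as it stays imported into KFD, as the diff below shows.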
@@ -314,11 +314,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 					     struct dma_fence **ef);
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
 					      struct kfd_vm_fault_info *info);
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
-				      struct dma_buf *dmabuf,
-				      uint64_t va, void *drm_priv,
-				      struct kgd_mem **mem, uint64_t *size,
-				      uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+					 uint64_t va, void *drm_priv,
+					 struct kgd_mem **mem, uint64_t *size,
+					 uint64_t *mmap_offset);
 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
 				      struct dma_buf **dmabuf);
 void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
@@ -1956,8 +1956,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(

 	/* Free the BO*/
 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
-	if (!mem->is_imported)
-		drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
+	drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
 	if (mem->dmabuf) {
 		dma_buf_put(mem->dmabuf);
 		mem->dmabuf = NULL;
@@ -2313,34 +2312,26 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
 	return 0;
 }

-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
-				      struct dma_buf *dma_buf,
-				      uint64_t va, void *drm_priv,
-				      struct kgd_mem **mem, uint64_t *size,
-				      uint64_t *mmap_offset)
+static int import_obj_create(struct amdgpu_device *adev,
+			     struct dma_buf *dma_buf,
+			     struct drm_gem_object *obj,
+			     uint64_t va, void *drm_priv,
+			     struct kgd_mem **mem, uint64_t *size,
+			     uint64_t *mmap_offset)
 {
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
-	struct drm_gem_object *obj;
 	struct amdgpu_bo *bo;
 	int ret;

-	obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
 	bo = gem_to_amdgpu_bo(obj);
 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
-				       AMDGPU_GEM_DOMAIN_GTT))) {
+				       AMDGPU_GEM_DOMAIN_GTT)))
 		/* Only VRAM and GTT BOs are supported */
-		ret = -EINVAL;
-		goto err_put_obj;
-	}
+		return -EINVAL;

 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-	if (!*mem) {
-		ret = -ENOMEM;
-		goto err_put_obj;
-	}
+	if (!*mem)
+		return -ENOMEM;

 	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
 	if (ret)
@@ -2390,8 +2381,41 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
 		drm_vma_node_revoke(&obj->vma_node, drm_priv);
 err_free_mem:
 	kfree(*mem);
+	return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+					 uint64_t va, void *drm_priv,
+					 struct kgd_mem **mem, uint64_t *size,
+					 uint64_t *mmap_offset)
+{
+	struct drm_gem_object *obj;
+	uint32_t handle;
+	int ret;
+
+	ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
+					 &handle);
+	if (ret)
+		return ret;
+	obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
+	if (!obj) {
+		ret = -EINVAL;
+		goto err_release_handle;
+	}
+
+	ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
+				mmap_offset);
+	if (ret)
+		goto err_put_obj;
+
+	(*mem)->gem_handle = handle;
+
+	return 0;
+
 err_put_obj:
 	drm_gem_object_put(obj);
+err_release_handle:
+	drm_gem_handle_delete(adev->kfd.client.file, handle);
 	return ret;
 }
@@ -1564,16 +1564,11 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
 {
 	struct kfd_ioctl_import_dmabuf_args *args = data;
 	struct kfd_process_device *pdd;
-	struct dma_buf *dmabuf;
 	int idr_handle;
 	uint64_t size;
 	void *mem;
 	int r;

-	dmabuf = dma_buf_get(args->dmabuf_fd);
-	if (IS_ERR(dmabuf))
-		return PTR_ERR(dmabuf);
-
 	mutex_lock(&p->mutex);
 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
 	if (!pdd) {
@@ -1587,10 +1582,10 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
 		goto err_unlock;
 	}

-	r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
-					      args->va_addr, pdd->drm_priv,
-					      (struct kgd_mem **)&mem, &size,
-					      NULL);
+	r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
+						 args->va_addr, pdd->drm_priv,
+						 (struct kgd_mem **)&mem, &size,
+						 NULL);
 	if (r)
 		goto err_unlock;
@@ -1601,7 +1596,6 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
 	}

 	mutex_unlock(&p->mutex);
-	dma_buf_put(dmabuf);

 	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1612,7 +1606,6 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
 					       pdd->drm_priv, NULL);
 err_unlock:
 	mutex_unlock(&p->mutex);
-	dma_buf_put(dmabuf);
 	return r;
 }