Commit 61184114 authored by Danilo Krummrich

drm/nouveau: make use of the GPUVM's shared dma-resv

DRM GEM objects private to a single GPUVM can share a single dma-resv.
Make use of the GPUVM's shared dma-resv rather than a driver-specific
one.

The shared dma-resv originates from a "root" GEM object that serves as a
container for the dma-resv, which makes it compatible with drm_exec.
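
A minimal sketch of what the GEM-object container buys, not part of the
patch itself: since the VM-wide resv is carried by a GEM object, drm_exec
can lock it like any other object's resv. example_lock_vm_resv() is a
hypothetical helper name, and drm_exec_init() is assumed to take the two
parameters it had around the time of this series:

#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>

static int example_lock_vm_resv(struct drm_gpuvm *gpuvm)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* The shared resv is reachable through the VM's root
		 * GEM object, so generic drm_exec locking just works.
		 */
		ret = drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm));
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	drm_exec_fini(&exec);
	return ret;
}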

In order to make sure the object providing the shared dma-resv can't be
freed before the objects making use of it, let every such GEM object
take a reference on it.
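
A minimal sketch of the resulting lifetime rule (again not part of the
patch; example_pin_vm_resv_obj() is a hypothetical helper, while
nvbo->r_obj, drm_gpuvm_resv_obj() and drm_gem_object_get() all appear in
the diff below):

#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>
#include "nouveau_bo.h"

static void example_pin_vm_resv_obj(struct drm_gpuvm *gpuvm,
				    struct nouveau_bo *nvbo)
{
	/* Remember which root GEM object the BO's dma-resv is derived
	 * from and take a reference on it; nouveau_bo_del_ttm() puts
	 * that reference again once the BO itself is destroyed.
	 */
	nvbo->r_obj = drm_gpuvm_resv_obj(gpuvm);
	drm_gem_object_get(nvbo->r_obj);
}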
Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-7-dakr@redhat.com
parent bbe84580
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,10 +148,17 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
 	 * initialized, so don't attempt to release it.
 	 */
-	if (bo->base.dev)
+	if (bo->base.dev) {
+		/* Gem objects not being shared with other VMs get their
+		 * dma_resv from a root GEM object.
+		 */
+		if (nvbo->no_share)
+			drm_gem_object_put(nvbo->r_obj);
+
 		drm_gem_object_release(&bo->base);
-	else
+	} else {
 		dma_resv_fini(&bo->base._resv);
+	}
 
 	kfree(nvbo);
 }
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,6 +26,11 @@ struct nouveau_bo {
 	struct list_head entry;
 	int pbbo_index;
 	bool validate_mapped;
+
+	/* Root GEM object we derive the dma_resv of in case this BO is not
+	 * shared between VMs.
+	 */
+	struct drm_gem_object *r_obj;
 	bool no_share;
 
 	/* GPU address space is independent of CPU word size */
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -111,7 +111,8 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
 		return 0;
 
-	if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
+	if (nvbo->no_share && uvmm &&
+	    drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
 		return -EPERM;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@@ -245,7 +246,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 		if (unlikely(!uvmm))
 			return -EINVAL;
 
-		resv = &uvmm->resv;
+		resv = drm_gpuvm_resv(&uvmm->base);
 	}
 
 	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
@@ -288,6 +289,11 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 		nvbo->valid_domains &= domain;
 
+	if (nvbo->no_share) {
+		nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
+		drm_gem_object_get(nvbo->r_obj);
+	}
+
 	*pnvbo = nvbo;
 	return 0;
 }
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1802,7 +1802,6 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
 	int ret;
 
 	mutex_init(&uvmm->mutex);
-	dma_resv_init(&uvmm->resv);
 	mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
 	mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
 
@@ -1842,14 +1841,14 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
 			    kernel_managed_addr, kernel_managed_size,
 			    NULL, 0, &cli->uvmm.vmm.vmm);
 	if (ret)
-		goto out_free_gpuva_mgr;
+		goto out_gpuvm_fini;
 
 	cli->uvmm.vmm.cli = cli;
 	mutex_unlock(&cli->mutex);
 
 	return 0;
 
-out_free_gpuva_mgr:
+out_gpuvm_fini:
 	drm_gpuvm_destroy(&uvmm->base);
 out_unlock:
 	mutex_unlock(&cli->mutex);
@@ -1907,6 +1906,4 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
 	nouveau_vmm_fini(&uvmm->vmm);
 	drm_gpuvm_destroy(&uvmm->base);
 	mutex_unlock(&cli->mutex);
-
-	dma_resv_fini(&uvmm->resv);
 }
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -12,7 +12,6 @@ struct nouveau_uvmm {
 	struct nouveau_vmm vmm;
 	struct maple_tree region_mt;
 	struct mutex mutex;
-	struct dma_resv resv;
 
 	bool disabled;
 };