Commit f72c2db4 authored by Danilo Krummrich

drm/gpuvm: rename struct drm_gpuva_manager to struct drm_gpuvm

Rename struct drm_gpuva_manager to struct drm_gpuvm including
corresponding functions. This way the GPUVA manager's structures align
very well with the documentation of VM_BIND [1] and VM_BIND locking [2].

It also provides a better foundation for the naming of data structures
and functions introduced for implementing a common dma-resv per GPU-VM
including tracking of external and evicted objects in subsequent
patches.

[1] Documentation/gpu/drm-vm-bind-async.rst
[2] Documentation/gpu/drm-vm-bind-locking.rst

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230920144343.64830-2-dakr@redhat.com
parent 89755ee1
...@@ -45,7 +45,7 @@ drm-y := \ ...@@ -45,7 +45,7 @@ drm-y := \
drm_vblank.o \ drm_vblank.o \
drm_vblank_work.o \ drm_vblank_work.o \
drm_vma_manager.o \ drm_vma_manager.o \
drm_gpuva_mgr.o \ drm_gpuvm.o \
drm_writeback.o drm_writeback.o
drm-$(CONFIG_DRM_LEGACY) += \ drm-$(CONFIG_DRM_LEGACY) += \
drm_agpsupport.o \ drm_agpsupport.o \
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#include <drm/drm_file.h> #include <drm/drm_file.h>
#include <drm/drm_gem.h> #include <drm/drm_gem.h>
#include <drm/drm_managed.h> #include <drm/drm_managed.h>
#include <drm/drm_gpuva_mgr.h> #include <drm/drm_gpuvm.h>
#include "drm_crtc_internal.h" #include "drm_crtc_internal.h"
#include "drm_internal.h" #include "drm_internal.h"
...@@ -189,31 +189,31 @@ static const struct file_operations drm_debugfs_fops = { ...@@ -189,31 +189,31 @@ static const struct file_operations drm_debugfs_fops = {
/** /**
* drm_debugfs_gpuva_info - dump the given DRM GPU VA space * drm_debugfs_gpuva_info - dump the given DRM GPU VA space
* @m: pointer to the &seq_file to write * @m: pointer to the &seq_file to write
* @mgr: the &drm_gpuva_manager representing the GPU VA space * @gpuvm: the &drm_gpuvm representing the GPU VA space
* *
* Dumps the GPU VA mappings of a given DRM GPU VA manager. * Dumps the GPU VA mappings of a given DRM GPU VA manager.
* *
* For each DRM GPU VA space drivers should call this function from their * For each DRM GPU VA space drivers should call this function from their
* &drm_info_list's show callback. * &drm_info_list's show callback.
* *
* Returns: 0 on success, -ENODEV if the &mgr is not initialized * Returns: 0 on success, -ENODEV if the &gpuvm is not initialized
*/ */
int drm_debugfs_gpuva_info(struct seq_file *m, int drm_debugfs_gpuva_info(struct seq_file *m,
struct drm_gpuva_manager *mgr) struct drm_gpuvm *gpuvm)
{ {
struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node; struct drm_gpuva *va, *kva = &gpuvm->kernel_alloc_node;
if (!mgr->name) if (!gpuvm->name)
return -ENODEV; return -ENODEV;
seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n", seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n",
mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range); gpuvm->name, gpuvm->mm_start, gpuvm->mm_start + gpuvm->mm_range);
seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n", seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n",
kva->va.addr, kva->va.addr + kva->va.range); kva->va.addr, kva->va.addr + kva->va.range);
seq_puts(m, "\n"); seq_puts(m, "\n");
seq_puts(m, " VAs | start | range | end | object | object offset\n"); seq_puts(m, " VAs | start | range | end | object | object offset\n");
seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n"); seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n");
drm_gpuva_for_each_va(va, mgr) { drm_gpuvm_for_each_va(va, gpuvm) {
if (unlikely(va == kva)) if (unlikely(va == kva))
continue; continue;
......
...@@ -106,7 +106,7 @@ nouveau_exec_job_submit(struct nouveau_job *job) ...@@ -106,7 +106,7 @@ nouveau_exec_job_submit(struct nouveau_job *job)
drm_exec_until_all_locked(exec) { drm_exec_until_all_locked(exec) {
struct drm_gpuva *va; struct drm_gpuva *va;
drm_gpuva_for_each_va(va, &uvmm->umgr) { drm_gpuvm_for_each_va(va, &uvmm->umgr) {
if (unlikely(va == &uvmm->umgr.kernel_alloc_node)) if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
continue; continue;
......
...@@ -329,7 +329,7 @@ nouveau_uvma_region_create(struct nouveau_uvmm *uvmm, ...@@ -329,7 +329,7 @@ nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_region *reg; struct nouveau_uvma_region *reg;
int ret; int ret;
if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range)) if (!drm_gpuvm_interval_empty(&uvmm->umgr, addr, range))
return -ENOSPC; return -ENOSPC;
ret = nouveau_uvma_region_alloc(&reg); ret = nouveau_uvma_region_alloc(&reg);
...@@ -384,7 +384,7 @@ nouveau_uvma_region_empty(struct nouveau_uvma_region *reg) ...@@ -384,7 +384,7 @@ nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
{ {
struct nouveau_uvmm *uvmm = reg->uvmm; struct nouveau_uvmm *uvmm = reg->uvmm;
return drm_gpuva_interval_empty(&uvmm->umgr, return drm_gpuvm_interval_empty(&uvmm->umgr,
reg->va.addr, reg->va.addr,
reg->va.range); reg->va.range);
} }
...@@ -444,7 +444,7 @@ op_map_prepare_unwind(struct nouveau_uvma *uvma) ...@@ -444,7 +444,7 @@ op_map_prepare_unwind(struct nouveau_uvma *uvma)
static void static void
op_unmap_prepare_unwind(struct drm_gpuva *va) op_unmap_prepare_unwind(struct drm_gpuva *va)
{ {
drm_gpuva_insert(va->mgr, va); drm_gpuva_insert(va->vm, va);
} }
static void static void
...@@ -1194,7 +1194,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) ...@@ -1194,7 +1194,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
goto unwind_continue; goto unwind_continue;
} }
op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr, op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->umgr,
op->va.addr, op->va.addr,
op->va.range); op->va.range);
if (IS_ERR(op->ops)) { if (IS_ERR(op->ops)) {
...@@ -1240,7 +1240,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) ...@@ -1240,7 +1240,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
} }
} }
op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr, op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->umgr,
op->va.addr, op->va.addr,
op->va.range, op->va.range,
op->gem.obj, op->gem.obj,
...@@ -1264,7 +1264,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) ...@@ -1264,7 +1264,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
break; break;
} }
case OP_UNMAP: case OP_UNMAP:
op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr, op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->umgr,
op->va.addr, op->va.addr,
op->va.range); op->va.range);
if (IS_ERR(op->ops)) { if (IS_ERR(op->ops)) {
...@@ -1836,11 +1836,11 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, ...@@ -1836,11 +1836,11 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
uvmm->kernel_managed_addr = kernel_managed_addr; uvmm->kernel_managed_addr = kernel_managed_addr;
uvmm->kernel_managed_size = kernel_managed_size; uvmm->kernel_managed_size = kernel_managed_size;
drm_gpuva_manager_init(&uvmm->umgr, cli->name, drm_gpuvm_init(&uvmm->umgr, cli->name,
NOUVEAU_VA_SPACE_START, NOUVEAU_VA_SPACE_START,
NOUVEAU_VA_SPACE_END, NOUVEAU_VA_SPACE_END,
kernel_managed_addr, kernel_managed_size, kernel_managed_addr, kernel_managed_size,
NULL); NULL);
ret = nvif_vmm_ctor(&cli->mmu, "uvmm", ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
cli->vmm.vmm.object.oclass, RAW, cli->vmm.vmm.object.oclass, RAW,
...@@ -1855,7 +1855,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, ...@@ -1855,7 +1855,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
return 0; return 0;
out_free_gpuva_mgr: out_free_gpuva_mgr:
drm_gpuva_manager_destroy(&uvmm->umgr); drm_gpuvm_destroy(&uvmm->umgr);
out_unlock: out_unlock:
mutex_unlock(&cli->mutex); mutex_unlock(&cli->mutex);
return ret; return ret;
...@@ -1877,7 +1877,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) ...@@ -1877,7 +1877,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
wait_event(entity->job.wq, list_empty(&entity->job.list.head)); wait_event(entity->job.wq, list_empty(&entity->job.list.head));
nouveau_uvmm_lock(uvmm); nouveau_uvmm_lock(uvmm);
drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) { drm_gpuvm_for_each_va_safe(va, next, &uvmm->umgr) {
struct nouveau_uvma *uvma = uvma_from_va(va); struct nouveau_uvma *uvma = uvma_from_va(va);
struct drm_gem_object *obj = va->gem.obj; struct drm_gem_object *obj = va->gem.obj;
...@@ -1910,7 +1910,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) ...@@ -1910,7 +1910,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
mutex_lock(&cli->mutex); mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm); nouveau_vmm_fini(&uvmm->vmm);
drm_gpuva_manager_destroy(&uvmm->umgr); drm_gpuvm_destroy(&uvmm->umgr);
mutex_unlock(&cli->mutex); mutex_unlock(&cli->mutex);
dma_resv_fini(&uvmm->resv); dma_resv_fini(&uvmm->resv);
......
...@@ -3,13 +3,13 @@ ...@@ -3,13 +3,13 @@
#ifndef __NOUVEAU_UVMM_H__ #ifndef __NOUVEAU_UVMM_H__
#define __NOUVEAU_UVMM_H__ #define __NOUVEAU_UVMM_H__
#include <drm/drm_gpuva_mgr.h> #include <drm/drm_gpuvm.h>
#include "nouveau_drv.h" #include "nouveau_drv.h"
struct nouveau_uvmm { struct nouveau_uvmm {
struct nouveau_vmm vmm; struct nouveau_vmm vmm;
struct drm_gpuva_manager umgr; struct drm_gpuvm umgr;
struct maple_tree region_mt; struct maple_tree region_mt;
struct mutex mutex; struct mutex mutex;
struct dma_resv resv; struct dma_resv resv;
...@@ -44,7 +44,7 @@ struct nouveau_uvma { ...@@ -44,7 +44,7 @@ struct nouveau_uvma {
#define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr) #define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
#define uvma_from_va(x) container_of((x), struct nouveau_uvma, va) #define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
#define to_uvmm(x) uvmm_from_mgr((x)->va.mgr) #define to_uvmm(x) uvmm_from_mgr((x)->va.vm)
struct nouveau_uvmm_bind_job { struct nouveau_uvmm_bind_job {
struct nouveau_job base; struct nouveau_job base;
......
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <drm/drm_gpuva_mgr.h> #include <drm/drm_gpuvm.h>
/** /**
* DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
...@@ -152,7 +152,7 @@ void drm_debugfs_add_files(struct drm_device *dev, ...@@ -152,7 +152,7 @@ void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files, int count); const struct drm_debugfs_info *files, int count);
int drm_debugfs_gpuva_info(struct seq_file *m, int drm_debugfs_gpuva_info(struct seq_file *m,
struct drm_gpuva_manager *mgr); struct drm_gpuvm *gpuvm);
#else #else
static inline void drm_debugfs_create_files(const struct drm_info_list *files, static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root, int count, struct dentry *root,
...@@ -177,7 +177,7 @@ static inline void drm_debugfs_add_files(struct drm_device *dev, ...@@ -177,7 +177,7 @@ static inline void drm_debugfs_add_files(struct drm_device *dev,
{} {}
static inline int drm_debugfs_gpuva_info(struct seq_file *m, static inline int drm_debugfs_gpuva_info(struct seq_file *m,
struct drm_gpuva_manager *mgr) struct drm_gpuvm *gpuvm)
{ {
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment