Commit 0b11b30d authored by Ben Skeggs

drm/nouveau/mmu/nv04-nv4x: move global vmm to nvkm_mmu

In a future commit, this will be constructed by common code.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent ffd937bb
...@@ -58,6 +58,8 @@ struct nvkm_mmu { ...@@ -58,6 +58,8 @@ struct nvkm_mmu {
u64 limit; u64 limit;
u8 dma_bits; u8 dma_bits;
u8 lpg_shift; u8 lpg_shift;
struct nvkm_vmm *vmm;
}; };
int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
......
...@@ -192,16 +192,13 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = { ...@@ -192,16 +192,13 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
.debug = nouveau_gart_manager_debug .debug = nouveau_gart_manager_debug
}; };
/*XXX*/
#include <subdev/mmu/nv04.h>
static int static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{ {
struct nouveau_drm *drm = nouveau_bdev(man->bdev); struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device); struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
struct nv04_mmu *priv = (void *)mmu;
struct nvkm_vm *vm = NULL; struct nvkm_vm *vm = NULL;
nvkm_vm_ref(priv->vm, &vm, NULL); nvkm_vm_ref(mmu->vmm, &vm, NULL);
man->priv = vm; man->priv = vm;
return 0; return 0;
} }
......
...@@ -49,8 +49,7 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent, ...@@ -49,8 +49,7 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int ret; int ret;
if (dmaobj->clone) { if (dmaobj->clone) {
struct nv04_mmu *mmu = nv04_mmu(device->mmu); struct nvkm_memory *pgt = device->mmu->vmm->pgt[0].mem[0];
struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
if (!dmaobj->base.start) if (!dmaobj->base.start)
return nvkm_gpuobj_wrap(pgt, pgpuobj); return nvkm_gpuobj_wrap(pgt, pgpuobj);
nvkm_kmap(pgt); nvkm_kmap(pgt);
......
...@@ -81,15 +81,15 @@ nv04_mmu_oneinit(struct nvkm_mmu *base) ...@@ -81,15 +81,15 @@ nv04_mmu_oneinit(struct nvkm_mmu *base)
int ret; int ret;
ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL, ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
&mmu->vm); &mmu->base.vmm);
if (ret) if (ret)
return ret; return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
(NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8, (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
16, true, &dma); 16, true, &dma);
mmu->vm->pgt[0].mem[0] = dma; mmu->base.vmm->pgt[0].mem[0] = dma;
mmu->vm->pgt[0].refcount[0] = 1; mmu->base.vmm->pgt[0].refcount[0] = 1;
if (ret) if (ret)
return ret; return ret;
...@@ -105,9 +105,9 @@ nv04_mmu_dtor(struct nvkm_mmu *base) ...@@ -105,9 +105,9 @@ nv04_mmu_dtor(struct nvkm_mmu *base)
{ {
struct nv04_mmu *mmu = nv04_mmu(base); struct nv04_mmu *mmu = nv04_mmu(base);
struct nvkm_device *device = mmu->base.subdev.device; struct nvkm_device *device = mmu->base.subdev.device;
if (mmu->vm) { if (mmu->base.vmm) {
nvkm_memory_del(&mmu->vm->pgt[0].mem[0]); nvkm_memory_del(&mmu->base.vmm->pgt[0].mem[0]);
nvkm_vm_ref(NULL, &mmu->vm, NULL); nvkm_vm_ref(NULL, &mmu->base.vmm, NULL);
} }
if (mmu->nullp) { if (mmu->nullp) {
dma_free_coherent(device->dev, 16 * 1024, dma_free_coherent(device->dev, 16 * 1024,
......
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
struct nv04_mmu { struct nv04_mmu {
struct nvkm_mmu base; struct nvkm_mmu base;
struct nvkm_vm *vm;
dma_addr_t null; dma_addr_t null;
void *nullp; void *nullp;
}; };
......
...@@ -93,14 +93,14 @@ nv41_mmu_oneinit(struct nvkm_mmu *base) ...@@ -93,14 +93,14 @@ nv41_mmu_oneinit(struct nvkm_mmu *base)
int ret; int ret;
ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL, ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
&mmu->vm); &mmu->base.vmm);
if (ret) if (ret)
return ret; return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
(NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true, (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
&mmu->vm->pgt[0].mem[0]); &mmu->base.vmm->pgt[0].mem[0]);
mmu->vm->pgt[0].refcount[0] = 1; mmu->base.vmm->pgt[0].refcount[0] = 1;
return ret; return ret;
} }
...@@ -109,7 +109,7 @@ nv41_mmu_init(struct nvkm_mmu *base) ...@@ -109,7 +109,7 @@ nv41_mmu_init(struct nvkm_mmu *base)
{ {
struct nv04_mmu *mmu = nv04_mmu(base); struct nv04_mmu *mmu = nv04_mmu(base);
struct nvkm_device *device = mmu->base.subdev.device; struct nvkm_device *device = mmu->base.subdev.device;
struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0]; struct nvkm_memory *dma = mmu->base.vmm->pgt[0].mem[0];
nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma)); nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100); nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
nvkm_wr32(device, 0x100820, 0x00000000); nvkm_wr32(device, 0x100820, 0x00000000);
......
...@@ -173,15 +173,15 @@ nv44_mmu_oneinit(struct nvkm_mmu *base) ...@@ -173,15 +173,15 @@ nv44_mmu_oneinit(struct nvkm_mmu *base)
} }
ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL, ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
&mmu->vm); &mmu->base.vmm);
if (ret) if (ret)
return ret; return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
(NV44_GART_SIZE / NV44_GART_PAGE) * 4, (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
512 * 1024, true, 512 * 1024, true,
&mmu->vm->pgt[0].mem[0]); &mmu->base.vmm->pgt[0].mem[0]);
mmu->vm->pgt[0].refcount[0] = 1; mmu->base.vmm->pgt[0].refcount[0] = 1;
return ret; return ret;
} }
...@@ -190,7 +190,7 @@ nv44_mmu_init(struct nvkm_mmu *base) ...@@ -190,7 +190,7 @@ nv44_mmu_init(struct nvkm_mmu *base)
{ {
struct nv04_mmu *mmu = nv04_mmu(base); struct nv04_mmu *mmu = nv04_mmu(base);
struct nvkm_device *device = mmu->base.subdev.device; struct nvkm_device *device = mmu->base.subdev.device;
struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0]; struct nvkm_memory *gart = mmu->base.vmm->pgt[0].mem[0];
u32 addr; u32 addr;
/* calculate vram address of this PRAMIN block, object must be /* calculate vram address of this PRAMIN block, object must be
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment