Commit 83f56106 authored by Ben Skeggs

drm/nouveau/mmu: switch to device pri macros

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 25e3a463
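
Every hunk below applies the same mechanical transformation: register accessors called on the subdev pointer (nv_rd32/nv_wr32/nv_mask) become the device-level pri macros (nvkm_rd32/nvkm_wr32/nvkm_mask), with the struct nvkm_device pointer fetched once from the subdev at the top of each function. The following is a minimal standalone sketch of that pattern; the struct layout, the pri member, and the array-backed register file are simplified assumptions for illustration, not the real nvkm definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the nvkm structures involved; the real
 * definitions carry far more state (assumption for illustration). */
struct nvkm_device { uint32_t *pri; };
struct nvkm_subdev { struct nvkm_device *device; };
struct nvkm_mmu    { struct nvkm_subdev subdev; };

/* Device-level accessors in the spirit of nvkm_rd32()/nvkm_wr32()/
 * nvkm_mask(): they take the device rather than a subdev, so a single
 * implementation serves every subdev.  Backed by a plain array here
 * instead of real MMIO. */
static uint32_t nvkm_rd32(struct nvkm_device *device, uint32_t addr)
{
	return device->pri[addr / 4];
}

static void nvkm_wr32(struct nvkm_device *device, uint32_t addr, uint32_t data)
{
	device->pri[addr / 4] = data;
}

static void nvkm_mask(struct nvkm_device *device, uint32_t addr,
		      uint32_t mask, uint32_t data)
{
	uint32_t temp = nvkm_rd32(device, addr);
	nvkm_wr32(device, addr, (temp & ~mask) | data);
}

/* The shape of the conversion: look the device up once at the top of
 * the function, then route every register access through it. */
static void example_flush(struct nvkm_mmu *mmu)
{
	struct nvkm_device *device = mmu->subdev.device;

	/* was: nv_wr32(mmu, 0x100810, 0x00000022); */
	nvkm_wr32(device, 0x100810, 0x00000022);
	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
	printf("0x100810 = 0x%08x\n", nvkm_rd32(device, 0x100810));
}

int main(void)
{
	static uint32_t regs[0x110000 / 4];	/* covers the 0x100xxx range */
	struct nvkm_device device = { .pri = regs };
	struct nvkm_mmu mmu = { .subdev = { .device = &device } };

	example_flush(&mmu);
	return 0;
}

Note that nv_wait()/nv_wait_ne() and the nv_error()/nv_warn() logging helpers are left untouched by this commit and still take the mmu pointer.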
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -154,7 +154,8 @@ static void
 gf100_vm_flush(struct nvkm_vm *vm)
 {
 	struct nvkm_mmu *mmu = (void *)vm->mmu;
-	struct nvkm_bar *bar = nvkm_bar(mmu);
+	struct nvkm_device *device = mmu->subdev.device;
+	struct nvkm_bar *bar = device->bar;
 	struct nvkm_vm_pgd *vpgd;
 	u32 type;
@@ -171,16 +172,16 @@ gf100_vm_flush(struct nvkm_vm *vm)
 		 */
 		if (!nv_wait_ne(mmu, 0x100c80, 0x00ff0000, 0x00000000)) {
 			nv_error(mmu, "vm timeout 0: 0x%08x %d\n",
-				 nv_rd32(mmu, 0x100c80), type);
+				 nvkm_rd32(device, 0x100c80), type);
 		}
-		nv_wr32(mmu, 0x100cb8, vpgd->obj->addr >> 8);
-		nv_wr32(mmu, 0x100cbc, 0x80000000 | type);
+		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
+		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
 		/* wait for flush to be queued? */
 		if (!nv_wait(mmu, 0x100c80, 0x00008000, 0x00008000)) {
 			nv_error(mmu, "vm timeout 1: 0x%08x %d\n",
-				 nv_rd32(mmu, 0x100c80), type);
+				 nvkm_rd32(device, 0x100c80), type);
 		}
 	}
 	mutex_unlock(&nv_subdev(mmu)->mutex);

--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -65,14 +65,15 @@ static void
 nv41_vm_flush(struct nvkm_vm *vm)
 {
 	struct nv04_mmu *mmu = (void *)vm->mmu;
+	struct nvkm_device *device = mmu->base.subdev.device;
 
 	mutex_lock(&nv_subdev(mmu)->mutex);
-	nv_wr32(mmu, 0x100810, 0x00000022);
+	nvkm_wr32(device, 0x100810, 0x00000022);
 	if (!nv_wait(mmu, 0x100810, 0x00000020, 0x00000020)) {
 		nv_warn(mmu, "flush timeout, 0x%08x\n",
-			nv_rd32(mmu, 0x100810));
+			nvkm_rd32(device, 0x100810));
 	}
-	nv_wr32(mmu, 0x100810, 0x00000000);
+	nvkm_wr32(device, 0x100810, 0x00000000);
 	mutex_unlock(&nv_subdev(mmu)->mutex);
 }
@@ -131,6 +132,7 @@ static int
 nv41_mmu_init(struct nvkm_object *object)
 {
 	struct nv04_mmu *mmu = (void *)object;
+	struct nvkm_device *device = mmu->base.subdev.device;
 	struct nvkm_gpuobj *dma = mmu->vm->pgt[0].obj[0];
 	int ret;
@@ -138,9 +140,9 @@ nv41_mmu_init(struct nvkm_object *object)
 	if (ret)
 		return ret;
 
-	nv_wr32(mmu, 0x100800, dma->addr | 0x00000002);
-	nv_mask(mmu, 0x10008c, 0x00000100, 0x00000100);
-	nv_wr32(mmu, 0x100820, 0x00000000);
+	nvkm_wr32(device, 0x100800, dma->addr | 0x00000002);
+	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100820, 0x00000000);
 	return 0;
 }

--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -140,11 +140,12 @@ static void
 nv44_vm_flush(struct nvkm_vm *vm)
 {
 	struct nv04_mmu *mmu = (void *)vm->mmu;
-	nv_wr32(mmu, 0x100814, mmu->base.limit - NV44_GART_PAGE);
-	nv_wr32(mmu, 0x100808, 0x00000020);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+	nvkm_wr32(device, 0x100808, 0x00000020);
 	if (!nv_wait(mmu, 0x100808, 0x00000001, 0x00000001))
-		nv_error(mmu, "timeout: 0x%08x\n", nv_rd32(mmu, 0x100808));
-	nv_wr32(mmu, 0x100808, 0x00000000);
+		nv_error(mmu, "timeout: 0x%08x\n", nvkm_rd32(device, 0x100808));
+	nvkm_wr32(device, 0x100808, 0x00000000);
 }
 
 /*******************************************************************************
@@ -208,6 +209,7 @@ static int
 nv44_mmu_init(struct nvkm_object *object)
 {
 	struct nv04_mmu *mmu = (void *)object;
+	struct nvkm_device *device = mmu->base.subdev.device;
 	struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
 	u32 addr;
 	int ret;
@@ -220,17 +222,17 @@ nv44_mmu_init(struct nvkm_object *object)
 	 * allocated on 512KiB alignment, and not exceed a total size
 	 * of 512KiB for this to work correctly
 	 */
-	addr = nv_rd32(mmu, 0x10020c);
+	addr = nvkm_rd32(device, 0x10020c);
 	addr -= ((gart->addr >> 19) + 1) << 19;
 
-	nv_wr32(mmu, 0x100850, 0x80000000);
-	nv_wr32(mmu, 0x100818, mmu->null);
-	nv_wr32(mmu, 0x100804, NV44_GART_SIZE);
-	nv_wr32(mmu, 0x100850, 0x00008000);
-	nv_mask(mmu, 0x10008c, 0x00000200, 0x00000200);
-	nv_wr32(mmu, 0x100820, 0x00000000);
-	nv_wr32(mmu, 0x10082c, 0x00000001);
-	nv_wr32(mmu, 0x100800, addr | 0x00000010);
+	nvkm_wr32(device, 0x100850, 0x80000000);
+	nvkm_wr32(device, 0x100818, mmu->null);
+	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+	nvkm_wr32(device, 0x100850, 0x00008000);
+	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
+	nvkm_wr32(device, 0x100820, 0x00000000);
+	nvkm_wr32(device, 0x10082c, 0x00000001);
+	nvkm_wr32(device, 0x100800, addr | 0x00000010);
 	return 0;
 }

--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -146,7 +146,8 @@ static void
 nv50_vm_flush(struct nvkm_vm *vm)
 {
 	struct nvkm_mmu *mmu = (void *)vm->mmu;
-	struct nvkm_bar *bar = nvkm_bar(mmu);
+	struct nvkm_device *device = mmu->subdev.device;
+	struct nvkm_bar *bar = device->bar;
 	struct nvkm_engine *engine;
 	int i, vme;
@@ -180,7 +181,7 @@ nv50_vm_flush(struct nvkm_vm *vm)
 			continue;
 		}
 
-		nv_wr32(mmu, 0x100c80, (vme << 16) | 1);
+		nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
 		if (!nv_wait(mmu, 0x100c80, 0x00000001, 0x00000000))
 			nv_error(mmu, "vm flush timeout: engine %d\n", vme);
 	}