Commit d30af7ce authored by Ben Skeggs

drm/nouveau/mmu: handle instance block setup

We previously required each VMM user to allocate their own page directory
and fill in the instance block themselves.

It makes more sense to handle this in a common location.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent af3b8d53
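
For context, the per-user boilerplate being centralised looked roughly like the sketch below. It is condensed from the gpfifo, BAR and secboot hunks in this diff; `inst`, `pgd`, `vm`, `owner` and `limit` are illustrative stand-ins for whichever objects each user owned, not real identifiers:

	/* Before this commit: every VMM user allocated its own page
	 * directory and wrote its address, plus the VM limit, into the
	 * instance block by hand (offsets 0x200-0x20c, as in the code
	 * removed below).
	 */
	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &pgd);
	if (ret)
		return ret;

	nvkm_kmap(inst);
	nvkm_wo32(inst, 0x0200, lower_32_bits(pgd->addr)); /* PD address, low  */
	nvkm_wo32(inst, 0x0204, upper_32_bits(pgd->addr)); /* PD address, high */
	nvkm_wo32(inst, 0x0208, lower_32_bits(limit));     /* VM limit, low    */
	nvkm_wo32(inst, 0x020c, upper_32_bits(limit));     /* VM limit, high   */
	nvkm_done(inst);

	ret = nvkm_vm_ref(vm, &owner->vm, pgd);

After this commit, callers simply pass the instance block to nvkm_vm_ref(), and the MMU code performs the join via the VMM's join() hook and writes the page-directory entries itself.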
@@ -78,7 +78,7 @@ struct nvkm_falcon_func {
 	void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
 	void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
 	void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
-	void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
+	void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
 	int (*wait_for_halt)(struct nvkm_falcon *, u32);
 	int (*clear_interrupt)(struct nvkm_falcon *, u32);
 	void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
@@ -113,7 +113,7 @@ void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
 			   bool);
 void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
 void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
-void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
+void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
 void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
 void nvkm_falcon_start(struct nvkm_falcon *);
 int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
...
@@ -10,11 +10,6 @@ struct nvkm_vm_pgt {
 	u32 refcount[2];
 };
 
-struct nvkm_vm_pgd {
-	struct list_head head;
-	struct nvkm_gpuobj *obj;
-};
-
 struct nvkm_vma {
 	struct nvkm_vm *vm;
 	struct nvkm_mm_node *node;
@@ -40,7 +35,6 @@ struct nvkm_vm {
 	struct nvkm_mm mm;
 	struct kref refcount;
 
-	struct list_head pgd_list;
 	struct nvkm_vm_pgt *pgt;
 	u32 fpde;
 	u32 lpde;
@@ -54,7 +48,7 @@ struct nvkm_vm {
 int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
 		struct lock_class_key *, struct nvkm_vm **);
-int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
+int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_memory *inst);
 int nvkm_vm_boot(struct nvkm_vm *, u64 size);
 int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
 		struct nvkm_vma *);
...
@@ -281,5 +281,5 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
 	if (ret)
 		return ret;
 
-	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->base.inst->memory);
 }
@@ -11,7 +11,6 @@ struct gf100_fifo_chan {
 	struct list_head head;
 	bool killed;
 
-	struct nvkm_gpuobj *pgd;
 	struct nvkm_vm *vm;
 
 	struct {
...
@@ -12,7 +12,6 @@ struct gk104_fifo_chan {
 	struct list_head head;
 	bool killed;
 
-	struct nvkm_gpuobj *pgd;
 	struct nvkm_vm *vm;
 
 	struct {
...
@@ -206,7 +206,8 @@ void *
 nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
 {
 	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
-	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	if (chan->base.inst)
+		nvkm_vm_ref(NULL, &chan->vm, chan->base.inst->memory);
 	nvkm_ramht_del(&chan->ramht);
 	nvkm_gpuobj_del(&chan->pgd);
 	nvkm_gpuobj_del(&chan->eng);
@@ -266,5 +267,5 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
 	if (ret)
 		return ret;
 
-	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->base.inst->memory);
 }
@@ -200,8 +200,8 @@ static void *
 gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
 	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
-	nvkm_gpuobj_del(&chan->pgd);
+	if (chan->base.inst)
+		nvkm_vm_ref(NULL, &chan->vm, chan->base.inst->memory);
 	return chan;
 }
@@ -225,7 +225,6 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		struct fermi_channel_gpfifo_v0 v0;
 	} *args = data;
 	struct gf100_fifo *fifo = gf100_fifo(base);
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_object *parent = oclass->parent;
 	struct gf100_fifo_chan *chan;
 	u64 usermem, ioffset, ilength;
@@ -263,19 +262,7 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 	args->v0.chid = chan->base.chid;
 
-	/* page directory */
-	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(chan->base.inst);
-	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
-	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
-	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
-	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
-	nvkm_done(chan->base.inst);
-
-	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->base.inst->memory);
 	if (ret)
 		return ret;
...
@@ -213,8 +213,8 @@ static void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
-	nvkm_gpuobj_del(&chan->pgd);
+	if (chan->base.inst)
+		nvkm_vm_ref(NULL, &chan->vm, chan->base.inst->memory);
 	return chan;
 }
@@ -242,7 +242,6 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
 		       const struct nvkm_oclass *oclass,
 		       struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct gk104_fifo_chan *chan;
 	int runlist = -1, ret = -ENOSYS, i, j;
 	u32 engines = 0, present = 0;
@@ -302,19 +301,7 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
 	*chid = chan->base.chid;
 
-	/* Page directory. */
-	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
-	if (ret)
-		return ret;
-
-	nvkm_kmap(chan->base.inst);
-	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
-	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
-	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
-	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
-	nvkm_done(chan->base.inst);
-
-	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->base.inst->memory);
 	if (ret)
 		return ret;
...
@@ -60,7 +60,7 @@ nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
 }
 
 void
-nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
+nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
 {
 	if (!falcon->func->bind_context) {
 		nvkm_error(falcon->user,
...
@@ -180,7 +180,7 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
 }
 
 static void
-nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
+nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
 {
 	u32 inst_loc;
 	u32 fbif;
@@ -216,7 +216,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
 	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
 
 	/* Set context */
-	switch (nvkm_memory_target(ctx->memory)) {
+	switch (nvkm_memory_target(ctx)) {
 	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
 	case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
 	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
@@ -228,7 +228,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
 	/* Enable context */
 	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
 	nvkm_falcon_wr32(falcon, 0x054,
-			 ((ctx->addr >> 12) & 0xfffffff) |
+			 ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
 			 (inst_loc << 28) | (1 << 30));
 	nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
...
@@ -23,7 +23,7 @@
  */
 #include "gf100.h"
 
-#include <core/gpuobj.h>
+#include <core/memory.h>
 #include <core/option.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
@@ -53,7 +53,7 @@ gf100_bar_bar1_init(struct nvkm_bar *base)
 {
 	struct nvkm_device *device = base->subdev.device;
 	struct gf100_bar *bar = gf100_bar(base);
-	const u32 addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
+	const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
 	nvkm_wr32(device, 0x001704, 0x80000000 | addr);
 }
@@ -74,7 +74,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base)
 {
 	struct nvkm_device *device = base->subdev.device;
 	struct gf100_bar *bar = gf100_bar(base);
-	u32 addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
+	u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
 	if (bar->bar2_halve)
 		addr |= 0x40000000;
 	nvkm_wr32(device, 0x001714, 0x80000000 | addr);
@@ -90,11 +90,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
 	int ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
-			      &bar_vm->mem);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
+			      &bar_vm->inst);
 	if (ret)
 		return ret;
@@ -119,17 +115,11 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
 		}
 	}
 
-	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
+	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->inst);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	nvkm_kmap(bar_vm->mem);
-	nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
-	nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
-	nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
-	nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
-	nvkm_done(bar_vm->mem);
 	return 0;
 }
@@ -164,13 +154,11 @@ gf100_bar_dtor(struct nvkm_bar *base)
 {
 	struct gf100_bar *bar = gf100_bar(base);
 
-	nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
-	nvkm_gpuobj_del(&bar->bar[1].pgd);
-	nvkm_memory_unref(&bar->bar[1].mem);
+	nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].inst);
+	nvkm_memory_unref(&bar->bar[1].inst);
 
-	nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
-	nvkm_gpuobj_del(&bar->bar[0].pgd);
-	nvkm_memory_unref(&bar->bar[0].mem);
+	nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].inst);
+	nvkm_memory_unref(&bar->bar[0].inst);
 	return bar;
 }
...
@@ -4,8 +4,7 @@
 #include "priv.h"
 
 struct gf100_barN {
-	struct nvkm_memory *mem;
-	struct nvkm_gpuobj *pgd;
+	struct nvkm_memory *inst;
 	struct nvkm_vm *vm;
 };
...
@@ -140,7 +140,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_ref(vm, &bar->bar2_vm, bar->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar2_vm, bar->mem->memory);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
@@ -172,7 +172,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->mem->memory);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
@@ -197,13 +197,15 @@ void *
 nv50_bar_dtor(struct nvkm_bar *base)
 {
 	struct nv50_bar *bar = nv50_bar(base);
-	nvkm_gpuobj_del(&bar->bar1);
-	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
-	nvkm_gpuobj_del(&bar->bar2);
-	nvkm_vm_ref(NULL, &bar->bar2_vm, bar->pgd);
-	nvkm_gpuobj_del(&bar->pgd);
-	nvkm_gpuobj_del(&bar->pad);
-	nvkm_gpuobj_del(&bar->mem);
+	if (bar->mem) {
+		nvkm_gpuobj_del(&bar->bar1);
+		nvkm_vm_ref(NULL, &bar->bar1_vm, bar->mem->memory);
+		nvkm_gpuobj_del(&bar->bar2);
+		nvkm_vm_ref(NULL, &bar->bar2_vm, bar->mem->memory);
+		nvkm_gpuobj_del(&bar->pgd);
+		nvkm_gpuobj_del(&bar->pad);
+		nvkm_gpuobj_del(&bar->mem);
+	}
 	return bar;
 }
...
@@ -446,7 +446,6 @@ static void
 nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 {
 	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_vm_pgd *vpgd;
 	struct nvkm_vm_pgt *vpgt;
 	struct nvkm_memory *pgt;
 	u32 pde;
@@ -459,9 +458,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 		pgt = vpgt->mem[big];
 		vpgt->mem[big] = NULL;
 
-		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
-		}
+		if (mmu->func->map_pgt)
+			mmu->func->map_pgt(vm, pde, vpgt->mem);
 
 		mmu->func->flush(vm);
@@ -474,7 +472,6 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
 {
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-	struct nvkm_vm_pgd *vpgd;
 	int big = (type != mmu->func->spg_shift);
 	u32 pgt_size;
 	int ret;
@@ -487,9 +484,8 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
 	if (unlikely(ret))
 		return ret;
 
-	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
-	}
+	if (mmu->func->map_pgt)
+		mmu->func->map_pgt(vm, pde, vpgt->mem);
 
 	vpgt->refcount[big]++;
 	return 0;
@@ -592,7 +588,6 @@ nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
 	u64 mm_length = (offset + length) - mm_offset;
 	int ret;
 
-	INIT_LIST_HEAD(&vm->pgd_list);
 	kref_init(&vm->refcount);
 	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
 	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
@@ -644,58 +639,10 @@ nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
 	return -EINVAL;
 }
 
-static int
-nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
-{
-	struct nvkm_mmu *mmu = vm->mmu;
-	struct nvkm_vm_pgd *vpgd;
-	int i;
-
-	if (!pgd)
-		return 0;
-
-	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
-	if (!vpgd)
-		return -ENOMEM;
-
-	vpgd->obj = pgd;
-
-	mutex_lock(&vm->mutex);
-	for (i = vm->fpde; i <= vm->lpde; i++)
-		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
-	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mutex);
-	return 0;
-}
-
-static void
-nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
-{
-	struct nvkm_vm_pgd *vpgd, *tmp;
-
-	if (!mpgd)
-		return;
-
-	mutex_lock(&vm->mutex);
-	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
-		if (vpgd->obj == mpgd) {
-			list_del(&vpgd->head);
-			kfree(vpgd);
-			break;
-		}
-	}
-	mutex_unlock(&vm->mutex);
-}
-
 static void
 nvkm_vm_del(struct kref *kref)
 {
 	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
-	struct nvkm_vm_pgd *vpgd, *tmp;
-
-	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
-		nvkm_vm_unlink(vm, vpgd->obj);
-	}
 
 	nvkm_mm_fini(&vm->mm);
 	vfree(vm->pgt);
@@ -705,20 +652,28 @@ nvkm_vm_del(struct kref *kref)
 }
 
 int
-nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
+nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
 {
 	if (ref) {
-		int ret = nvkm_vm_link(ref, pgd);
-		if (ret)
-			return ret;
+		if (ref->func->join && inst) {
+			int ret = ref->func->join(ref, inst), i;
+			if (ret)
+				return ret;
+
+			if (ref->mmu->func->map_pgt) {
+				for (i = ref->fpde; i <= ref->lpde; i++)
+					ref->mmu->func->map_pgt(ref, i, ref->pgt[i - ref->fpde].mem);
+			}
+		}
 
 		kref_get(&ref->refcount);
 	}
 
 	if (*ptr) {
-		if ((*ptr)->bootstrapped && pgd)
+		if ((*ptr)->func->part && inst)
+			(*ptr)->func->part(*ptr, inst);
+		if ((*ptr)->bootstrapped && inst)
 			nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
-		nvkm_vm_unlink(*ptr, pgd);
 		kref_put(&(*ptr)->refcount, nvkm_vm_del);
 	}
...
@@ -70,8 +70,9 @@ const u8 gf100_pte_storage_type_map[256] =
 
 void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
+gf100_vm_map_pgt(struct nvkm_vmm *vmm, u32 index, struct nvkm_memory *pgt[2])
 {
+	struct nvkm_memory *pgd = vmm->pd->pt[0]->memory;
 	u32 pde[2] = { 0, 0 };
 
 	if (pgt[0])
@@ -161,7 +162,6 @@ gf100_vm_flush(struct nvkm_vm *vm)
 {
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_device *device = mmu->subdev.device;
-	struct nvkm_vm_pgd *vpgd;
 	u32 type;
 
 	type = 0x00000001; /* PAGE_ALL */
@@ -169,24 +169,22 @@ gf100_vm_flush(struct nvkm_vm *vm)
 		type |= 0x00000004; /* HUB_ONLY */
 
 	mutex_lock(&mmu->subdev.mutex);
-	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		/* looks like maybe a "free flush slots" counter, the
-		 * faster you write to 0x100cbc to more it decreases
-		 */
-		nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
-				break;
-		);
-
-		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
-		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
-
-		/* wait for flush to be queued? */
-		nvkm_msec(device, 2000,
-			if (nvkm_rd32(device, 0x100c80) & 0x00008000)
-				break;
-		);
-	}
+	/* looks like maybe a "free flush slots" counter, the
+	 * faster you write to 0x100cbc to more it decreases
+	 */
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+			break;
+	);
+
+	nvkm_wr32(device, 0x100cb8, vm->pd->pt[0]->addr >> 8);
+	nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
+
+	/* wait for flush to be queued? */
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+			break;
+	);
 	mutex_unlock(&mmu->subdev.mutex);
 }
...
@@ -31,8 +31,10 @@
 #include <nvif/class.h>
 
 void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
+nv50_vm_map_pgt(struct nvkm_vmm *vmm, u32 pde, struct nvkm_memory *pgt[2])
 {
+	struct nvkm_vmm_join *join;
+	u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pde * 8);
 	u64 phys = 0xdeadcafe00000000ULL;
 	u32 coverage = 0;
@@ -56,10 +58,12 @@ nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
 		phys |= 0x20;
 	}
 
-	nvkm_kmap(pgd);
-	nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
-	nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
-	nvkm_done(pgd);
+	list_for_each_entry(join, &vmm->join, head) {
+		nvkm_kmap(join->inst);
+		nvkm_wo32(join->inst, pdeo + 0, lower_32_bits(phys));
+		nvkm_wo32(join->inst, pdeo + 4, upper_32_bits(phys));
+		nvkm_done(join->inst);
+	}
 }
 
 static inline u64
...
@@ -18,7 +18,7 @@ struct nvkm_mmu_func {
 	u8 spg_shift;
 	u8 lpg_shift;
 
-	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
+	void (*map_pgt)(struct nvkm_vmm *, u32 pde,
 			struct nvkm_memory *pgt[2]);
 	void (*map)(struct nvkm_vma *, struct nvkm_memory *,
 		    struct nvkm_mem *, u32 pte, u32 cnt,
@@ -41,7 +41,7 @@ struct nvkm_mmu_func {
 extern const struct nvkm_mmu_func nv04_mmu;
 
-void nv50_vm_map_pgt(struct nvkm_gpuobj *, u32, struct nvkm_memory **);
+void nv50_vm_map_pgt(struct nvkm_vmm *, u32, struct nvkm_memory **);
 void nv50_vm_map(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
 		 u32, u32, u64, u64);
 void nv50_vm_map_sg(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
@@ -49,7 +49,7 @@ void nv50_vm_map_sg(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
 void nv50_vm_unmap(struct nvkm_vma *, struct nvkm_memory *, u32, u32);
 void nv50_vm_flush(struct nvkm_vm *);
 
-void gf100_vm_map_pgt(struct nvkm_gpuobj *, u32, struct nvkm_memory **);
+void gf100_vm_map_pgt(struct nvkm_vmm *, u32, struct nvkm_memory **);
 void gf100_vm_map(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
 		  u32, u32, u64, u64);
 void gf100_vm_map_sg(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
...
@@ -131,7 +131,7 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 	/* ... and the GPU storage for it, except on Tesla-class GPUs that
 	 * have the PD embedded in the instance structure.
 	 */
-	if (desc->size && mmu->func->vmm.global) {
+	if (desc->size) {
 		const u32 size = pd_header + desc->size * (1 << desc->bits);
 		vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
 		if (!vmm->pd->pt[0])
...
@@ -112,11 +112,8 @@ gm200_secboot_oneinit(struct nvkm_secboot *sb)
 	int ret;
 
 	/* Allocate instance block and VM */
-	ret = nvkm_gpuobj_new(device, 0x1000, 0, true, NULL, &gsb->inst);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(device, 0x8000, 0, true, NULL, &gsb->pgd);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
+			      &gsb->inst);
 	if (ret)
 		return ret;
@@ -126,18 +123,11 @@ gm200_secboot_oneinit(struct nvkm_secboot *sb)
 	atomic_inc(&vm->engref[NVKM_SUBDEV_PMU]);
 
-	ret = nvkm_vm_ref(vm, &gsb->vm, gsb->pgd);
+	ret = nvkm_vm_ref(vm, &gsb->vm, gsb->inst);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	nvkm_kmap(gsb->inst);
-	nvkm_wo32(gsb->inst, 0x200, lower_32_bits(gsb->pgd->addr));
-	nvkm_wo32(gsb->inst, 0x204, upper_32_bits(gsb->pgd->addr));
-	nvkm_wo32(gsb->inst, 0x208, lower_32_bits(vm_area_len - 1));
-	nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
-	nvkm_done(gsb->inst);
-
 	if (sb->acr->func->oneinit) {
 		ret = sb->acr->func->oneinit(sb->acr, sb);
 		if (ret)
@@ -165,9 +155,8 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
 		sb->acr->func->dtor(sb->acr);
 
-	nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
-	nvkm_gpuobj_del(&gsb->pgd);
-	nvkm_gpuobj_del(&gsb->inst);
+	nvkm_vm_ref(NULL, &gsb->vm, gsb->inst);
+	nvkm_memory_unref(&gsb->inst);
 
 	return gsb;
 }
...
@@ -29,8 +29,7 @@ struct gm200_secboot {
 	struct nvkm_secboot base;
 
 	/* Instance block & address space used for HS FW execution */
-	struct nvkm_gpuobj *inst;
-	struct nvkm_gpuobj *pgd;
+	struct nvkm_memory *inst;
 	struct nvkm_vm *vm;
 };
 
 #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
...