Commit 19a82e49 authored by Ben Skeggs

drm/nouveau/core/memory: change map interface to support upcoming mmu changes

Map flags (access, kind, etc.) are currently defined in either the VMA
or the memory object, which turns out not to be ideal for things like
suballocated buffers, etc.

These will become per-map flags instead, so we need to support passing
these arguments in nvkm_memory_map().
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 7f53d6dc
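
The net effect on the nvkm_memory map hook, summarised from the header diff below (a sketch of the signature change only, not code added by the commit):

	/* before: per-map state lived in the VMA / memory object */
	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);

	/* after: the target VMM and per-map arguments travel with the call,
	 * and mapping can now fail */
	int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
		   struct nvkm_vma *, void *argv, u32 argc);

Callers now reserve address space with nvkm_vm_get() and then call nvkm_memory_map(memory, 0, vmm, &vma, NULL, 0), as the updated fifo, gr and secboot code below shows.
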
@@ -2,8 +2,6 @@
 #define __NVKM_GPUOBJ_H__
 #include <core/memory.h>
 #include <core/mm.h>
-struct nvkm_vma;
-struct nvkm_vm;
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
 #define NVOBJ_FLAG_HEAP       0x00000004
@@ -29,15 +27,14 @@ struct nvkm_gpuobj_func {
 	void (*release)(struct nvkm_gpuobj *);
 	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
 	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+	int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *,
+		   struct nvkm_vma *, void *argv, u32 argc);
 };
 int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
		     struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
 void nvkm_gpuobj_del(struct nvkm_gpuobj **);
 int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
-		    struct nvkm_vma *);
-void nvkm_gpuobj_unmap(struct nvkm_vma *);
 void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
			    u32 length);
 void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
...
@@ -3,7 +3,7 @@
 #include <core/os.h>
 struct nvkm_device;
 struct nvkm_vma;
-struct nvkm_vm;
+struct nvkm_vmm;
 struct nvkm_tags {
 	struct nvkm_mm_node *mn;
@@ -28,10 +28,11 @@ struct nvkm_memory_func {
 	enum nvkm_memory_target (*target)(struct nvkm_memory *);
 	u64 (*addr)(struct nvkm_memory *);
 	u64 (*size)(struct nvkm_memory *);
-	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+	void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
 	void __iomem *(*acquire)(struct nvkm_memory *);
 	void (*release)(struct nvkm_memory *);
-	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
+	int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
+		   struct nvkm_vma *, void *argv, u32 argc);
 };
 struct nvkm_memory_ptrs {
@@ -53,7 +54,8 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
 #define nvkm_memory_addr(p) (p)->func->addr(p)
 #define nvkm_memory_size(p) (p)->func->size(p)
 #define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
-#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+#define nvkm_memory_map(p,o,vm,va,av,ac) \
+	(p)->func->map((p),(o),(vm),(va),(av),(ac))
 /* accessor macros - kmap()/done() must bracket use of the other accessor
  * macros to guarantee correct behaviour across all chipsets
...
@@ -42,6 +42,14 @@ nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 }
 /* accessor functions for gpuobjs allocated directly from instmem */
+static int
+nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+		     struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		     void *argv, u32 argc)
+{
+	return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
+}
 static u32
 nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
@@ -67,6 +75,7 @@ nvkm_gpuobj_heap_fast = {
 	.release = nvkm_gpuobj_heap_release,
 	.rd32 = nvkm_gpuobj_rd32_fast,
 	.wr32 = nvkm_gpuobj_wr32_fast,
+	.map = nvkm_gpuobj_heap_map,
 };
 static const struct nvkm_gpuobj_func
@@ -74,6 +83,7 @@ nvkm_gpuobj_heap_slow = {
 	.release = nvkm_gpuobj_heap_release,
 	.rd32 = nvkm_gpuobj_heap_rd32,
 	.wr32 = nvkm_gpuobj_heap_wr32,
+	.map = nvkm_gpuobj_heap_map,
 };
 static void *
@@ -90,9 +100,19 @@ nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
 static const struct nvkm_gpuobj_func
 nvkm_gpuobj_heap = {
 	.acquire = nvkm_gpuobj_heap_acquire,
+	.map = nvkm_gpuobj_heap_map,
 };
 /* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+		struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		void *argv, u32 argc)
+{
+	return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
+			       vmm, vma, argv, argc);
+}
 static u32
 nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
@@ -118,6 +138,7 @@ nvkm_gpuobj_fast = {
 	.release = nvkm_gpuobj_release,
 	.rd32 = nvkm_gpuobj_rd32_fast,
 	.wr32 = nvkm_gpuobj_wr32_fast,
+	.map = nvkm_gpuobj_map,
 };
 static const struct nvkm_gpuobj_func
@@ -125,6 +146,7 @@ nvkm_gpuobj_slow = {
 	.release = nvkm_gpuobj_release,
 	.rd32 = nvkm_gpuobj_rd32,
 	.wr32 = nvkm_gpuobj_wr32,
+	.map = nvkm_gpuobj_map,
 };
 static void *
@@ -143,6 +165,7 @@ nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
 static const struct nvkm_gpuobj_func
 nvkm_gpuobj_func = {
 	.acquire = nvkm_gpuobj_acquire,
+	.map = nvkm_gpuobj_map,
 };
 static int
@@ -218,26 +241,6 @@ nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
 	return ret;
 }
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-		u32 access, struct nvkm_vma *vma)
-{
-	struct nvkm_memory *memory = gpuobj->memory;
-	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
-	if (ret == 0)
-		nvkm_memory_map(memory, vma, 0);
-	return ret;
-}
-void
-nvkm_gpuobj_unmap(struct nvkm_vma *vma)
-{
-	if (vma->node) {
-		nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-	}
-}
 /* the below is basically only here to support sharing the paged dma object
  * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
  * anywhere else.
...
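
With nvkm_gpuobj_map()/nvkm_gpuobj_unmap() removed above, former users open-code the same sequence against the new interface. A minimal sketch of that pattern, assuming a caller that already holds a vm and a struct nvkm_vma (the fifo and secboot hunks below are the actual conversions):

	struct nvkm_vma vma = {};
	int ret = nvkm_vm_get(vm, gpuobj->size, 12, NV_MEM_ACCESS_RW, &vma);
	if (ret == 0)
		ret = nvkm_memory_map(gpuobj->memory, 0, vm, &vma, NULL, 0);
	/* ... use the mapping ... */
	if (vma.vm) {
		nvkm_vm_unmap(&vma);
		nvkm_vm_put(&vma);
	}
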
@@ -591,8 +591,7 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
 	if (ret)
 		return ret;
-	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
-	return 0;
+	return nvkm_memory_map(fifo->user.mem, 0, bar, &fifo->user.bar, NULL, 0);
 }
 static void
...
@@ -841,8 +841,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	if (ret)
 		return ret;
-	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
-	return 0;
+	return nvkm_memory_map(fifo->user.mem, 0, bar, &fifo->user.bar, NULL, 0);
 }
 static void
...
@@ -126,7 +126,11 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
 {
 	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
+	if (vma->vm) {
+		nvkm_vm_unmap(vma);
+		nvkm_vm_put(vma);
+	}
 	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }
@@ -146,8 +150,13 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 	if (ret)
 		return ret;
-	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
-			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+	ret = nvkm_vm_get(chan->vm, chan->engn[engn].inst->size, 12,
+			  NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+	if (ret)
+		return ret;
+	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->vm,
+			       &chan->engn[engn].vma, NULL, 0);
 }
 static void
...
@@ -138,7 +138,11 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
+	if (vma->vm) {
+		nvkm_vm_unmap(vma);
+		nvkm_vm_put(vma);
+	}
 	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
 }
@@ -158,8 +162,13 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 	if (ret)
 		return ret;
-	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
-			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+	ret = nvkm_vm_get(chan->vm, chan->engn[engn].inst->size, 12,
+			  NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+	if (ret)
+		return ret;
+	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->vm,
+			       &chan->engn[engn].vma, NULL, 0);
 }
 static void
...
@@ -403,7 +403,10 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 	if (ret)
 		return ret;
-	nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+	ret = nvkm_memory_map(chan->mmio, 0, fifoch->vm,
+			      &chan->mmio_vma, NULL, 0);
+	if (ret)
+		return ret;
 	/* allocate buffers referenced by mmio list */
 	for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
@@ -419,7 +422,11 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
 		if (ret)
 			return ret;
-		nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
+		ret = nvkm_memory_map(chan->data[i].mem, 0, fifoch->vm,
+				      &chan->data[i].vma, NULL, 0);
+		if (ret)
+			return ret;
 		data++;
 	}
...
@@ -272,12 +272,13 @@ gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 	node->vaddr[offset / 4] = data;
 }
-static void
-gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+		  struct nvkm_vma *vma, void *argv, u32 argc)
 {
 	struct gk20a_instobj *node = gk20a_instobj(memory);
-	nvkm_vm_map_at(vma, offset, &node->mem);
+	nvkm_vm_map_at(vma, 0, &node->mem);
+	return 0;
 }
 static void *
...
@@ -159,7 +159,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	}
 	if (ret == 0)
-		nvkm_memory_map(memory, &bar, 0);
+		ret = nvkm_memory_map(memory, 0, vmm, &bar, NULL, 0);
 	mutex_lock(&subdev->mutex);
 	if (ret || iobj->bar.node) {
 		/* We either failed, or another thread beat us. */
@@ -179,11 +179,13 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	}
 }
-static void
-nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+		 struct nvkm_vma *vma, void *argv, u32 argc)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
 	nvkm_vm_map_at(vma, offset, iobj->mem);
+	return 0;
 }
 static void
...
@@ -48,12 +48,16 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
 		return ret;
 	/* Map the HS firmware so the HS bootloader can see it */
-	ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
+	ret = nvkm_vm_get(gsb->vm, blob->size, 12, NV_MEM_ACCESS_RW, &vma);
 	if (ret) {
 		nvkm_falcon_put(falcon, subdev);
 		return ret;
 	}
+	ret = nvkm_memory_map(blob, 0, gsb->vm, &vma, NULL, 0);
+	if (ret)
+		goto end;
 	/* Reset and set the falcon up */
 	ret = nvkm_falcon_reset(falcon);
 	if (ret)
@@ -91,7 +95,8 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
 	nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
 	/* We don't need the ACR firmware anymore */
-	nvkm_gpuobj_unmap(&vma);
+	nvkm_vm_unmap(&vma);
+	nvkm_vm_put(&vma);
 	nvkm_falcon_put(falcon, subdev);
 	return ret;
...