Commit 4d058fab authored by Ben Skeggs

drm/nouveau/core/mm: have users explicitly define heap identifiers

Different sections of VRAM may have different properties (i.e. can't be used
for compression/display, can't be mapped, etc.).

We already support this, but it's a bit magic.  This change makes it more
obvious where we're allocating from.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 24e8375b
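
In short: nvkm_mm_init() grows a heap parameter, each free-range node records
that caller-chosen identifier (instead of an implicit 1-based counter), and
nvkm_mm_head()/nvkm_mm_tail() only match nodes from the requested heap.  A
minimal stand-alone sketch of the idea (user-space C, not kernel code;
HEAP_*, struct range and alloc() are illustrative stand-ins for
NVKM_MM_HEAP_ANY/NVKM_RAM_MM_*, struct nvkm_mm_node and nvkm_mm_head()):

/*
 * Model of the scheme in this commit, NOT kernel code: every free range
 * carries an explicit, caller-chosen heap identifier, and an allocation
 * request either names a heap or passes an "any" sentinel.
 */
#include <stdio.h>

#define HEAP_ANY    0xff
#define HEAP_NORMAL 0x01
#define HEAP_MIXED  0x03

struct range { unsigned char heap; unsigned int offset, length; };

/* First-fit over the free list, restricted to the requested heap. */
static struct range *alloc(struct range *list, int n, unsigned char heap,
                           unsigned int size)
{
        for (int i = 0; i < n; i++) {
                if (heap != HEAP_ANY && list[i].heap != heap)
                        continue;       /* wrong heap, skip */
                if (list[i].length >= size)
                        return &list[i];
        }
        return NULL;                    /* roughly -ENOSPC */
}

int main(void)
{
        /* Two VRAM ranges registered as distinct heaps, the way
         * gf100_ram_ctor() below tags NORMAL and MIXED memory. */
        struct range vram[] = {
                { HEAP_NORMAL, 0x0000, 0x4000 },
                { HEAP_MIXED,  0x8000, 0x8000 },
        };

        /* A display-capable buffer must come from the normal heap... */
        struct range *a = alloc(vram, 2, HEAP_NORMAL, 0x1000);
        /* ...while a don't-care allocation may land in either. */
        struct range *b = alloc(vram, 2, HEAP_ANY, 0x6000);

        printf("normal: %#x  any: %#x\n",
               a ? a->offset : ~0u, b ? b->offset : ~0u);
        return 0;
}

The diff below threads exactly this identifier through every nvkm_mm_init()
caller.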
...
@@ -30,7 +30,7 @@ nvkm_mm_initialised(struct nvkm_mm *mm)
 	return mm->heap_nodes;
 }
 
-int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
 int nvkm_mm_fini(struct nvkm_mm *);
 int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		 u32 size_min, u32 align, struct nvkm_mm_node **);
...
@@ -123,6 +123,10 @@ struct nvkm_ram {
 	u64 size;
 
 #define NVKM_RAM_MM_SHIFT 12
+#define NVKM_RAM_MM_ANY    (NVKM_MM_HEAP_ANY + 0)
+#define NVKM_RAM_MM_NORMAL (NVKM_MM_HEAP_ANY + 1)
+#define NVKM_RAM_MM_NOMAP  (NVKM_MM_HEAP_ANY + 2)
+#define NVKM_RAM_MM_MIXED  (NVKM_MM_HEAP_ANY + 3)
 	struct nvkm_mm vram;
 	struct nvkm_mm tags;
 	u64 stolen;
...
@@ -340,7 +340,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	if (ret)
 		goto done;
 
-	ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
 done:
 	if (ret)
 		nouveau_abi16_chan_fini(abi16, chan);
...
@@ -185,7 +185,7 @@ nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
 		gpuobj->size = nvkm_memory_size(gpuobj->memory);
 	}
 
-	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
 }
 
 void
...
@@ -237,7 +237,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 }
 
 int
-nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
 {
 	struct nvkm_mm_node *node, *prev;
 	u32 next;
...
@@ -274,7 +274,8 @@ nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
 
 	list_add_tail(&node->nl_entry, &mm->nodes);
 	list_add_tail(&node->fl_entry, &mm->free);
-	node->heap = ++mm->heap_nodes;
+	node->heap = heap;
+	mm->heap_nodes++;
 	return 0;
 }
...
@@ -136,7 +136,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 	if (ret)
 		goto free_domain;
 
-	ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+	ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
 			   (1ULL << tdev->func->iommu_bit) >>
 			   tdev->iommu.pgshift, 1);
 	if (ret)
...
@@ -73,13 +73,14 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 	ram->size = size;
 
 	if (!nvkm_mm_initialised(&ram->vram)) {
-		ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
+				   size >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 	}
 
 	if (!nvkm_mm_initialised(&ram->tags)) {
-		ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
+		ret = nvkm_mm_init(&ram->tags, 0, 0, tags ? ++tags : 0, 1);
 		if (ret)
 			return ret;
 	}
...
@@ -617,7 +617,8 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 	 */
 	if (lower != total) {
 		/* The common memory amount is addressed normally. */
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+				   rsvd_head >> NVKM_RAM_MM_SHIFT,
 				   (lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
...
@@ -625,13 +626,15 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
 		/* And the rest is much higher in the physical address
 		 * space, and may not be usable for certain operations.
 		 */
-		ret = nvkm_mm_init(&ram->vram, ubase >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_MIXED,
+				   ubase >> NVKM_RAM_MM_SHIFT,
 				   (usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 	} else {
 		/* GPUs without mixed-memory are a lot nicer... */
-		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+		ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+				   rsvd_head >> NVKM_RAM_MM_SHIFT,
 				   (total - rsvd_head - rsvd_tail) >>
 				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
...
@@ -81,7 +81,8 @@ mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 	ram->base.stolen = base;
 	nvkm_mm_fini(&ram->base.vram);
 
-	return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+	return nvkm_mm_init(&ram->base.vram, NVKM_RAM_MM_NORMAL,
+			    rsvd_head >> NVKM_RAM_MM_SHIFT,
 			    (size - rsvd_head - rsvd_tail) >>
 			    NVKM_RAM_MM_SHIFT, 1);
 }
...
@@ -669,7 +669,8 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
 	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
 	nvkm_mm_fini(&ram->vram);
 
-	return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+	return nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+			    rsvd_head >> NVKM_RAM_MM_SHIFT,
 			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
 			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
 }
...
@@ -165,7 +165,7 @@ nv04_instmem_oneinit(struct nvkm_instmem *base)
 	/* PRAMIN aperture maps over the end of VRAM, reserve it */
 	imem->base.reserved = 512 * 1024;
 
-	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
...
@@ -177,7 +177,7 @@ nv40_instmem_oneinit(struct nvkm_instmem *base)
 	imem->base.reserved += 512 * 1024; /* object storage */
 	imem->base.reserved = round_up(imem->base.reserved, 4096);
 
-	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
...
@@ -183,8 +183,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 		tag_size += tag_align;
 		tag_size = (tag_size + 0xfff) >> 12; /* round up */
 
-		ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
-				   &ltc->tag_ram);
+		ret = nvkm_mm_tail(&ram->vram, NVKM_RAM_MM_NORMAL, 1, tag_size,
+				   tag_size, 1, &ltc->tag_ram);
 		if (ret) {
 			ltc->num_tags = 0;
 		} else {
...
@@ -197,7 +197,7 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 	}
 
 mm_init:
-	return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
+	return nvkm_mm_init(&ltc->tags, 0, 0, ltc->num_tags, 1);
 }
 
 int
...
@@ -45,7 +45,7 @@ gp100_ltc_oneinit(struct nvkm_ltc *ltc)
 	ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
 	ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
 	/*XXX: tagram allocation - TBD */
-	return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+	return nvkm_mm_init(&ltc->tags, 0, 0, 0, 1);
 }
 
 static void
...
@@ -388,7 +388,7 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
 		return -ENOMEM;
 	}
 
-	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+	ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
 			   block >> 12);
 	if (ret) {
 		vfree(vm->pgt);
...
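
For illustration, a hedged sketch of what a post-change allocation looks like
(not part of this commit: the ram and node variables, the type value and the
request size are made up; only the nvkm_mm_head() signature and the
NVKM_RAM_MM_NORMAL identifier come from the hunks above):

	struct nvkm_mm_node *node;
	int ret;

	/* Request 16 units (of 1 << NVKM_RAM_MM_SHIFT bytes) from the head
	 * of VRAM, restricted to the displayable/compressible NORMAL heap;
	 * nodes tagged NVKM_RAM_MM_MIXED are never considered. */
	ret = nvkm_mm_head(&ram->vram, NVKM_RAM_MM_NORMAL, 1, 16, 16, 1, &node);
	if (ret)
		return ret;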