Commit eea5cf0f authored by Ben Skeggs

drm/nouveau/mmu: define user interfaces to mmu

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 68af607d
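
This commit exposes the MMU subdevice to clients as a per-device object class: they can query DMA addressing width, memory heaps, memory types, and the PTE kind table. A minimal, hypothetical usage sketch of the client-side wrapper introduced below (assumes an already-initialised struct nvif_device named "device"; the oclass would normally be chosen to match the GPU family, and error handling is trimmed):

	struct nvif_mmu mmu;
	int ret;

	/* Instantiate the MMU user object as a child of the device object. */
	ret = nvif_mmu_init(&device->object, NVIF_CLASS_MMU_GF100, &mmu);
	if (ret == 0) {
		/* The heap, type and kind tables are now cached client-side;
		 * see the nvif/mmu.h helpers added by this commit.
		 */
		nvif_mmu_fini(&mmu);
	}
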
@@ -14,6 +14,11 @@
#define NVIF_CLASS_SW_NV50                            /* if0005.h */ -0x00000006
#define NVIF_CLASS_SW_GF100                           /* if0005.h */ -0x00000007

#define NVIF_CLASS_MMU                                /* if0008.h */  0x80000008
#define NVIF_CLASS_MMU_NV04                           /* if0008.h */  0x80000009
#define NVIF_CLASS_MMU_NV50                           /* if0008.h */  0x80005009
#define NVIF_CLASS_MMU_GF100                          /* if0008.h */  0x80009009

#define NVIF_CLASS_MEM                                /* if000a.h */  0x8000000a
#define NVIF_CLASS_MEM_NV04                           /* if000b.h */  0x8000000b
#define NVIF_CLASS_MEM_NV50                           /* if500b.h */  0x8000500b

#ifndef __NVIF_IF0008_H__
#define __NVIF_IF0008_H__
struct nvif_mmu_v0 {
	__u8  version;
	__u8  dmabits;
	__u8  heap_nr;
	__u8  type_nr;
	__u16 kind_nr;
};

#define NVIF_MMU_V0_HEAP                                                   0x00
#define NVIF_MMU_V0_TYPE                                                   0x01
#define NVIF_MMU_V0_KIND                                                   0x02

struct nvif_mmu_heap_v0 {
	__u8  version;
	__u8  index;
	__u8  pad02[6];
	__u64 size;
};

struct nvif_mmu_type_v0 {
	__u8  version;
	__u8  index;
	__u8  heap;
	__u8  vram;
	__u8  host;
	__u8  comp;
	__u8  disp;
	__u8  kind;
	__u8  mappable;
	__u8  coherent;
	__u8  uncached;
};

struct nvif_mmu_kind_v0 {
	__u8  version;
	__u8  pad01[1];
	__u16 count;
	__u8  data[];
};
#endif

#ifndef __NVIF_MMU_H__
#define __NVIF_MMU_H__
#include <nvif/object.h>

struct nvif_mmu {
	struct nvif_object object;
	u8  dmabits;
	u8  heap_nr;
	u8  type_nr;
	u16 kind_nr;

	struct {
		u64 size;
	} *heap;

	struct {
#define NVIF_MEM_VRAM                                                      0x01
#define NVIF_MEM_HOST                                                      0x02
#define NVIF_MEM_COMP                                                      0x04
#define NVIF_MEM_DISP                                                      0x08
#define NVIF_MEM_KIND                                                      0x10
#define NVIF_MEM_MAPPABLE                                                  0x20
#define NVIF_MEM_COHERENT                                                  0x40
#define NVIF_MEM_UNCACHED                                                  0x80
		u8 type;
		u8 heap;
	} *type;

	u8 *kind;
};

int nvif_mmu_init(struct nvif_object *, s32 oclass, struct nvif_mmu *);
void nvif_mmu_fini(struct nvif_mmu *);

static inline bool
nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
{
	const u8 invalid = mmu->kind_nr - 1;
	if (kind) {
		if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
			return false;
	}
	return true;
}

static inline int
nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
{
	int i;
	for (i = 0; i < mmu->type_nr; i++) {
		if ((mmu->type[i].type & mask) == mask)
			return i;
	}
	return -EINVAL;
}
#endif
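
The nvif_mmu_type() helper above returns the index of the first advertised memory type whose flags include every bit in the requested mask. A hypothetical selection sketch (the fallback policy here is illustrative only, not part of this commit):

	static int
	example_pick_type(struct nvif_mmu *mmu)
	{
		/* Prefer CPU-mappable VRAM... */
		int type = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE);
		/* ...otherwise settle for any mappable host memory type. */
		if (type < 0)
			type = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
		return type; /* index into mmu->type[], or -EINVAL */
	}
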
@@ -134,6 +134,8 @@ struct nvkm_mmu {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp;

	struct nvkm_device_oclass user;
};

int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);

@@ -2,4 +2,5 @@ nvif-y := nvif/object.o
nvif-y += nvif/client.o
nvif-y += nvif/device.o
nvif-y += nvif/driver.o
nvif-y += nvif/mmu.o
nvif-y += nvif/notify.o

/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/mmu.h>

#include <nvif/class.h>
#include <nvif/if0008.h>

void
nvif_mmu_fini(struct nvif_mmu *mmu)
{
	kfree(mmu->kind);
	kfree(mmu->type);
	kfree(mmu->heap);
	nvif_object_fini(&mmu->object);
}

int
nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
{
	struct nvif_mmu_v0 args;
	int ret, i;

	args.version = 0;
	mmu->heap = NULL;
	mmu->type = NULL;
	mmu->kind = NULL;

	ret = nvif_object_init(parent, 0, oclass, &args, sizeof(args),
			       &mmu->object);
	if (ret)
		goto done;

	mmu->dmabits = args.dmabits;
	mmu->heap_nr = args.heap_nr;
	mmu->type_nr = args.type_nr;
	mmu->kind_nr = args.kind_nr;

	mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
	mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
	if (ret = -ENOMEM, !mmu->heap || !mmu->type)
		goto done;

	mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
	if (!mmu->kind && mmu->kind_nr)
		goto done;

	for (i = 0; i < mmu->heap_nr; i++) {
		struct nvif_mmu_heap_v0 args = { .index = i };

		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_HEAP,
				       &args, sizeof(args));
		if (ret)
			goto done;

		mmu->heap[i].size = args.size;
	}

	for (i = 0; i < mmu->type_nr; i++) {
		struct nvif_mmu_type_v0 args = { .index = i };

		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_TYPE,
				       &args, sizeof(args));
		if (ret)
			goto done;

		mmu->type[i].type = 0;
		if (args.vram)     mmu->type[i].type |= NVIF_MEM_VRAM;
		if (args.host)     mmu->type[i].type |= NVIF_MEM_HOST;
		if (args.comp)     mmu->type[i].type |= NVIF_MEM_COMP;
		if (args.disp)     mmu->type[i].type |= NVIF_MEM_DISP;
		if (args.kind)     mmu->type[i].type |= NVIF_MEM_KIND;
		if (args.mappable) mmu->type[i].type |= NVIF_MEM_MAPPABLE;
		if (args.coherent) mmu->type[i].type |= NVIF_MEM_COHERENT;
		if (args.uncached) mmu->type[i].type |= NVIF_MEM_UNCACHED;
		mmu->type[i].heap = args.heap;
	}

	if (mmu->kind_nr) {
		struct nvif_mmu_kind_v0 *kind;
		u32 argc = sizeof(*kind) + sizeof(*kind->data) * mmu->kind_nr;

		if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
			goto done;
		kind->version = 0;
		kind->count = mmu->kind_nr;

		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND,
				       kind, argc);
		if (ret == 0)
			memcpy(mmu->kind, kind->data, kind->count);
		kfree(kind);
	}

done:
	if (ret)
		nvif_mmu_fini(mmu);
	return ret;
}

@@ -294,6 +294,11 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
	if (!sclass) {
		switch (index) {
		case 0: sclass = &nvkm_control_oclass; break;
		case 1:
			if (!device->mmu)
				return -EINVAL;
			sclass = &device->mmu->user;
			break;
		default:
			return -EINVAL;
		}

@@ -29,3 +29,5 @@ nvkm-y += nvkm/subdev/mmu/vmmgm200.o
nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
nvkm-y += nvkm/subdev/mmu/vmmgp100.o
nvkm-y += nvkm/subdev/mmu/vmmgp10b.o

nvkm-y += nvkm/subdev/mmu/ummu.o

@@ -21,7 +21,7 @@
 *
 * Authors: Ben Skeggs
 */
-#include "priv.h"
+#include "ummu.h"
#include "vmm.h"

#include <subdev/bar.h>

@@ -615,6 +615,8 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
	nvkm_mmu_ptc_init(mmu);
	mmu->user.ctor = nvkm_ummu_new;
	mmu->user.base = func->mmu.user;
}

int

@@ -29,6 +29,7 @@ g84_mmu = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 16,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
	.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
	.kind = nv50_mmu_kind,

@@ -77,6 +77,7 @@ gf100_mmu = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 17,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
	.kind = gf100_mmu_kind,

@@ -29,6 +29,7 @@ gk104_mmu = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 17,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
	.kind = gf100_mmu_kind,

@@ -29,6 +29,7 @@ gk20a_mmu = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 17,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
	.kind = gf100_mmu_kind,

@@ -73,6 +73,7 @@ gm200_mmu = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 17,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
	.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
	.kind = gm200_mmu_kind,

@@ -84,6 +85,7 @@ gm200_mmu_fixed = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 17,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
	.kind = gm200_mmu_kind,

@@ -31,6 +31,7 @@ gm20b_mmu = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 17,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
	.vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
	.kind = gm200_mmu_kind,

@@ -42,6 +43,7 @@ gm20b_mmu_fixed = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 17,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
	.kind = gm200_mmu_kind,

@@ -31,6 +31,7 @@ gp100_mmu = {
	.limit = (1ULL << 49),
	.dma_bits = 47,
	.lpg_shift = 16,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
	.kind = gm200_mmu_kind,

@@ -31,6 +31,7 @@ gp10b_mmu = {
	.limit = (1ULL << 49),
	.dma_bits = 47,
	.lpg_shift = 16,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
	.mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
	.kind = gm200_mmu_kind,

@@ -33,6 +33,7 @@ nv04_mmu = {
	.limit = NV04_PDMA_SIZE,
	.dma_bits = 32,
	.lpg_shift = 12,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
	.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
};

@@ -45,6 +45,7 @@ nv41_mmu = {
	.limit = NV41_GART_SIZE,
	.dma_bits = 39,
	.lpg_shift = 12,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
	.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
};

@@ -60,6 +60,7 @@ nv44_mmu = {
	.limit = NV44_GART_SIZE,
	.dma_bits = 39,
	.lpg_shift = 12,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
	.mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
};

@@ -65,6 +65,7 @@ nv50_mmu = {
	.limit = (1ULL << 40),
	.dma_bits = 40,
	.lpg_shift = 16,
	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
	.mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
	.kind = nv50_mmu_kind,

@@ -15,6 +15,10 @@ struct nvkm_mmu_func {
	u8 dma_bits;
	u8 lpg_shift;

	struct {
		struct nvkm_sclass user;
	} mmu;

	struct {
		struct nvkm_sclass user;
		int (*vram)(struct nvkm_mmu *, int type, u8 page, u64 size,

/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "ummu.h"
#include <nvif/if0008.h>
#include <nvif/unpack.h>
static int
nvkm_ummu_heap(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
struct nvkm_mmu *mmu = ummu->mmu;
union {
struct nvif_mmu_heap_v0 v0;
} *args = argv;
int ret = -ENOSYS;
u8 index;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
if ((index = args->v0.index) >= mmu->heap_nr)
return -EINVAL;
args->v0.size = mmu->heap[index].size;
} else
return ret;
return 0;
}
static int
nvkm_ummu_type(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
struct nvkm_mmu *mmu = ummu->mmu;
union {
struct nvif_mmu_type_v0 v0;
} *args = argv;
int ret = -ENOSYS;
u8 type, index;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
if ((index = args->v0.index) >= mmu->type_nr)
return -EINVAL;
type = mmu->type[index].type;
args->v0.heap = mmu->type[index].heap;
args->v0.vram = !!(type & NVKM_MEM_VRAM);
args->v0.host = !!(type & NVKM_MEM_HOST);
args->v0.comp = !!(type & NVKM_MEM_COMP);
args->v0.disp = !!(type & NVKM_MEM_DISP);
args->v0.kind = !!(type & NVKM_MEM_KIND);
args->v0.mappable = !!(type & NVKM_MEM_MAPPABLE);
args->v0.coherent = !!(type & NVKM_MEM_COHERENT);
args->v0.uncached = !!(type & NVKM_MEM_UNCACHED);
} else
return ret;
return 0;
}
static int
nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
{
struct nvkm_mmu *mmu = ummu->mmu;
union {
struct nvif_mmu_kind_v0 v0;
} *args = argv;
const u8 *kind = NULL;
int ret = -ENOSYS, count = 0;
if (mmu->func->kind)
kind = mmu->func->kind(mmu, &count);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
if (argc != args->v0.count * sizeof(*args->v0.data))
return -EINVAL;
if (args->v0.count > count)
return -EINVAL;
memcpy(args->v0.data, kind, args->v0.count);
} else
return ret;
return 0;
}
static int
nvkm_ummu_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_ummu *ummu = nvkm_ummu(object);
switch (mthd) {
case NVIF_MMU_V0_HEAP: return nvkm_ummu_heap(ummu, argv, argc);
case NVIF_MMU_V0_TYPE: return nvkm_ummu_type(ummu, argv, argc);
case NVIF_MMU_V0_KIND: return nvkm_ummu_kind(ummu, argv, argc);
default:
break;
}
return -EINVAL;
}
static const struct nvkm_object_func
nvkm_ummu = {
.mthd = nvkm_ummu_mthd,
};
int
nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
{
union {
struct nvif_mmu_v0 v0;
} *args = argv;
struct nvkm_mmu *mmu = device->mmu;
struct nvkm_ummu *ummu;
int ret = -ENOSYS, kinds = 0;
if (mmu->func->kind)
mmu->func->kind(mmu, &kinds);
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
args->v0.dmabits = mmu->dma_bits;
args->v0.heap_nr = mmu->heap_nr;
args->v0.type_nr = mmu->type_nr;
args->v0.kind_nr = kinds;
} else
return ret;
if (!(ummu = kzalloc(sizeof(*ummu), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nvkm_ummu, oclass, &ummu->object);
ummu->mmu = mmu;
*pobject = &ummu->object;
return 0;
}
#ifndef __NVKM_UMMU_H__
#define __NVKM_UMMU_H__
#define nvkm_ummu(p) container_of((p), struct nvkm_ummu, object)
#include <core/object.h>
#include "priv.h"

struct nvkm_ummu {
	struct nvkm_object object;
	struct nvkm_mmu *mmu;
};

int nvkm_ummu_new(struct nvkm_device *, const struct nvkm_oclass *,
		  void *argv, u32 argc, struct nvkm_object **);
#endif