Commit 4acfd707 authored by Ben Skeggs

drm/nouveau/dma: audit and version NV_DMA classes

The full object interfaces are about to be exposed to userspace, so we
need to check for any security-related issues and version the structs
to make it easier to handle any changes we may need in the future.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent b2c81703
......@@ -23,9 +23,12 @@
*/
#include <core/object.h>
#include <core/class.h>
#include <core/client.h>
#include <nvif/unpack.h>
#include <nvif/class.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include "priv.h"
......@@ -57,57 +60,87 @@ nvkm_dmaobj_create_(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void **pdata, u32 *psize,
int length, void **pobject)
{
struct nv_dma_class *args = *pdata;
union {
struct nv_dma_v0 v0;
} *args = *pdata;
struct nouveau_instmem *instmem = nouveau_instmem(parent);
struct nouveau_client *client = nouveau_client(parent);
struct nouveau_device *device = nv_device(parent);
struct nouveau_fb *pfb = nouveau_fb(parent);
struct nouveau_dmaobj *dmaobj;
void *data = *pdata;
u32 size = *psize;
int ret;
if (*psize < sizeof(*args))
return -EINVAL;
*pdata = &args->conf0;
ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
dmaobj = *pobject;
if (ret)
return ret;
switch (args->flags & NV_DMA_TARGET_MASK) {
case NV_DMA_TARGET_VM:
nv_ioctl(parent, "create dma size %d\n", *psize);
if (nvif_unpack(args->v0, 0, 0, true)) {
nv_ioctl(parent, "create dma vers %d target %d access %d "
"start %016llx limit %016llx\n",
args->v0.version, args->v0.target, args->v0.access,
args->v0.start, args->v0.limit);
dmaobj->target = args->v0.target;
dmaobj->access = args->v0.access;
dmaobj->start = args->v0.start;
dmaobj->limit = args->v0.limit;
} else
return ret;
*pdata = data;
*psize = size;
if (dmaobj->start > dmaobj->limit)
return -EINVAL;
switch (dmaobj->target) {
case NV_DMA_V0_TARGET_VM:
dmaobj->target = NV_MEM_TARGET_VM;
break;
case NV_DMA_TARGET_VRAM:
case NV_DMA_V0_TARGET_VRAM:
if (!client->super) {
if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
return -EACCES;
if (device->card_type >= NV_50)
return -EACCES;
}
dmaobj->target = NV_MEM_TARGET_VRAM;
break;
case NV_DMA_TARGET_PCI:
case NV_DMA_V0_TARGET_PCI:
if (!client->super)
return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI;
break;
case NV_DMA_TARGET_PCI_US:
case NV_DMA_TARGET_AGP:
case NV_DMA_V0_TARGET_PCI_US:
case NV_DMA_V0_TARGET_AGP:
if (!client->super)
return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
return -EINVAL;
}
switch (args->flags & NV_DMA_ACCESS_MASK) {
case NV_DMA_ACCESS_VM:
switch (dmaobj->access) {
case NV_DMA_V0_ACCESS_VM:
dmaobj->access = NV_MEM_ACCESS_VM;
break;
case NV_DMA_ACCESS_RD:
case NV_DMA_V0_ACCESS_RD:
dmaobj->access = NV_MEM_ACCESS_RO;
break;
case NV_DMA_ACCESS_WR:
case NV_DMA_V0_ACCESS_WR:
dmaobj->access = NV_MEM_ACCESS_WO;
break;
case NV_DMA_ACCESS_RDWR:
case NV_DMA_V0_ACCESS_RDWR:
dmaobj->access = NV_MEM_ACCESS_RW;
break;
default:
return -EINVAL;
}
dmaobj->start = args->start;
dmaobj->limit = args->limit;
dmaobj->conf0 = args->conf0;
return ret;
}
......
......@@ -24,6 +24,7 @@
#include <core/gpuobj.h>
#include <core/class.h>
#include <nvif/class.h>
#include <subdev/fb.h>
#include <subdev/vm/nv04.h>
......@@ -94,7 +95,7 @@ nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
*pobject = nv_object(priv);
if (ret)
if (ret || (ret = -ENOSYS, size))
return ret;
if (priv->base.target == NV_MEM_TARGET_VM) {
......@@ -145,9 +146,9 @@ nv04_dmaobj_ofuncs = {
static struct nouveau_oclass
nv04_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY_CLASS, &nv04_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY_CLASS, &nv04_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY_CLASS, &nv04_dmaobj_ofuncs },
{ NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
{}
};
......
......@@ -22,8 +22,11 @@
* Authors: Ben Skeggs
*/
#include <core/client.h>
#include <core/gpuobj.h>
#include <core/class.h>
#include <nvif/unpack.h>
#include <nvif/class.h>
#include <subdev/fb.h>
......@@ -90,10 +93,11 @@ nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_dmaeng *dmaeng = (void *)engine;
struct nv50_dmaobj_priv *priv;
union {
u32 conf0;
struct nv50_dma_v0 v0;
} *args;
struct nv50_dmaobj_priv *priv;
u32 user, part, comp, kind;
int ret;
ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
......@@ -102,24 +106,36 @@ nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
args = data;
if (!(args->conf0 & NV50_DMA_CONF0_ENABLE)) {
if (priv->base.target == NV_MEM_TARGET_VM) {
args->conf0 = NV50_DMA_CONF0_PRIV_VM;
args->conf0 |= NV50_DMA_CONF0_PART_VM;
args->conf0 |= NV50_DMA_CONF0_COMP_VM;
args->conf0 |= NV50_DMA_CONF0_TYPE_VM;
nv_ioctl(parent, "create nv50 dma size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
"comp %d kind %02x\n", args->v0.version,
args->v0.priv, args->v0.part, args->v0.comp,
args->v0.kind);
user = args->v0.priv;
part = args->v0.part;
comp = args->v0.comp;
kind = args->v0.kind;
} else
if (size == 0) {
if (priv->base.target != NV_MEM_TARGET_VM) {
user = NV50_DMA_V0_PRIV_US;
part = NV50_DMA_V0_PART_256;
comp = NV50_DMA_V0_COMP_NONE;
kind = NV50_DMA_V0_KIND_PITCH;
} else {
args->conf0 = NV50_DMA_CONF0_PRIV_US;
args->conf0 |= NV50_DMA_CONF0_PART_256;
args->conf0 |= NV50_DMA_CONF0_COMP_NONE;
args->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
user = NV50_DMA_V0_PRIV_VM;
part = NV50_DMA_V0_PART_VM;
comp = NV50_DMA_V0_COMP_VM;
kind = NV50_DMA_V0_KIND_VM;
}
}
} else
return ret;
priv->flags0 |= (args->conf0 & NV50_DMA_CONF0_COMP) << 22;
priv->flags0 |= (args->conf0 & NV50_DMA_CONF0_TYPE) << 22;
priv->flags0 |= (args->conf0 & NV50_DMA_CONF0_PRIV);
priv->flags5 |= (args->conf0 & NV50_DMA_CONF0_PART);
if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
return -EINVAL;
priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
priv->flags5 = (part << 16);
switch (priv->base.target) {
case NV_MEM_TARGET_VM:
......@@ -165,9 +181,9 @@ nv50_dmaobj_ofuncs = {
static struct nouveau_oclass
nv50_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY_CLASS, &nv50_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY_CLASS, &nv50_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY_CLASS, &nv50_dmaobj_ofuncs },
{ NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
{}
};
......
......@@ -22,9 +22,12 @@
* Authors: Ben Skeggs
*/
#include <core/client.h>
#include <core/device.h>
#include <core/gpuobj.h>
#include <core/class.h>
#include <nvif/unpack.h>
#include <nvif/class.h>
#include <subdev/fb.h>
......@@ -76,10 +79,11 @@ nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_dmaeng *dmaeng = (void *)engine;
struct nvc0_dmaobj_priv *priv;
union {
u32 conf0;
struct gf100_dma_v0 v0;
} *args;
struct nvc0_dmaobj_priv *priv;
u32 kind, user, unkn;
int ret;
ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
......@@ -88,20 +92,31 @@ nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
args = data;
if (!(args->conf0 & NVC0_DMA_CONF0_ENABLE)) {
if (priv->base.target == NV_MEM_TARGET_VM) {
args->conf0 = NVC0_DMA_CONF0_PRIV_VM;
args->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
nv_ioctl(parent, "create gf100 dma size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
args->v0.version, args->v0.priv, args->v0.kind);
kind = args->v0.kind;
user = args->v0.priv;
unkn = 0;
} else
if (size == 0) {
if (priv->base.target != NV_MEM_TARGET_VM) {
kind = GF100_DMA_V0_KIND_PITCH;
user = GF100_DMA_V0_PRIV_US;
unkn = 2;
} else {
args->conf0 = NVC0_DMA_CONF0_PRIV_US;
args->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
args->conf0 |= 0x00020000;
kind = GF100_DMA_V0_KIND_VM;
user = GF100_DMA_V0_PRIV_VM;
unkn = 0;
}
}
} else
return ret;
priv->flags0 |= (args->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
priv->flags0 |= (args->conf0 & NVC0_DMA_CONF0_PRIV);
priv->flags5 |= (args->conf0 & NVC0_DMA_CONF0_UNKN);
if (user > 2)
return -EINVAL;
priv->flags0 |= (kind << 22) | (user << 20);
priv->flags5 |= (unkn << 16);
switch (priv->base.target) {
case NV_MEM_TARGET_VM:
......@@ -145,9 +160,9 @@ nvc0_dmaobj_ofuncs = {
static struct nouveau_oclass
nvc0_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY_CLASS, &nvc0_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY_CLASS, &nvc0_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY_CLASS, &nvc0_dmaobj_ofuncs },
{ NV_DMA_FROM_MEMORY, &nvc0_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &nvc0_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY, &nvc0_dmaobj_ofuncs },
{}
};
......
......@@ -22,9 +22,12 @@
* Authors: Ben Skeggs
*/
#include <core/client.h>
#include <core/device.h>
#include <core/gpuobj.h>
#include <core/class.h>
#include <nvif/unpack.h>
#include <nvif/class.h>
#include <subdev/fb.h>
......@@ -83,10 +86,11 @@ nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_object **pobject)
{
struct nouveau_dmaeng *dmaeng = (void *)engine;
struct nvd0_dmaobj_priv *priv;
union {
u32 conf0;
struct gf110_dma_v0 v0;
} *args;
struct nvd0_dmaobj_priv *priv;
u32 kind, page;
int ret;
ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
......@@ -95,18 +99,27 @@ nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
args = data;
if (!(args->conf0 & NVD0_DMA_CONF0_ENABLE)) {
if (priv->base.target == NV_MEM_TARGET_VM) {
args->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
args->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
nv_ioctl(parent, "create gf110 dma size %d\n", size);
if (nvif_unpack(args->v0, 0, 0, false)) {
nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
args->v0.version, args->v0.page, args->v0.kind);
kind = args->v0.kind;
page = args->v0.page;
} else
if (size == 0) {
if (priv->base.target != NV_MEM_TARGET_VM) {
kind = GF110_DMA_V0_KIND_PITCH;
page = GF110_DMA_V0_PAGE_SP;
} else {
args->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
args->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
kind = GF110_DMA_V0_KIND_VM;
page = GF110_DMA_V0_PAGE_LP;
}
}
} else
return ret;
priv->flags0 |= (args->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
priv->flags0 |= (args->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
if (page > 1)
return -EINVAL;
priv->flags0 = (kind << 20) | (page << 6);
switch (priv->base.target) {
case NV_MEM_TARGET_VRAM:
......@@ -138,9 +151,9 @@ nvd0_dmaobj_ofuncs = {
static struct nouveau_oclass
nvd0_dmaeng_sclass[] = {
{ NV_DMA_FROM_MEMORY_CLASS, &nvd0_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY_CLASS, &nvd0_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY_CLASS, &nvd0_dmaobj_ofuncs },
{ NV_DMA_FROM_MEMORY, &nvd0_dmaobj_ofuncs },
{ NV_DMA_TO_MEMORY, &nvd0_dmaobj_ofuncs },
{ NV_DMA_IN_MEMORY, &nvd0_dmaobj_ofuncs },
{}
};
......
......@@ -76,8 +76,8 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
dmaeng = (void *)chan->pushdma->base.engine;
switch (chan->pushdma->base.oclass->handle) {
case NV_DMA_FROM_MEMORY_CLASS:
case NV_DMA_IN_MEMORY_CLASS:
case NV_DMA_FROM_MEMORY:
case NV_DMA_IN_MEMORY:
break;
default:
return -EINVAL;
......
......@@ -3,73 +3,6 @@
#include <nvif/class.h>
/* DMA object classes
*
* 0002: NV_DMA_FROM_MEMORY
* 0003: NV_DMA_TO_MEMORY
* 003d: NV_DMA_IN_MEMORY
*/
#define NV_DMA_FROM_MEMORY_CLASS 0x00000002
#define NV_DMA_TO_MEMORY_CLASS 0x00000003
#define NV_DMA_IN_MEMORY_CLASS 0x0000003d
#define NV_DMA_TARGET_MASK 0x000000ff
#define NV_DMA_TARGET_VM 0x00000000
#define NV_DMA_TARGET_VRAM 0x00000001
#define NV_DMA_TARGET_PCI 0x00000002
#define NV_DMA_TARGET_PCI_US 0x00000003
#define NV_DMA_TARGET_AGP 0x00000004
#define NV_DMA_ACCESS_MASK 0x00000f00
#define NV_DMA_ACCESS_VM 0x00000000
#define NV_DMA_ACCESS_RD 0x00000100
#define NV_DMA_ACCESS_WR 0x00000200
#define NV_DMA_ACCESS_RDWR 0x00000300
/* NV50:NVC0 */
#define NV50_DMA_CONF0_ENABLE 0x80000000
#define NV50_DMA_CONF0_PRIV 0x00300000
#define NV50_DMA_CONF0_PRIV_VM 0x00000000
#define NV50_DMA_CONF0_PRIV_US 0x00100000
#define NV50_DMA_CONF0_PRIV__S 0x00200000
#define NV50_DMA_CONF0_PART 0x00030000
#define NV50_DMA_CONF0_PART_VM 0x00000000
#define NV50_DMA_CONF0_PART_256 0x00010000
#define NV50_DMA_CONF0_PART_1KB 0x00020000
#define NV50_DMA_CONF0_COMP 0x00000180
#define NV50_DMA_CONF0_COMP_NONE 0x00000000
#define NV50_DMA_CONF0_COMP_VM 0x00000180
#define NV50_DMA_CONF0_TYPE 0x0000007f
#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
/* NVC0:NVD9 */
#define NVC0_DMA_CONF0_ENABLE 0x80000000
#define NVC0_DMA_CONF0_PRIV 0x00300000
#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
#define NVC0_DMA_CONF0_PRIV_US 0x00100000
#define NVC0_DMA_CONF0_PRIV__S 0x00200000
#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
#define NVC0_DMA_CONF0_TYPE 0x000000ff
#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
/* NVD9- */
#define NVD0_DMA_CONF0_ENABLE 0x80000000
#define NVD0_DMA_CONF0_PAGE 0x00000400
#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
#define NVD0_DMA_CONF0_TYPE 0x000000ff
#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
struct nv_dma_class {
u32 flags;
u32 pad0;
u64 start;
u64 limit;
u32 conf0;
};
/* Perfmon counter class
*
* XXXX: NV_PERFCTR
......
......@@ -12,7 +12,6 @@ struct nouveau_dmaobj {
u32 access;
u64 start;
u64 limit;
u32 conf0;
};
struct nouveau_dmaeng {
......
......@@ -413,7 +413,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct {
struct nvif_ioctl_v0 ioctl;
struct nvif_ioctl_new_v0 new;
struct nv_dma_class ctxdma;
struct nv_dma_v0 ctxdma;
} args = {
.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
.ioctl.type = NVIF_IOCTL_V0_NEW,
......@@ -423,7 +423,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
.ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
.new.route = NVDRM_OBJECT_ABI16,
.new.handle = info->handle,
.new.oclass = NV_DMA_IN_MEMORY_CLASS,
.new.oclass = NV_DMA_IN_MEMORY,
};
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
......@@ -460,17 +460,20 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
args.ctxdma.start = ntfy->node->offset;
args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
args.ctxdma.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
args.ctxdma.target = NV_DMA_V0_TARGET_VM;
args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
args.ctxdma.start += chan->ntfy_vma.offset;
args.ctxdma.limit += chan->ntfy_vma.offset;
} else
if (drm->agp.stat == ENABLED) {
args.ctxdma.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
client->super = true;
} else {
args.ctxdma.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
args.ctxdma.target = NV_DMA_V0_TARGET_VM;
args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
args.ctxdma.start += chan->ntfy->bo.offset;
args.ctxdma.limit += chan->ntfy->bo.offset;
}
......
......@@ -91,7 +91,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
struct nouveau_instmem *imem = nvkm_instmem(device);
struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
struct nouveau_fb *pfb = nvkm_fb(device);
struct nv_dma_class args = {};
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
u32 target;
int ret;
......@@ -135,7 +135,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
return ret;
}
args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
args.limit = cli->vm->vmm->limit - 1;
} else
......@@ -146,29 +147,33 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
* the framebuffer bar rather than direct vram access..
* nfi why this exists, it came from the -nv ddx.
*/
args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = nv_device_resource_start(nvkm_device(device), 1);
args.limit = args.start + limit;
} else {
args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = limit;
}
} else {
if (chan->drm->agp.stat == ENABLED) {
args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
args.limit = chan->drm->agp.base +
chan->drm->agp.size - 1;
} else {
args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = vmm->limit - 1;
}
}
ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
(handle & 0xffff), NV_DMA_FROM_MEMORY_CLASS,
(handle & 0xffff), NV_DMA_FROM_MEMORY,
&args, sizeof(args), &chan->push.ctxdma);
if (ret) {
nouveau_channel_del(pchan);
......@@ -259,45 +264,50 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
struct nouveau_fb *pfb = nvkm_fb(device);
struct nouveau_software_chan *swch;
struct nv_dma_class args = {};
struct nv_dma_v0 args = {};
int ret, i;
/* allocate dma objects to cover all allowed vram, and gart */
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
args.limit = cli->vm->vmm->limit - 1;
} else {
args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = pfb->ram->size - imem->reserved - 1;
}
ret = nvif_object_init(chan->object, NULL, vram,
NV_DMA_IN_MEMORY_CLASS, &args,
NV_DMA_IN_MEMORY, &args,
sizeof(args), &chan->vram);
if (ret)
return ret;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
args.limit = cli->vm->vmm->limit - 1;
} else
if (chan->drm->agp.stat == ENABLED) {
args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
args.limit = chan->drm->agp.base +
chan->drm->agp.size - 1;
} else {
args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = vmm->limit - 1;
}
ret = nvif_object_init(chan->object, NULL, gart,
NV_DMA_IN_MEMORY_CLASS, &args,
NV_DMA_IN_MEMORY, &args,
sizeof(args), &chan->gart);
if (ret)
return ret;
......
......@@ -257,13 +257,13 @@ nouveau_accel_init(struct nouveau_drm *drm)
}
ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
NV_DMA_IN_MEMORY_CLASS,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = drm->notify->addr,
.limit = drm->notify->addr + 31
}, sizeof(struct nv_dma_class),
}, sizeof(struct nv_dma_v0),
&drm->ntfy);
if (ret) {
nouveau_accel_fini(drm);
......
......@@ -89,14 +89,13 @@ nv17_fence_context_new(struct nouveau_channel *chan)
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
ret = nvif_object_init(chan->object, NULL, NvSema,
NV_DMA_FROM_MEMORY_CLASS,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
}, sizeof(struct nv_dma_class),
}, sizeof(struct nv_dma_v0),
&fctx->sema);
if (ret)
nv10_fence_context_del(chan);
......
......@@ -160,13 +160,13 @@ nv50_dmac_create(struct nvif_object *disp, u32 bclass, u8 head,
return -ENOMEM;
ret = nvif_object_init(nvif_object(nvif_device(disp)), NULL, handle,
NV_DMA_FROM_MEMORY_CLASS,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_PCI_US |
NV_DMA_ACCESS_RD,
NV_DMA_FROM_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_PCI_US,
.access = NV_DMA_V0_ACCESS_RD,
.start = dmac->handle + 0x0000,
.limit = dmac->handle + 0x0fff,
}, sizeof(struct nv_dma_class), &pushbuf);
}, sizeof(struct nv_dma_v0), &pushbuf);
if (ret)
return ret;
......@@ -176,25 +176,25 @@ nv50_dmac_create(struct nvif_object *disp, u32 bclass, u8 head,
return ret;
ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
NV_DMA_IN_MEMORY_CLASS,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = syncbuf + 0x0000,
.limit = syncbuf + 0x0fff,
}, sizeof(struct nv_dma_class),
}, sizeof(struct nv_dma_v0),
&dmac->sync);
if (ret)
return ret;
ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
NV_DMA_IN_MEMORY_CLASS,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = 0,
.limit = pfb->ram->size - 1,
}, sizeof(struct nv_dma_class),
}, sizeof(struct nv_dma_v0),
&dmac->vram);
if (ret)
return ret;
......@@ -2073,9 +2073,17 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_mast *mast = nv50_mast(dev);
struct nv_dma_class args;
struct __attribute__ ((packed)) {
struct nv_dma_v0 base;
union {
struct nv50_dma_v0 nv50;
struct gf100_dma_v0 gf100;
struct gf110_dma_v0 gf110;
};
} args = {};
struct nv50_fbdma *fbdma;
struct drm_crtc *crtc;
u32 size = sizeof(args.base);
int ret;
list_for_each_entry(fbdma, &disp->fbdma, head) {
......@@ -2088,31 +2096,33 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
return -ENOMEM;
list_add(&fbdma->head, &disp->fbdma);
args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
args.start = offset;
args.limit = offset + length - 1;
args.conf0 = kind;
args.base.target = NV_DMA_V0_TARGET_VRAM;
args.base.access = NV_DMA_V0_ACCESS_RDWR;
args.base.start = offset;
args.base.limit = offset + length - 1;
if (drm->device.info.chipset < 0x80) {
args.conf0 = NV50_DMA_CONF0_ENABLE;
args.conf0 |= NV50_DMA_CONF0_PART_256;
args.nv50.part = NV50_DMA_V0_PART_256;
size += sizeof(args.nv50);
} else
if (drm->device.info.chipset < 0xc0) {
args.conf0 |= NV50_DMA_CONF0_ENABLE;
args.conf0 |= NV50_DMA_CONF0_PART_256;
args.nv50.part = NV50_DMA_V0_PART_256;
args.nv50.kind = kind;
size += sizeof(args.nv50);
} else
if (drm->device.info.chipset < 0xd0) {
args.conf0 |= NVC0_DMA_CONF0_ENABLE;
args.gf100.kind = kind;
size += sizeof(args.gf100);
} else {
args.conf0 |= NVD0_DMA_CONF0_ENABLE;
args.conf0 |= NVD0_DMA_CONF0_PAGE_LP;
args.gf110.page = GF110_DMA_V0_PAGE_LP;
args.gf110.kind = kind;
size += sizeof(args.gf110);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nv50_head *head = nv50_head(crtc);
int ret = nvif_object_init(&head->sync.base.base.user, NULL,
name, NV_DMA_IN_MEMORY_CLASS,
&args, sizeof(args),
name, NV_DMA_IN_MEMORY, &args, size,
&fbdma->base[head->base.index]);
if (ret) {
nv50_fbdma_fini(fbdma);
......@@ -2121,7 +2131,7 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
}
ret = nvif_object_init(&mast->base.base.user, NULL, name,
NV_DMA_IN_MEMORY_CLASS, &args, sizeof(args),
NV_DMA_IN_MEMORY, &args, size,
&fbdma->core);
if (ret) {
nv50_fbdma_fini(fbdma);
......
......@@ -51,14 +51,13 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
ret = nvif_object_init(chan->object, NULL, NvSema,
NV_DMA_IN_MEMORY_CLASS,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
}, sizeof(struct nv_dma_class),
}, sizeof(struct nv_dma_v0),
&fctx->sema);
/* dma objects for display sync channel semaphore blocks */
......@@ -68,13 +67,12 @@ nv50_fence_context_new(struct nouveau_channel *chan)
u32 limit = start + bo->bo.mem.size - 1;
ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
NV_DMA_IN_MEMORY_CLASS,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
}, sizeof(struct nv_dma_class),
}, sizeof(struct nv_dma_v0),
&fctx->head[i]);
}
......
......@@ -8,6 +8,10 @@
/* the below match nvidia-assigned (either in hw, or sw) class numbers */
#define NV_DEVICE 0x00000080
#define NV_DMA_FROM_MEMORY 0x00000002
#define NV_DMA_TO_MEMORY 0x00000003
#define NV_DMA_IN_MEMORY 0x0000003d
/*******************************************************************************
* client
......@@ -79,4 +83,72 @@ struct nv_device_info_v0 {
__u64 ram_user;
};
/*******************************************************************************
* context dma
******************************************************************************/
struct nv_dma_v0 {
__u8 version;
#define NV_DMA_V0_TARGET_VM 0x00
#define NV_DMA_V0_TARGET_VRAM 0x01
#define NV_DMA_V0_TARGET_PCI 0x02
#define NV_DMA_V0_TARGET_PCI_US 0x03
#define NV_DMA_V0_TARGET_AGP 0x04
__u8 target;
#define NV_DMA_V0_ACCESS_VM 0x00
#define NV_DMA_V0_ACCESS_RD 0x01
#define NV_DMA_V0_ACCESS_WR 0x02
#define NV_DMA_V0_ACCESS_RDWR (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR)
__u8 access;
__u8 pad03[5];
__u64 start;
__u64 limit;
/* ... chipset-specific class data */
};
struct nv50_dma_v0 {
__u8 version;
#define NV50_DMA_V0_PRIV_VM 0x00
#define NV50_DMA_V0_PRIV_US 0x01
#define NV50_DMA_V0_PRIV__S 0x02
__u8 priv;
#define NV50_DMA_V0_PART_VM 0x00
#define NV50_DMA_V0_PART_256 0x01
#define NV50_DMA_V0_PART_1KB 0x02
__u8 part;
#define NV50_DMA_V0_COMP_NONE 0x00
#define NV50_DMA_V0_COMP_1 0x01
#define NV50_DMA_V0_COMP_2 0x02
#define NV50_DMA_V0_COMP_VM 0x03
__u8 comp;
#define NV50_DMA_V0_KIND_PITCH 0x00
#define NV50_DMA_V0_KIND_VM 0x7f
__u8 kind;
__u8 pad05[3];
};
struct gf100_dma_v0 {
__u8 version;
#define GF100_DMA_V0_PRIV_VM 0x00
#define GF100_DMA_V0_PRIV_US 0x01
#define GF100_DMA_V0_PRIV__S 0x02
__u8 priv;
#define GF100_DMA_V0_KIND_PITCH 0x00
#define GF100_DMA_V0_KIND_VM 0xff
__u8 kind;
__u8 pad03[5];
};
struct gf110_dma_v0 {
__u8 version;
#define GF110_DMA_V0_PAGE_LP 0x00
#define GF110_DMA_V0_PAGE_SP 0x01
__u8 page;
#define GF110_DMA_V0_KIND_PITCH 0x00
#define GF110_DMA_V0_KIND_VM 0xff
__u8 kind;
__u8 pad03[5];
};
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment