Commit 3863c9bc authored by Ben Skeggs

drm/nouveau/instmem: completely new implementation, as a subdev module

v2 (Ben Skeggs):
- some fixes for 64KiB PAGE_SIZE
- fix porting issues in (currently unused) nv41/nv44 pciegart code
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 8a9b889e
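For reference, a minimal sketch (not part of the commit) of the per-channel pattern that the hunks below migrate to: instance-memory flushes go through nvimem_flush() instead of dev_priv->engine.instmem.flush(), VM engine reference counts go through nvvm_engref() instead of direct atomic ops on chan->vm->engref[], and object addresses are read from gpuobj->addr (or gpuobj->node->offset) rather than vinst/pinst/cinst. The function name example_context_new and the 0xa0 context offsets are illustrative only, borrowed from the nv84_crypt hunk.

#include "drmP.h"
#include "nouveau_drv.h"

static int
example_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	/* context pointers now come from ctx->addr (was ctx->vinst) */
	nv_wo32(chan->ramin, 0xa0, 0x00190000);
	nv_wo32(chan->ramin, 0xa4, ctx->addr + ctx->size - 1);
	nv_wo32(chan->ramin, 0xa8, ctx->addr);
	nvimem_flush(dev);                /* was dev_priv->engine.instmem.flush(dev) */

	nvvm_engref(chan->vm, engine, 1); /* was atomic_inc(&chan->vm->engref[engine]) */
	chan->engctx[engine] = ctx;
	return 0;
}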
......@@ -9,6 +9,7 @@ ccflags-y += -I$(src)
nouveau-y := core/core/client.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
nouveau-y += core/core/mm.o
nouveau-y += core/core/namedb.o
......@@ -19,6 +20,9 @@ nouveau-y += core/core/printk.o
nouveau-y += core/core/ramht.o
nouveau-y += core/core/subdev.o
nouveau-y += core/subdev/bar/base.o
nouveau-y += core/subdev/bar/nv50.o
nouveau-y += core/subdev/bar/nvc0.o
nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
nouveau-y += core/subdev/bios/conn.o
......@@ -66,10 +70,10 @@ nouveau-y += core/subdev/gpio/nvd0.o
nouveau-y += core/subdev/i2c/base.o
nouveau-y += core/subdev/i2c/aux.o
nouveau-y += core/subdev/i2c/bit.o
nouveau-y += core/subdev/instmem/base.o
nouveau-y += core/subdev/instmem/nv04.o
nouveau-y += core/subdev/instmem/nv40.o
nouveau-y += core/subdev/instmem/nv50.o
nouveau-y += core/subdev/instmem/nvc0.o
nouveau-y += core/subdev/ltcg/nvc0.o
nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
......@@ -80,6 +84,9 @@ nouveau-y += core/subdev/mc/nvc0.o
nouveau-y += core/subdev/timer/base.o
nouveau-y += core/subdev/timer/nv04.o
nouveau-y += core/subdev/vm/base.o
nouveau-y += core/subdev/vm/nv04.o
nouveau-y += core/subdev/vm/nv41.o
nouveau-y += core/subdev/vm/nv44.o
nouveau-y += core/subdev/vm/nv50.o
nouveau-y += core/subdev/vm/nvc0.o
......
......@@ -86,7 +86,6 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_ramht_entry *entry;
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
unsigned long flags;
......@@ -104,21 +103,21 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
if (dev_priv->card_type < NV_40) {
ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->addr >> 4) |
(chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
} else
if (dev_priv->card_type < NV_50) {
ctx = (gpuobj->pinst >> 4) |
ctx = (gpuobj->addr >> 4) |
(chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
ctx = (gpuobj->cinst << 10) |
ctx = (gpuobj->node->offset << 10) |
(chan->id << 28) |
chan->id; /* HASH_TAG */
} else {
ctx = (gpuobj->cinst >> 4) |
ctx = (gpuobj->node->offset >> 4) |
((gpuobj->engine <<
NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
}
......@@ -137,7 +136,7 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
nv_wo32(ramht, co + 4, ctx);
spin_unlock_irqrestore(&chan->ramht->lock, flags);
instmem->flush(dev);
nvimem_flush(dev);
return 0;
}
NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
......@@ -184,8 +183,6 @@ static void
nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
unsigned long flags;
u32 co, ho;
......@@ -201,7 +198,7 @@ nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
chan->id, co, handle, nv_ro32(ramht, co + 4));
nv_wo32(ramht, co + 0, 0x00000000);
nv_wo32(ramht, co + 4, 0x00000000);
instmem->flush(dev);
nvimem_flush(dev);
goto out;
}
......
......@@ -25,7 +25,6 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <subdev/vm.h>
#include <core/ramht.h>
/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
......
......@@ -26,7 +26,6 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <subdev/vm.h>
#include <core/ramht.h>
#include "fuc/nva3.fuc.h"
......@@ -38,7 +37,6 @@ static int
nva3_copy_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *ctx = NULL;
int ret;
......@@ -51,14 +49,14 @@ nva3_copy_context_new(struct nouveau_channel *chan, int engine)
return ret;
nv_wo32(ramin, 0xc0, 0x00190000);
nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
nv_wo32(ramin, 0xc8, ctx->vinst);
nv_wo32(ramin, 0xc4, ctx->addr + ctx->size - 1);
nv_wo32(ramin, 0xc8, ctx->addr);
nv_wo32(ramin, 0xcc, 0x00000000);
nv_wo32(ramin, 0xd0, 0x00000000);
nv_wo32(ramin, 0xd4, 0x00000000);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
atomic_inc(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, 1);
chan->engctx[engine] = ctx;
return 0;
}
......@@ -84,7 +82,7 @@ nva3_copy_context_del(struct nouveau_channel *chan, int engine)
for (i = 0xc0; i <= 0xd4; i += 4)
nv_wo32(chan->ramin, i, 0x00000000);
atomic_dec(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, -1);
nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = ctx;
}
......
......@@ -26,7 +26,6 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <subdev/vm.h>
#include <core/ramht.h>
#include "fuc/nvc0.fuc.h"
......@@ -49,7 +48,6 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
struct nvc0_copy_chan *cctx;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin;
int ret;
......@@ -62,14 +60,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(cctx->mem, NV_MEM_ACCESS_RW, chan->vm,
ret = nouveau_gpuobj_map_vm(cctx->mem, chan->vm, NV_MEM_ACCESS_RW,
&cctx->vma);
if (ret)
return ret;
nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(cctx->vma.offset));
nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(cctx->vma.offset));
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
return 0;
}
......@@ -88,7 +86,7 @@ nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
struct drm_device *dev = chan->dev;
u32 inst;
inst = (chan->ramin->vinst >> 12);
inst = (chan->ramin->addr >> 12);
inst |= 0x40000000;
/* disable fifo access */
......
......@@ -25,7 +25,6 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <subdev/vm.h>
#include <core/ramht.h>
struct nv84_crypt_engine {
......@@ -36,7 +35,6 @@ static int
nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *ctx;
int ret;
......@@ -49,14 +47,14 @@ nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
return ret;
nv_wo32(ramin, 0xa0, 0x00190000);
nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
nv_wo32(ramin, 0xa8, ctx->vinst);
nv_wo32(ramin, 0xa4, ctx->addr + ctx->size - 1);
nv_wo32(ramin, 0xa8, ctx->addr);
nv_wo32(ramin, 0xac, 0);
nv_wo32(ramin, 0xb0, 0);
nv_wo32(ramin, 0xb4, 0);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
atomic_inc(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, 1);
chan->engctx[engine] = ctx;
return 0;
}
......@@ -68,7 +66,7 @@ nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
struct drm_device *dev = chan->dev;
u32 inst;
inst = (chan->ramin->vinst >> 12);
inst = (chan->ramin->addr >> 12);
inst |= 0x80000000;
/* mark context as invalid if still on the hardware, not
......@@ -84,7 +82,7 @@ nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
nouveau_gpuobj_ref(NULL, &ctx);
atomic_dec(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, -1);
chan->engctx[engine] = NULL;
}
......@@ -93,7 +91,6 @@ nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *obj = NULL;
int ret;
......@@ -104,7 +101,7 @@ nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
obj->class = class;
nv_wo32(obj, 0x00, class);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
ret = nouveau_ramht_insert(chan, handle, obj);
nouveau_gpuobj_ref(NULL, &obj);
......
......@@ -26,7 +26,6 @@
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <subdev/vm.h>
#include <core/ramht.h>
#include "fuc/nv98.fuc.h"
......@@ -43,7 +42,6 @@ static int
nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv98_crypt_priv *priv = nv_engine(dev, engine);
struct nv98_crypt_chan *cctx;
int ret;
......@@ -52,7 +50,7 @@ nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
if (!cctx)
return -ENOMEM;
atomic_inc(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, 1);
ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
......@@ -60,12 +58,12 @@ nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
goto error;
nv_wo32(chan->ramin, 0xa0, 0x00190000);
nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
nv_wo32(chan->ramin, 0xa4, cctx->mem->addr + cctx->mem->size - 1);
nv_wo32(chan->ramin, 0xa8, cctx->mem->addr);
nv_wo32(chan->ramin, 0xac, 0x00000000);
nv_wo32(chan->ramin, 0xb0, 0x00000000);
nv_wo32(chan->ramin, 0xb4, 0x00000000);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
error:
if (ret)
......@@ -84,7 +82,7 @@ nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
nouveau_gpuobj_ref(NULL, &cctx->mem);
atomic_dec(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, -1);
chan->engctx[engine] = NULL;
kfree(cctx);
}
......
......@@ -32,8 +32,6 @@
#include <core/ramht.h>
#include "nouveau_software.h"
#include <core/subdev/instmem/nv04.h>
static struct ramfc_desc {
unsigned bits:6;
unsigned ctxs:5;
......@@ -120,7 +118,7 @@ nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
/* initialise default fifo context */
nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->pinst >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->addr >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x10,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
......@@ -203,9 +201,9 @@ nv04_fifo_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->pinst >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->pinst >> 8);
(dev_priv->ramht->gpuobj->addr >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
......@@ -486,15 +484,14 @@ int
nv04_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
struct nv04_fifo_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
priv->base.base.destroy = nv04_fifo_destroy;
priv->base.base.init = nv04_fifo_init;
......
......@@ -31,8 +31,6 @@
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/subdev/instmem/nv04.h>
static struct ramfc_desc {
unsigned bits:6;
unsigned ctxs:5;
......@@ -91,7 +89,7 @@ nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
/* initialise default fifo context */
nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
......@@ -115,15 +113,14 @@ int
nv10_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
struct nv10_fifo_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
priv->base.base.destroy = nv04_fifo_destroy;
priv->base.base.init = nv04_fifo_init;
......
......@@ -31,8 +31,6 @@
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/subdev/instmem/nv04.h>
static struct ramfc_desc {
unsigned bits:6;
unsigned ctxs:5;
......@@ -96,7 +94,7 @@ nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
/* initialise default fifo context */
nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
......@@ -131,10 +129,10 @@ nv17_fifo_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->pinst >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8);
(dev_priv->ramht->gpuobj->addr >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
priv->ramfc->pinst >> 8);
priv->ramfc->addr >> 8);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
......@@ -157,15 +155,14 @@ int
nv17_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
struct nv17_fifo_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
priv->base.base.destroy = nv04_fifo_destroy;
priv->base.base.init = nv17_fifo_init;
......
......@@ -31,8 +31,6 @@
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/subdev/instmem/nv04.h>
static struct ramfc_desc {
unsigned bits:6;
unsigned ctxs:5;
......@@ -104,7 +102,7 @@ nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
/* initialise default fifo context */
nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->pinst >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
......@@ -144,8 +142,8 @@ nv40_fifo_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->pinst >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->pinst >> 8);
(dev_priv->ramht->gpuobj->addr >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
switch (dev_priv->chipset) {
case 0x47:
......@@ -163,7 +161,7 @@ nv40_fifo_init(struct drm_device *dev, int engine)
default:
nv_wr32(dev, 0x002230, 0x00000000);
nv_wr32(dev, 0x002220, ((nvfb_vram_size(dev) - 512 * 1024 +
priv->ramfc->pinst) >> 16) |
priv->ramfc->addr) >> 16) |
0x00030000);
break;
}
......@@ -189,15 +187,14 @@ int
nv40_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_instmem_priv *imem = dev_priv->engine.instmem.priv;
struct nv40_fifo_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
priv->base.base.destroy = nv04_fifo_destroy;
priv->base.base.init = nv40_fifo_init;
......
......@@ -29,7 +29,6 @@
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/ramht.h>
#include <subdev/vm.h>
struct nv50_fifo_priv {
struct nouveau_fifo_priv base;
......@@ -45,7 +44,6 @@ void
nv50_fifo_playlist_update(struct drm_device *dev)
{
struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *cur;
int i, p;
......@@ -57,9 +55,9 @@ nv50_fifo_playlist_update(struct drm_device *dev)
nv_wo32(cur, p++ * 4, i);
}
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
nv_wr32(dev, 0x0032f4, cur->addr >> 12);
nv_wr32(dev, 0x0032ec, p);
nv_wr32(dev, 0x002500, 0x00000101);
}
......@@ -72,14 +70,14 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
u64 instance = chan->ramin->vinst >> 12;
u64 instance = chan->ramin->addr >> 12;
unsigned long flags;
int ret = 0, i;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
atomic_inc(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, 1);
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV50_USER(chan->id), PAGE_SIZE);
......@@ -93,7 +91,7 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(chan->ramin, 0x3c, 0x403f6078);
nv_wo32(chan->ramin, 0x40, 0x00000000);
nv_wo32(chan->ramin, 0x44, 0x01003fff);
nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
nv_wo32(chan->ramin, 0x48, chan->pushbuf->node->offset >> 4);
nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
drm_order(chan->dma.ib_max + 1) << 16);
......@@ -102,9 +100,9 @@ nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(chan->ramin, 0x7c, 0x30000001);
nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->cinst >> 4));
(chan->ramht->gpuobj->node->offset >> 4));
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
......@@ -141,7 +139,7 @@ nv50_fifo_kickoff(struct nouveau_channel *chan)
me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
/* do the kickoff... */
nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
done = false;
......@@ -177,7 +175,7 @@ nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
chan->user = NULL;
}
atomic_dec(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, -1);
chan->engctx[engine] = NULL;
kfree(fctx);
}
......@@ -200,7 +198,7 @@ nv50_fifo_init(struct drm_device *dev, int engine)
for (i = 0; i < 128; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->engctx[engine])
instance = 0x80000000 | chan->ramin->vinst >> 12;
instance = 0x80000000 | chan->ramin->addr >> 12;
else
instance = 0x00000000;
nv_wr32(dev, 0x002600 + (i * 4), instance);
......
......@@ -29,7 +29,6 @@
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/ramht.h>
#include <subdev/vm.h>
struct nv84_fifo_priv {
struct nouveau_fifo_priv base;
......@@ -58,7 +57,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
atomic_inc(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, 1);
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV50_USER(chan->id), PAGE_SIZE);
......@@ -72,7 +71,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
if (ret)
goto error;
instance = fctx->ramfc->vinst >> 8;
instance = fctx->ramfc->addr >> 8;
ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
if (ret)
......@@ -81,7 +80,7 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
nv_wo32(fctx->ramfc, 0x40, 0x00000000);
nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->node->offset >> 4);
nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
drm_order(chan->dma.ib_max + 1) << 16);
......@@ -90,14 +89,14 @@ nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->cinst >> 4));
nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
(chan->ramht->gpuobj->node->offset >> 4));
nv_wo32(fctx->ramfc, 0x88, fctx->cache->addr >> 10);
nv_wo32(fctx->ramfc, 0x98, chan->ramin->addr >> 12);
nv_wo32(chan->ramin, 0x00, chan->id);
nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
nv_wo32(chan->ramin, 0x04, fctx->ramfc->addr >> 8);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
......@@ -127,7 +126,7 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
/* tell any engines on this channel to unload their contexts */
nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
......@@ -145,7 +144,7 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
nouveau_gpuobj_ref(NULL, &fctx->ramfc);
nouveau_gpuobj_ref(NULL, &fctx->cache);
atomic_dec(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, -1);
chan->engctx[engine] = NULL;
kfree(fctx);
}
......@@ -169,7 +168,7 @@ nv84_fifo_init(struct drm_device *dev, int engine)
for (i = 0; i < 128; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && (fctx = chan->engctx[engine]))
instance = 0x80000000 | fctx->ramfc->vinst >> 8;
instance = 0x80000000 | fctx->ramfc->addr >> 8;
else
instance = 0x00000000;
nv_wr32(dev, 0x002600 + (i * 4), instance);
......@@ -200,7 +199,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
for (i = 0; i < priv->base.channels; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan)
nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
return -EBUSY;
......
......@@ -48,8 +48,6 @@ struct nvc0_fifo_chan {
static void
nvc0_fifo_playlist_update(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_gpuobj *cur;
int i, p;
......@@ -64,9 +62,9 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
nv_wo32(cur, p + 4, 0x00000004);
p += 8;
}
pinstmem->flush(dev);
nvimem_flush(dev);
nv_wr32(dev, 0x002270, cur->vinst >> 12);
nv_wr32(dev, 0x002270, cur->addr >> 12);
nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
NV_ERROR(dev, "PFIFO - playlist update failed\n");
......@@ -76,11 +74,9 @@ static int
nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
struct nvc0_fifo_chan *fctx;
u64 usermem = priv->user.mem->vinst + chan->id * 0x1000;
u64 usermem = priv->user.mem->addr + chan->id * 0x1000;
u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
int ret, i;
......@@ -115,10 +111,10 @@ nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(chan->ramin, 0xb8, 0xf8000000);
nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
pinstmem->flush(dev);
nvimem_flush(dev);
nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
(chan->ramin->vinst >> 12));
(chan->ramin->addr >> 12));
nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
nvc0_fifo_playlist_update(dev);
......@@ -198,7 +194,7 @@ nvc0_fifo_init(struct drm_device *dev, int engine)
continue;
nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
(chan->ramin->vinst >> 12));
(chan->ramin->addr >> 12));
nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
}
nvc0_fifo_playlist_update(dev);
......
......@@ -55,8 +55,6 @@ struct nve0_fifo_chan {
static void
nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nve0_fifo_engine *peng = &priv->engine[engine];
struct nouveau_gpuobj *cur;
......@@ -84,9 +82,9 @@ nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
nv_wo32(cur, p + 4, 0x00000000);
p += 8;
}
pinstmem->flush(dev);
nvimem_flush(dev);
nv_wr32(dev, 0x002270, cur->vinst >> 12);
nv_wr32(dev, 0x002270, cur->addr >> 12);
nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
......@@ -96,11 +94,9 @@ static int
nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nve0_fifo_priv *priv = nv_engine(dev, engine);
struct nve0_fifo_chan *fctx;
u64 usermem = priv->user.mem->vinst + chan->id * 512;
u64 usermem = priv->user.mem->addr + chan->id * 512;
u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
int ret = 0, i;
......@@ -135,10 +131,10 @@ nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(chan->ramin, 0xe8, chan->id);
nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
pinstmem->flush(dev);
nvimem_flush(dev);
nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
(chan->ramin->vinst >> 12));
(chan->ramin->addr >> 12));
nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
nve0_fifo_playlist_update(dev, fctx->engine);
nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
......@@ -207,7 +203,7 @@ nve0_fifo_init(struct drm_device *dev, int engine)
continue;
nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
(chan->ramin->vinst >> 12));
(chan->ramin->addr >> 12));
nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
nve0_fifo_playlist_update(dev, fctx->engine);
nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
......
......@@ -52,7 +52,7 @@ nv20_graph_unload_context(struct drm_device *dev)
return 0;
grctx = chan->engctx[NVOBJ_ENGINE_GR];
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->addr >> 4);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
......@@ -437,7 +437,7 @@ nv20_graph_context_new(struct nouveau_channel *chan, int engine)
/* CTX_USER */
nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->addr >> 4);
chan->engctx[engine] = grctx;
return 0;
}
......@@ -505,7 +505,7 @@ nv20_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
nv20_graph_rdi(dev);
......@@ -592,7 +592,7 @@ nv30_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
......
......@@ -52,16 +52,16 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
/* Initialise default context values */
nv40_grctx_fill(dev, grctx);
nv_wo32(grctx, 0, grctx->vinst);
nv_wo32(grctx, 0, grctx->addr);
/* init grctx pointer in ramfc, and on PFIFO if channel is
* already active there
*/
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
nv_wo32(chan->ramfc, 0x38, grctx->addr >> 4);
nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
nv_wr32(dev, 0x0032e0, grctx->addr >> 4);
nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
......@@ -75,7 +75,7 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 inst = 0x01000000 | (grctx->pinst >> 4);
u32 inst = 0x01000000 | (grctx->addr >> 4);
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
......@@ -357,7 +357,7 @@ nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
continue;
grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
if (grctx && grctx->pinst == inst)
if (grctx && grctx->addr == inst)
break;
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
......
......@@ -30,7 +30,6 @@
#include <engine/fifo.h>
#include <core/ramht.h>
#include "nouveau_dma.h"
#include <subdev/vm.h>
#include "nv50_evo.h"
struct nv50_graph_engine {
......@@ -155,18 +154,18 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
nv_wo32(ramin, hdr + 0x00, 0x00190002);
nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
nv_wo32(ramin, hdr + 0x08, grctx->vinst);
nv_wo32(ramin, hdr + 0x04, grctx->addr + grctx->size - 1);
nv_wo32(ramin, hdr + 0x08, grctx->addr);
nv_wo32(ramin, hdr + 0x0c, 0);
nv_wo32(ramin, hdr + 0x10, 0);
nv_wo32(ramin, hdr + 0x14, 0x00010000);
nv50_grctx_fill(dev, grctx);
nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
nv_wo32(grctx, 0x00000, chan->ramin->addr >> 12);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
nvvm_engref(chan->vm, engine, 1);
chan->engctx[NVOBJ_ENGINE_GR] = grctx;
return 0;
}
......@@ -181,9 +180,9 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
atomic_dec(&chan->vm->engref[engine]);
nvvm_engref(chan->vm, engine, -1);
nouveau_gpuobj_ref(NULL, &grctx);
chan->engctx[engine] = NULL;
}
......@@ -193,7 +192,6 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *obj = NULL;
int ret;
......@@ -207,7 +205,7 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
nv_wo32(obj, 0x04, 0x00000000);
nv_wo32(obj, 0x08, 0x00000000);
nv_wo32(obj, 0x0c, 0x00000000);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
ret = nouveau_ramht_insert(chan, handle, obj);
nouveau_gpuobj_ref(NULL, &obj);
......@@ -723,7 +721,7 @@ nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
if (!chan || !chan->ramin)
continue;
if (inst == chan->ramin->vinst)
if (inst == chan->ramin->addr)
break;
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
......
......@@ -65,7 +65,7 @@ nvc0_graph_load_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
nv_wr32(dev, 0x409840, 0x00000030);
nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
nv_wr32(dev, 0x409504, 0x00000003);
if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
......@@ -90,7 +90,6 @@ nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
static int
nvc0_graph_construct_context(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
struct drm_device *dev = chan->dev;
......@@ -103,7 +102,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
if (!nouveau_ctxfw) {
nv_wr32(dev, 0x409840, 0x80000000);
nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
nv_wr32(dev, 0x409504, 0x00000001);
if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
......@@ -118,7 +117,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
nv_wo32(grch->grctx, 0x20, 0);
nv_wo32(grch->grctx, 0x28, 0);
nv_wo32(grch->grctx, 0x2c, 0);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
}
ret = nvc0_grctx_generate(chan);
......@@ -127,7 +126,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
if (!nouveau_ctxfw) {
nv_wr32(dev, 0x409840, 0x80000000);
nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
nv_wr32(dev, 0x409504, 0x00000002);
if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
......@@ -136,7 +135,7 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
goto err;
}
} else {
ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
ret = nvc0_graph_unload_context_to(dev, chan->ramin->addr);
if (ret)
goto err;
}
......@@ -165,8 +164,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(grch->unk408004, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, chan->vm,
ret = nouveau_gpuobj_map_vm(grch->unk408004, chan->vm,
NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
&grch->unk408004_vma);
if (ret)
return ret;
......@@ -175,8 +174,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(grch->unk40800c, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, chan->vm,
ret = nouveau_gpuobj_map_vm(grch->unk40800c, chan->vm,
NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
&grch->unk40800c_vma);
if (ret)
return ret;
......@@ -186,8 +185,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(grch->unk418810, NV_MEM_ACCESS_RW,
chan->vm, &grch->unk418810_vma);
ret = nouveau_gpuobj_map_vm(grch->unk418810, chan->vm,
NV_MEM_ACCESS_RW, &grch->unk418810_vma);
if (ret)
return ret;
......@@ -195,9 +194,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(grch->mmio, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, chan->vm,
&grch->mmio_vma);
ret = nouveau_gpuobj_map_vm(grch->mmio, chan->vm, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, &grch->mmio_vma);
if (ret)
return ret;
......@@ -268,8 +266,6 @@ static int
nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nvc0_graph_priv *priv = nv_engine(dev, engine);
struct nvc0_graph_chan *grch;
struct nouveau_gpuobj *grctx;
......@@ -285,9 +281,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
if (ret)
goto error;
ret = nouveau_gpuobj_map_vm(grch->grctx, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, chan->vm,
&grch->grctx_vma);
ret = nouveau_gpuobj_map_vm(grch->grctx, chan->vm, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, &grch->grctx_vma);
if (ret)
return ret;
......@@ -299,7 +294,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
pinstmem->flush(dev);
nvimem_flush(dev);
if (!priv->grctx_vals) {
ret = nvc0_graph_construct_context(chan);
......@@ -324,7 +319,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(grctx, 0x28, 0);
nv_wo32(grctx, 0x2c, 0);
}
pinstmem->flush(dev);
nvimem_flush(dev);
return 0;
error:
......@@ -373,8 +368,8 @@ nvc0_graph_init_obj418880(struct drm_device *dev)
nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
for (i = 0; i < 4; i++)
nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
}
static void
......@@ -662,7 +657,7 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
if (!chan || !chan->ramin)
continue;
if (inst == chan->ramin->vinst)
if (inst == chan->ramin->addr)
break;
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
......
......@@ -63,7 +63,7 @@ nve0_graph_load_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
nv_wr32(dev, 0x409840, 0x00000030);
nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->addr >> 12);
nv_wr32(dev, 0x409504, 0x00000003);
if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
......@@ -88,7 +88,6 @@ nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
static int
nve0_graph_construct_context(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
struct drm_device *dev = chan->dev;
......@@ -105,13 +104,13 @@ nve0_graph_construct_context(struct nouveau_channel *chan)
nv_wo32(grch->grctx, 0x20, 0);
nv_wo32(grch->grctx, 0x28, 0);
nv_wo32(grch->grctx, 0x2c, 0);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
ret = nve0_grctx_generate(chan);
if (ret)
goto err;
ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
ret = nve0_graph_unload_context_to(dev, chan->ramin->addr);
if (ret)
goto err;
......@@ -141,8 +140,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(grch->unk408004, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, chan->vm,
ret = nouveau_gpuobj_map_vm(grch->unk408004, chan->vm,
NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
&grch->unk408004_vma);
if (ret)
return ret;
......@@ -151,8 +150,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(grch->unk40800c, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, chan->vm,
ret = nouveau_gpuobj_map_vm(grch->unk40800c, chan->vm,
NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
&grch->unk40800c_vma);
if (ret)
return ret;
......@@ -162,8 +161,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(grch->unk418810, NV_MEM_ACCESS_RW,
chan->vm, &grch->unk418810_vma);
ret = nouveau_gpuobj_map_vm(grch->unk418810, chan->vm,
NV_MEM_ACCESS_RW, &grch->unk418810_vma);
if (ret)
return ret;
......@@ -171,8 +170,8 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(grch->mmio, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, chan->vm,
ret = nouveau_gpuobj_map_vm(grch->mmio, chan->vm,
NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
&grch->mmio_vma);
if (ret)
return ret;
......@@ -221,8 +220,6 @@ static int
nve0_graph_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nve0_graph_priv *priv = nv_engine(dev, engine);
struct nve0_graph_chan *grch;
struct nouveau_gpuobj *grctx;
......@@ -238,9 +235,8 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
if (ret)
goto error;
ret = nouveau_gpuobj_map_vm(grch->grctx, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, chan->vm,
&grch->grctx_vma);
ret = nouveau_gpuobj_map_vm(grch->grctx, chan->vm, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, &grch->grctx_vma);
if (ret)
return ret;
......@@ -252,7 +248,7 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
pinstmem->flush(dev);
nvimem_flush(dev);
if (!priv->grctx_vals) {
ret = nve0_graph_construct_context(chan);
......@@ -272,7 +268,7 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(grctx, 0x28, 0);
nv_wo32(grctx, 0x2c, 0);
pinstmem->flush(dev);
nvimem_flush(dev);
return 0;
error:
......@@ -321,8 +317,8 @@ nve0_graph_init_obj418880(struct drm_device *dev)
nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
for (i = 0; i < 4; i++)
nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
}
static void
......@@ -591,7 +587,7 @@ nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
if (!chan || !chan->ramin)
continue;
if (inst == chan->ramin->vinst)
if (inst == chan->ramin->addr)
break;
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
......
......@@ -74,8 +74,8 @@ nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
nv_wr32(dev, 0x00330c, ctx->addr >> 4);
nv_wo32(chan->ramfc, 0x54, ctx->addr >> 4);
nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
......@@ -90,7 +90,7 @@ nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *ctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
unsigned long flags;
u32 inst = 0x80000000 | (ctx->pinst >> 4);
u32 inst = 0x80000000 | (ctx->addr >> 4);
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
......@@ -224,7 +224,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
continue;
ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
if (ctx && ctx->pinst == inst)
if (ctx && ctx->addr == inst)
break;
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
......
......@@ -47,7 +47,6 @@ static int
nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *ctx = NULL;
int ret;
......@@ -60,15 +59,15 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
return ret;
nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->addr + ctx->size - 1);
nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->addr);
nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
nv_wo32(ctx, 0x70, 0x00801ec1);
nv_wo32(ctx, 0x7c, 0x0000037c);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
chan->engctx[engine] = ctx;
return 0;
......@@ -93,7 +92,6 @@ nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *obj = NULL;
int ret;
......@@ -107,7 +105,7 @@ nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
nv_wo32(obj, 0x04, 0x00000000);
nv_wo32(obj, 0x08, 0x00000000);
nv_wo32(obj, 0x0c, 0x00000000);
dev_priv->engine.instmem.flush(dev);
nvimem_flush(dev);
ret = nouveau_ramht_insert(chan, handle, obj);
nouveau_gpuobj_ref(NULL, &obj);
......
......@@ -25,7 +25,6 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <subdev/vm.h>
#include <core/ramht.h>
struct nv98_ppp_engine {
......
......@@ -25,7 +25,6 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <subdev/vm.h>
#include <core/ramht.h>
/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
......
#ifndef __NOUVEAU_BAR_H__
#define __NOUVEAU_BAR_H__
#include <core/subdev.h>
#include <core/device.h>
#include <subdev/fb.h>
struct nouveau_vma;
struct nouveau_bar {
struct nouveau_subdev base;
int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
struct nouveau_mem *, struct nouveau_object **);
void __iomem *iomem;
int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
u32 flags, struct nouveau_vma *);
int (*umap)(struct nouveau_bar *, struct nouveau_mem *,
u32 flags, struct nouveau_vma *);
void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
void (*flush)(struct nouveau_bar *);
};
static inline struct nouveau_bar *
nouveau_bar(void *obj)
{
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
}
#define nouveau_bar_create(p,e,o,d) \
nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
#define nouveau_bar_init(p) \
nouveau_subdev_init(&(p)->base)
#define nouveau_bar_fini(p,s) \
nouveau_subdev_fini(&(p)->base, (s))
int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, int, void **);
void nouveau_bar_destroy(struct nouveau_bar *);
void _nouveau_bar_dtor(struct nouveau_object *);
#define _nouveau_bar_init _nouveau_subdev_init
#define _nouveau_bar_fini _nouveau_subdev_fini
extern struct nouveau_oclass nv50_bar_oclass;
extern struct nouveau_oclass nvc0_bar_oclass;
int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
struct nouveau_mem *, struct nouveau_object **);
void nv84_bar_flush(struct nouveau_bar *);
#endif
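A hedged usage sketch (not from the commit) of the interface declared above: any object hanging off the device can look up the BAR subdev with nouveau_bar() and flush posted writes through it. The function name example_bar_flush and its example_obj parameter are hypothetical.

static void
example_bar_flush(struct nouveau_object *example_obj)
{
	struct nouveau_bar *bar = nouveau_bar(example_obj);

	/* flush is a per-implementation hook; nv84_bar_flush() above is one */
	if (bar && bar->flush)
		bar->flush(bar);
}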
......@@ -6,6 +6,7 @@
#include <core/device.h>
#endif
#include <core/mm.h>
#include <subdev/vm.h>
/* memory type/access flags, do not match hardware values */
......
#ifndef __NOUVEAU_INSTMEM_H__
#define __NOUVEAU_INSTMEM_H__
#include <core/subdev.h>
#include <core/device.h>
#include <core/mm.h>
struct nouveau_instobj {
struct nouveau_object base;
struct list_head head;
struct nouveau_mm heap;
u32 *suspend;
u64 addr;
u32 size;
};
static inline struct nouveau_instobj *
nv_memobj(void *obj)
{
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
#endif
return obj;
}
#define nouveau_instobj_create(p,e,o,d) \
nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
#define nouveau_instobj_init(p) \
nouveau_object_init(&(p)->base)
#define nouveau_instobj_fini(p,s) \
nouveau_object_fini(&(p)->base, (s))
int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, int, void **);
void nouveau_instobj_destroy(struct nouveau_instobj *);
void _nouveau_instobj_dtor(struct nouveau_object *);
#define _nouveau_instobj_init nouveau_object_init
#define _nouveau_instobj_fini nouveau_object_fini
struct nouveau_instmem {
struct nouveau_subdev base;
struct list_head list;
u32 reserved;
int (*alloc)(struct nouveau_instmem *, struct nouveau_object *,
u32 size, u32 align, struct nouveau_object **);
};
static inline struct nouveau_instmem *
nouveau_instmem(void *obj)
{
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
}
#define nouveau_instmem_create(p,e,o,d) \
nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
#define nouveau_instmem_destroy(p) \
nouveau_subdev_destroy(&(p)->base)
int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, int, void **);
int nouveau_instmem_init(struct nouveau_instmem *);
int nouveau_instmem_fini(struct nouveau_instmem *, bool);
#define _nouveau_instmem_dtor _nouveau_subdev_dtor
int _nouveau_instmem_init(struct nouveau_object *);
int _nouveau_instmem_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass nv04_instmem_oclass;
extern struct nouveau_oclass nv40_instmem_oclass;
extern struct nouveau_oclass nv50_instmem_oclass;
#endif
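A minimal usage sketch (illustrative, not part of the commit) against the ->alloc() hook declared above: fetch the INSTMEM subdev via nouveau_instmem() and request an instance object. The size (0x1000) and alignment (0x100) values, and the name example_instobj_alloc, are arbitrary assumptions.

static int
example_instobj_alloc(struct nouveau_object *parent,
		      struct nouveau_object **pobject)
{
	struct nouveau_instmem *imem = nouveau_instmem(parent);

	return imem->alloc(imem, parent, 0x1000, 0x100, pobject);
}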
......@@ -25,10 +25,14 @@
#ifndef __NOUVEAU_VM_H__
#define __NOUVEAU_VM_H__
#ifndef XXX_THIS_IS_A_HACK
#include <core/object.h>
#include <core/subdev.h>
#include <core/device.h>
#endif
#include <core/mm.h>
struct nouveau_mem;
#ifndef XXX_THIS_IS_A_HACK
struct nouveau_vm_pgt {
struct nouveau_gpuobj *obj[2];
u32 refcount[2];
......@@ -38,6 +42,10 @@ struct nouveau_vm_pgd {
struct list_head head;
struct nouveau_gpuobj *obj;
};
#endif
struct nouveau_gpuobj;
struct nouveau_mem;
struct nouveau_vma {
struct list_head head;
......@@ -49,21 +57,29 @@ struct nouveau_vma {
};
struct nouveau_vm {
struct drm_device *dev;
struct nouveau_vmmgr *vmm;
struct nouveau_mm mm;
int refcount;
struct list_head pgd_list;
atomic_t engref[16];
atomic_t engref[64]; /* NVDEV_SUBDEV_NR */
struct nouveau_vm_pgt *pgt;
u32 fpde;
u32 lpde;
};
#ifndef XXX_THIS_IS_A_HACK
struct nouveau_vmmgr {
struct nouveau_subdev base;
u32 pgt_bits;
u8 spg_shift;
u8 lpg_shift;
int (*create)(struct nouveau_vmmgr *, u64 offset, u64 length,
u64 mm_offset, struct nouveau_vm **);
void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
......@@ -71,16 +87,48 @@ struct nouveau_vm {
u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
/* nouveau_vm.c */
int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
static inline struct nouveau_vmmgr *
nouveau_vmmgr(void *obj)
{
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM];
}
#define nouveau_vmmgr_create(p,e,o,i,f,d) \
nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d))
#define nouveau_vmmgr_destroy(p) \
nouveau_subdev_destroy(&(p)->base)
#define nouveau_vmmgr_init(p) \
nouveau_subdev_init(&(p)->base)
#define nouveau_vmmgr_fini(p,s) \
nouveau_subdev_fini(&(p)->base, (s))
#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor
#define _nouveau_vmmgr_init _nouveau_subdev_init
#define _nouveau_vmmgr_fini _nouveau_subdev_fini
extern struct nouveau_oclass nv04_vmmgr_oclass;
extern struct nouveau_oclass nv41_vmmgr_oclass;
extern struct nouveau_oclass nv44_vmmgr_oclass;
extern struct nouveau_oclass nv50_vmmgr_oclass;
extern struct nouveau_oclass nvc0_vmmgr_oclass;
int nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
struct nouveau_vm **);
void nv04_vmmgr_dtor(struct nouveau_object *);
void nv50_vm_flush_engine(struct nouveau_subdev *, int engine);
void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type);
/* nouveau_vm.c */
int nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
u64 mm_offset, u32 block, struct nouveau_vm **);
int nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length,
u64 mm_offset, struct nouveau_vm **);
#endif
int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
struct nouveau_gpuobj *pgd);
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
......@@ -94,25 +142,5 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
struct nouveau_mem *);
void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);
/* nvc0_vm.c */
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);
#endif
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/object.h>
#include <subdev/bar.h>
struct nouveau_barobj {
struct nouveau_object base;
struct nouveau_vma vma;
void __iomem *iomem;
};
static int
nouveau_barobj_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *mem, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_bar *bar = (void *)engine;
struct nouveau_barobj *barobj;
int ret;
ret = nouveau_object_create(parent, engine, oclass, 0, &barobj);
*pobject = nv_object(barobj);
if (ret)
return ret;
ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
if (ret)
return ret;
barobj->iomem = bar->iomem + (u32)barobj->vma.offset;
return 0;
}
static void
nouveau_barobj_dtor(struct nouveau_object *object)
{
struct nouveau_bar *bar = (void *)object->engine;
struct nouveau_barobj *barobj = (void *)object;
if (barobj->vma.node)
bar->unmap(bar, &barobj->vma);
nouveau_object_destroy(&barobj->base);
}
static u32
nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
{
struct nouveau_barobj *barobj = (void *)object;
return ioread32_native(barobj->iomem + addr);
}
static void
nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
struct nouveau_barobj *barobj = (void *)object;
iowrite32_native(data, barobj->iomem + addr);
}
static struct nouveau_oclass
nouveau_barobj_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nouveau_barobj_ctor,
.dtor = nouveau_barobj_dtor,
.init = nouveau_object_init,
.fini = nouveau_object_fini,
.rd32 = nouveau_barobj_rd32,
.wr32 = nouveau_barobj_wr32,
},
};
int
nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
struct nouveau_mem *mem, struct nouveau_object **pobject)
{
struct nouveau_object *engine = nv_object(bar);
return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
mem, 0, pobject);
}
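/* Rough usage sketch (not part of this change): a caller holding a
 * struct nouveau_mem could do something like
 *
 *	struct nouveau_object *obj;
 *	ret = bar->alloc(bar, parent, mem, &obj);
 *	if (ret == 0) {
 *		nv_wo32(obj, 0x00, 0xcafe0001);
 *		val = nv_ro32(obj, 0x00);
 *	}
 *
 * with the accesses serviced through the BAR-mapped window set up in
 * nouveau_barobj_ctor() above.
 */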
int
nouveau_bar_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, int length, void **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct nouveau_bar *bar;
int ret;
ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
"bar", length, pobject);
bar = *pobject;
if (ret)
return ret;
bar->iomem = ioremap(pci_resource_start(device->pdev, 3),
pci_resource_len(device->pdev, 3));
return 0;
}
void
nouveau_bar_destroy(struct nouveau_bar *bar)
{
if (bar->iomem)
iounmap(bar->iomem);
nouveau_subdev_destroy(&bar->base);
}
void
_nouveau_bar_dtor(struct nouveau_object *object)
{
struct nouveau_bar *bar = (void *)object;
nouveau_bar_destroy(bar);
}
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
struct nv50_bar_priv {
struct nouveau_bar base;
spinlock_t lock;
struct nouveau_gpuobj *mem;
struct nouveau_gpuobj *pad;
struct nouveau_gpuobj *pgd;
struct nouveau_vm *bar1_vm;
struct nouveau_gpuobj *bar1;
struct nouveau_vm *bar3_vm;
struct nouveau_gpuobj *bar3;
};
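/* bar3_vm/bar3 service kernel mappings of instance memory (kmap),
 * bar1_vm/bar1 service user mappings (umap); the bar1/bar3 objects
 * and the page directory are all allocated from the priv->mem heap.
 */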
static int
nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
u32 flags, struct nouveau_vma *vma)
{
struct nv50_bar_priv *priv = (void *)bar;
int ret;
ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
if (ret)
return ret;
nouveau_vm_map(vma, mem);
nv50_vm_flush_engine(nv_subdev(bar), 6);
return 0;
}
static int
nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
u32 flags, struct nouveau_vma *vma)
{
struct nv50_bar_priv *priv = (void *)bar;
int ret;
ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
if (ret)
return ret;
nouveau_vm_map(vma, mem);
nv50_vm_flush_engine(nv_subdev(bar), 6);
return 0;
}
static void
nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
nouveau_vm_unmap(vma);
nv50_vm_flush_engine(nv_subdev(bar), 6);
nouveau_vm_put(vma);
}
static void
nv50_bar_flush(struct nouveau_bar *bar)
{
struct nv50_bar_priv *priv = (void *)bar;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
nv_wr32(priv, 0x00330c, 0x00000001);
if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
nv_warn(priv, "flush timeout\n");
spin_unlock_irqrestore(&priv->lock, flags);
}
void
nv84_bar_flush(struct nouveau_bar *bar)
{
struct nv50_bar_priv *priv = (void *)bar;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
nv_wr32(bar, 0x070000, 0x00000001);
if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
nv_warn(priv, "flush timeout\n");
spin_unlock_irqrestore(&priv->lock, flags);
}
static int
nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct nouveau_object *heap;
struct nouveau_vm *vm;
struct nv50_bar_priv *priv;
u64 start, limit;
int ret;
ret = nouveau_bar_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
&priv->mem);
heap = nv_object(priv->mem);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
0x1400 : 0x0200, 0, 0, &priv->pad);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
if (ret)
return ret;
/* BAR3 */
start = 0x0100000000ULL;
limit = start + pci_resource_len(device->pdev, 3);
ret = nouveau_vm_new(device, start, limit, start, &vm);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
0x1000, NVOBJ_FLAG_ZERO_ALLOC,
&vm->pgt[0].obj[0]);
vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
nouveau_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
if (ret)
return ret;
nv_wo32(priv->bar3, 0x00, 0x7fc00000);
nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
nv_wo32(priv->bar3, 0x10, 0x00000000);
nv_wo32(priv->bar3, 0x14, 0x00000000);
/* BAR1 */
start = 0x0000000000ULL;
limit = start + pci_resource_len(device->pdev, 1);
ret = nouveau_vm_new(device, start, limit--, start, &vm);
if (ret)
return ret;
ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
nouveau_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
if (ret)
return ret;
nv_wo32(priv->bar1, 0x00, 0x7fc00000);
nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
nv_wo32(priv->bar1, 0x10, 0x00000000);
nv_wo32(priv->bar1, 0x14, 0x00000000);
priv->base.alloc = nouveau_bar_alloc;
priv->base.kmap = nv50_bar_kmap;
priv->base.umap = nv50_bar_umap;
priv->base.unmap = nv50_bar_unmap;
if (device->chipset == 0x50)
priv->base.flush = nv50_bar_flush;
else
priv->base.flush = nv84_bar_flush;
spin_lock_init(&priv->lock);
return 0;
}
static void
nv50_bar_dtor(struct nouveau_object *object)
{
struct nv50_bar_priv *priv = (void *)object;
nouveau_gpuobj_ref(NULL, &priv->bar1);
nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
nouveau_gpuobj_ref(NULL, &priv->bar3);
if (priv->bar3_vm) {
nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
}
nouveau_gpuobj_ref(NULL, &priv->pgd);
nouveau_gpuobj_ref(NULL, &priv->pad);
nouveau_gpuobj_ref(NULL, &priv->mem);
nouveau_bar_destroy(&priv->base);
}
static int
nv50_bar_init(struct nouveau_object *object)
{
struct nv50_bar_priv *priv = (void *)object;
int ret;
ret = nouveau_bar_init(&priv->base);
if (ret)
return ret;
nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
nv50_vm_flush_engine(nv_subdev(priv), 6);
nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
return 0;
}
static int
nv50_bar_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_bar_priv *priv = (void *)object;
return nouveau_bar_fini(&priv->base, suspend);
}
struct nouveau_oclass
nv50_bar_oclass = {
.handle = NV_SUBDEV(BAR, 0x50),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_bar_ctor,
.dtor = nv50_bar_dtor,
.init = nv50_bar_init,
.fini = nv50_bar_fini,
},
};
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
struct nvc0_bar_priv {
struct nouveau_bar base;
spinlock_t lock;
struct {
struct nouveau_gpuobj *mem;
struct nouveau_gpuobj *pgd;
struct nouveau_vm *vm;
} bar[2];
};
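/* bar[0] covers BAR3 (kernel instance memory mappings, kmap) and
 * bar[1] covers BAR1 (user mappings, umap); each has its own instance
 * block (mem), page directory (pgd) and address space (vm).
 */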
static int
nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
u32 flags, struct nouveau_vma *vma)
{
struct nvc0_bar_priv *priv = (void *)bar;
int ret;
ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
if (ret)
return ret;
nouveau_vm_map(vma, mem);
nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
return 0;
}
static int
nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
u32 flags, struct nouveau_vma *vma)
{
struct nvc0_bar_priv *priv = (void *)bar;
int ret;
ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12,
mem->page_shift, flags, vma);
if (ret)
return ret;
nouveau_vm_map(vma, mem);
nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5);
return 0;
}
static void
nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
struct nvc0_bar_priv *priv = (void *)bar;
int i = !(vma->vm == priv->bar[0].vm);
nouveau_vm_unmap(vma);
nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
nouveau_vm_put(vma);
}
static int
nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct pci_dev *pdev = device->pdev;
struct nvc0_bar_priv *priv;
struct nouveau_gpuobj *mem;
struct nouveau_vm *vm;
int ret;
ret = nouveau_bar_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
/* BAR3 */
ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
mem = priv->bar[0].mem;
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
if (ret)
return ret;
ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL,
(pci_resource_len(pdev, 3) >> 12) * 8,
0x1000, NVOBJ_FLAG_ZERO_ALLOC,
&vm->pgt[0].obj[0]);
vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
nouveau_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
/* BAR1 */
ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
mem = priv->bar[1].mem;
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
if (ret)
return ret;
ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
if (ret)
return ret;
ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
nouveau_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));
priv->base.alloc = nouveau_bar_alloc;
priv->base.kmap = nvc0_bar_kmap;
priv->base.umap = nvc0_bar_umap;
priv->base.unmap = nvc0_bar_unmap;
priv->base.flush = nv84_bar_flush;
spin_lock_init(&priv->lock);
return 0;
}
static void
nvc0_bar_dtor(struct nouveau_object *object)
{
struct nvc0_bar_priv *priv = (void *)object;
nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);
if (priv->bar[0].vm) {
nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
}
nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);
nouveau_bar_destroy(&priv->base);
}
static int
nvc0_bar_init(struct nouveau_object *object)
{
struct nvc0_bar_priv *priv = (void *)object;
int ret;
ret = nouveau_bar_init(&priv->base);
if (ret)
return ret;
nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
return 0;
}
struct nouveau_oclass
nvc0_bar_oclass = {
.handle = NV_SUBDEV(BAR, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_bar_ctor,
.dtor = nvc0_bar_dtor,
.init = nvc0_bar_init,
.fini = _nouveau_bar_fini,
},
};
......@@ -30,6 +30,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
int
nv04_identify(struct nouveau_device *device)
......@@ -43,6 +45,8 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x05:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -52,6 +56,8 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
default:
nv_fatal(device, "unknown RIVA chipset\n");
......
......@@ -31,6 +31,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
int
nv10_identify(struct nouveau_device *device)
......@@ -45,6 +47,8 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x15:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -55,6 +59,8 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x16:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -65,6 +71,8 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x1a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -75,6 +83,8 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x11:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -85,6 +95,8 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x17:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -95,6 +107,8 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x1f:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -105,6 +119,8 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x18:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -115,6 +131,8 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
default:
nv_fatal(device, "unknown Celsius chipset\n");
......
......@@ -31,6 +31,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
int
nv20_identify(struct nouveau_device *device)
......@@ -45,6 +47,8 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x25:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -55,6 +59,8 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x28:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -65,6 +71,8 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x2a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -75,6 +83,8 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
default:
nv_fatal(device, "unknown Kelvin chipset\n");
......
......@@ -31,6 +31,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
int
nv30_identify(struct nouveau_device *device)
......@@ -45,6 +47,8 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x35:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -55,6 +59,8 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x31:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -65,6 +71,8 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x36:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -75,6 +83,8 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x34:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -85,6 +95,8 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
default:
nv_fatal(device, "unknown Rankine chipset\n");
......
......@@ -31,6 +31,8 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
int
nv40_identify(struct nouveau_device *device)
......@@ -45,6 +47,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x41:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -55,6 +59,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x42:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -65,6 +71,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x43:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -75,6 +83,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x45:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -85,6 +95,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x47:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -95,6 +107,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x49:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -105,6 +119,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x4b:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -115,6 +131,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x44:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -125,6 +143,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x46:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -135,6 +155,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x4a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -145,6 +167,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x4c:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -155,6 +179,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x4e:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -165,6 +191,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x63:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -175,6 +203,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x67:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -185,6 +215,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
case 0x68:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -195,6 +227,8 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
break;
default:
nv_fatal(device, "unknown Curie chipset\n");
......
......@@ -31,6 +31,9 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
int
nv50_identify(struct nouveau_device *device)
......@@ -45,6 +48,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0x84:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -55,6 +61,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0x86:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -65,6 +74,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0x92:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -75,6 +87,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0x94:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -85,6 +100,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0x96:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -95,6 +113,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0x98:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -105,6 +126,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0xa0:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -115,6 +139,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0xaa:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -125,6 +152,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0xac:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -135,6 +165,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0xa3:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -145,6 +178,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0xa5:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -155,6 +191,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0xa8:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -165,6 +204,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
case 0xaf:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -175,6 +217,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
break;
default:
nv_fatal(device, "unknown Tesla chipset\n");
......
......@@ -32,6 +32,9 @@
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/ltcg.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
int
nvc0_identify(struct nouveau_device *device)
......@@ -47,6 +50,9 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
case 0xc4:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -58,6 +64,9 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
case 0xc3:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -69,6 +78,9 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
case 0xce:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -80,6 +92,9 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
case 0xcf:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -91,6 +106,9 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
case 0xc1:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -102,6 +120,9 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
case 0xc8:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -113,6 +134,9 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
case 0xd9:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -124,6 +148,9 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
default:
nv_fatal(device, "unknown Fermi chipset\n");
......
......@@ -32,6 +32,9 @@
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/ltcg.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
int
nve0_identify(struct nouveau_device *device)
......@@ -47,6 +50,9 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
case 0xe7:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
......@@ -58,6 +64,9 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
......
......@@ -50,54 +50,14 @@ nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
static void
nv40_fb_init_gart(struct nv40_fb_priv *priv)
{
#if 0
struct nouveau_gpuobj *gart = ndev->gart_info.sg_ctxdma;
if (ndev->gart_info.type != NOUVEAU_GART_HW) {
#endif
nv_wr32(priv, 0x100800, 0x00000001);
#if 0
return;
}
nv_wr32(ndev, 0x100800, gart->pinst | 0x00000002);
nv_mask(ndev, 0x10008c, 0x00000100, 0x00000100);
nv_wr32(ndev, 0x100820, 0x00000000);
#endif
}
static void
nv44_fb_init_gart(struct nv40_fb_priv *priv)
{
#if 0
struct nouveau_gpuobj *gart = ndev->gart_info.sg_ctxdma;
u32 vinst;
if (ndev->gart_info.type != NOUVEAU_GART_HW) {
#endif
nv_wr32(priv, 0x100850, 0x80000000);
nv_wr32(priv, 0x100800, 0x00000001);
#if 0
return;
}
/* calculate the VRAM address of this PRAMIN block; the object
* must be allocated with 512KiB alignment, and must not exceed
* a total size of 512KiB, for this to work correctly
*/
vinst = nv_rd32(ndev, 0x10020c);
vinst -= ((gart->pinst >> 19) + 1) << 19;
nv_wr32(ndev, 0x100850, 0x80000000);
nv_wr32(ndev, 0x100818, ndev->gart_info.dummy.addr);
nv_wr32(ndev, 0x100804, ndev->gart_info.aper_size);
nv_wr32(ndev, 0x100850, 0x00008000);
nv_mask(ndev, 0x10008c, 0x00000200, 0x00000200);
nv_wr32(ndev, 0x100820, 0x00000000);
nv_wr32(ndev, 0x10082c, 0x00000001);
nv_wr32(ndev, 0x100800, vinst | 0x00000010);
#endif
}
static int
......
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/instmem.h>
int
nouveau_instobj_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
int length, void **pobject)
{
struct nouveau_instmem *imem = (void *)engine;
struct nouveau_instobj *iobj;
int ret;
ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
length, pobject);
iobj = *pobject;
if (ret)
return ret;
list_add(&iobj->head, &imem->list);
return 0;
}
void
nouveau_instobj_destroy(struct nouveau_instobj *iobj)
{
if (iobj->head.prev)
list_del(&iobj->head);
return nouveau_object_destroy(&iobj->base);
}
void
_nouveau_instobj_dtor(struct nouveau_object *object)
{
struct nouveau_instobj *iobj = (void *)object;
return nouveau_instobj_destroy(iobj);
}
int
nouveau_instmem_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
int length, void **pobject)
{
struct nouveau_instmem *imem;
int ret;
ret = nouveau_subdev_create_(parent, engine, oclass, 0,
"INSTMEM", "instmem", length, pobject);
imem = *pobject;
if (ret)
return ret;
INIT_LIST_HEAD(&imem->list);
return 0;
}
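/* Instance objects register themselves on imem->list at creation time;
 * init/fini below walk that list to restore/save their contents across
 * suspend/resume.
 */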
int
nouveau_instmem_init(struct nouveau_instmem *imem)
{
struct nouveau_instobj *iobj;
int ret, i;
ret = nouveau_subdev_init(&imem->base);
if (ret)
return ret;
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
nv_wo32(iobj, i, iobj->suspend[i / 4]);
vfree(iobj->suspend);
iobj->suspend = NULL;
}
}
return 0;
}
int
nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
{
struct nouveau_instobj *iobj;
int i;
if (suspend) {
list_for_each_entry(iobj, &imem->list, head) {
iobj->suspend = vmalloc(iobj->size);
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
iobj->suspend[i / 4] = nv_ro32(iobj, i);
} else
return -ENOMEM;
}
}
return nouveau_subdev_fini(&imem->base, suspend);
}
int
_nouveau_instmem_init(struct nouveau_object *object)
{
struct nouveau_instmem *imem = (void *)object;
return nouveau_instmem_init(imem);
}
int
_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
{
struct nouveau_instmem *imem = (void *)object;
return nouveau_instmem_fini(imem, suspend);
}
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/ramht.h>
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/fb.h>
#include "nv04.h"
static int
nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_instmem_priv *priv = (void *)engine;
struct nv04_instobj_priv *node;
int ret, align;
align = (unsigned long)data;
if (!align)
align = 1;
ret = nouveau_instobj_create(parent, engine, oclass, &node);
*pobject = nv_object(node);
if (ret)
return ret;
ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
if (ret)
return ret;
node->base.addr = node->mem->offset;
node->base.size = node->mem->length;
return 0;
}
static void
nv04_instobj_dtor(struct nouveau_object *object)
{
struct nv04_instmem_priv *priv = (void *)object->engine;
struct nv04_instobj_priv *node = (void *)object;
nouveau_mm_free(&priv->heap, &node->mem);
nouveau_instobj_destroy(&node->base);
}
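/* Accesses to an nv04 instobj are forwarded to the parent instmem,
 * which exposes PRAMIN through the register aperture at 0x700000
 * (see nv04_instmem_rd32/wr32 below).
 */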
static u32
nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
{
struct nv04_instobj_priv *node = (void *)object;
return nv_ro32(object->engine, node->mem->offset + addr);
}
static void
nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
struct nv04_instobj_priv *node = (void *)object;
nv_wo32(object->engine, node->mem->offset + addr, data);
}
static struct nouveau_oclass
nv04_instobj_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_instobj_ctor,
.dtor = nv04_instobj_dtor,
.init = _nouveau_instobj_init,
.fini = _nouveau_instobj_fini,
.rd32 = nv04_instobj_rd32,
.wr32 = nv04_instobj_wr32,
},
};
int
nv04_instmem_init(struct drm_device *dev)
nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
u32 size, u32 align, struct nouveau_object **pobject)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_instmem_priv *priv;
struct nouveau_object *engine = nv_object(imem);
struct nv04_instmem_priv *priv = (void *)(imem);
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_priv->engine.instmem.priv = priv;
ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
(void *)(unsigned long)align, size, pobject);
if (ret)
return ret;
/* INSTMEM itself creates objects to reserve (and preserve across
* suspend/resume) various fixed data locations; each of these takes
* a reference on INSTMEM itself, which would prevent it from ever
* being freed. We drop all the self-references here to avoid this.
*/
if (unlikely(!priv->created))
atomic_dec(&engine->refcount);
return 0;
}
/* PRAMIN aperture maps over the end of vram, reserve the space */
dev_priv->ramin_available = true;
dev_priv->ramin_rsvd_vram = 512 * 1024;
static int
nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_instmem_priv *priv;
int ret;
ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_rsvd_vram);
ret = nouveau_instmem_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0, 0, &priv->vbios);
/* PRAMIN aperture maps over the end of VRAM, reserve it */
priv->base.reserved = 512 * 1024;
priv->base.alloc = nv04_instmem_alloc;
ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, NVOBJ_FLAG_ZERO_ALLOC,
&priv->ramht);
/* 0x00000-0x10000: reserve for probable vbios image */
ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
if (ret)
return ret;
/* 0x18000-0x18200: reserve for RAMRO */
ret = nouveau_gpuobj_new(dev, NULL, 0x00200, 0, 0, &priv->ramro);
/* 0x10000-0x18000: reserve for RAMHT */
ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
if (ret)
return ret;
/* 0x18200-0x18a00: reserve for RAMFC (enough for 32 nv30 channels) */
ret = nouveau_gpuobj_new(dev, NULL, 0x00800, 0, NVOBJ_FLAG_ZERO_ALLOC,
&priv->ramfc);
/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
if (ret)
return ret;
ret = nouveau_ramht_new(dev, priv->ramht, &dev_priv->ramht);
/* 0x18800-0x18a00: reserve for RAMRO */
ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
if (ret)
return ret;
priv->created = true;
return 0;
}
void
nv04_instmem_takedown(struct drm_device *dev)
nv04_instmem_dtor(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_instmem_priv *priv = dev_priv->engine.instmem.priv;
nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
struct nv04_instmem_priv *priv = (void *)object;
nouveau_gpuobj_ref(NULL, &priv->ramfc);
nouveau_gpuobj_ref(NULL, &priv->ramro);
nouveau_gpuobj_ref(NULL, &priv->ramht);
if (drm_mm_initialized(&dev_priv->ramin_heap))
drm_mm_takedown(&dev_priv->ramin_heap);
kfree(priv);
dev_priv->engine.instmem.priv = NULL;
nouveau_gpuobj_ref(NULL, &priv->vbios);
nouveau_mm_fini(&priv->heap);
if (priv->iomem)
iounmap(priv->iomem);
nouveau_instmem_destroy(&priv->base);
}
int
nv04_instmem_suspend(struct drm_device *dev)
{
return 0;
}
void
nv04_instmem_resume(struct drm_device *dev)
static u32
nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
{
return nv_rd32(object, 0x700000 + addr);
}
int
nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
u32 size, u32 align)
static void
nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_mm_node *ramin = NULL;
do {
if (drm_mm_pre_get(&dev_priv->ramin_heap))
return -ENOMEM;
spin_lock(&dev_priv->ramin_lock);
ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
if (ramin == NULL) {
spin_unlock(&dev_priv->ramin_lock);
return -ENOMEM;
}
ramin = drm_mm_get_block_atomic(ramin, size, align);
spin_unlock(&dev_priv->ramin_lock);
} while (ramin == NULL);
gpuobj->node = ramin;
gpuobj->vinst = ramin->start;
return 0;
return nv_wr32(object, 0x700000 + addr, data);
}
void
nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
spin_lock(&dev_priv->ramin_lock);
drm_mm_put_block(gpuobj->node);
gpuobj->node = NULL;
spin_unlock(&dev_priv->ramin_lock);
}
int
nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
gpuobj->pinst = gpuobj->vinst;
return 0;
}
void
nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}
void
nv04_instmem_flush(struct drm_device *dev)
{
}
struct nouveau_oclass
nv04_instmem_oclass = {
.handle = NV_SUBDEV(INSTMEM, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_instmem_ctor,
.dtor = nv04_instmem_dtor,
.init = _nouveau_instmem_init,
.fini = _nouveau_instmem_fini,
.rd32 = nv04_instmem_rd32,
.wr32 = nv04_instmem_wr32,
},
};
#ifndef __NV04_INSTMEM_H__
#define __NV04_INSTMEM_H__
#include <core/gpuobj.h>
#include <core/mm.h>
#include <subdev/instmem.h>
struct nv04_instmem_priv {
struct nouveau_instmem base;
bool created;
void __iomem *iomem;
struct nouveau_mm heap;
struct nouveau_gpuobj *vbios;
struct nouveau_gpuobj *ramht;
struct nouveau_gpuobj *ramro;
struct nouveau_gpuobj *ramfc;
};
struct nv04_instobj_priv {
struct nouveau_instobj base;
struct nouveau_mm_node *mem;
};
void nv04_instmem_dtor(struct nouveau_object *);
int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *,
u32 size, u32 align, struct nouveau_object **pobject);
#endif
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/ramht.h>
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv04.h"
int nv40_instmem_init(struct drm_device *dev)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
static inline int
nv44_graph_class(struct nv04_instmem_priv *priv)
{
	if ((nv_device(priv)->chipset & 0xf0) == 0x60)
		return 1;
	return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
}
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct pci_dev *pdev = device->pdev;
struct nv04_instmem_priv *priv;
u32 vs, rsvd;
int ret;
int ret, bar, vs;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_priv->engine.instmem.priv = priv;
ret = nouveau_instmem_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
/* map bar */
if (pci_resource_len(pdev, 2))
bar = 2;
else
bar = 3;
priv->iomem = ioremap(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
if (!priv->iomem) {
nv_error(priv, "unable to map PRAMIN BAR\n");
return -EFAULT;
}
/* PRAMIN aperture maps over the end of vram, reserve enough space
* to fit graphics contexts for every channel, the magics come
* from engine/graph/nv40.c
*/
vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
else rsvd = 0x4a40 * vs;
rsvd += 16 * 1024;
rsvd *= 32; /* per-channel */
rsvd += 512 * 1024; /* pci(e)gart table */
rsvd += 512 * 1024; /* object storage */
dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
dev_priv->ramin_available = true;
ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_rsvd_vram);
vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
else if (nv44_graph_class(priv)) priv->base.reserved = 0x4980 * vs;
else priv->base.reserved = 0x4a40 * vs;
priv->base.reserved += 16 * 1024;
priv->base.reserved *= 32; /* per-channel */
priv->base.reserved += 512 * 1024; /* pci(e)gart table */
priv->base.reserved += 512 * 1024; /* object storage */
priv->base.reserved = round_up(priv->base.reserved, 4096);
priv->base.alloc = nv04_instmem_alloc;
ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0, 0, &priv->vbios);
ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, NVOBJ_FLAG_ZERO_ALLOC,
&priv->ramht);
ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
if (ret)
return ret;
/* 0x18000-0x18200: reserve for RAMRO
* 0x18200-0x20000: padding
*/
ret = nouveau_gpuobj_new(dev, NULL, 0x08000, 0, 0, &priv->ramro);
ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
if (ret)
return ret;
/* 0x20000-0x21000: reserve for RAMFC
* 0x21000-0x40000: padding + some unknown stuff (see below)
*
* It appears something is controlled by 0x2220/0x2230 on certain
* NV4x chipsets as well as RAMFC. When 0x2230 == 0 ("new style"
* control) the upper 16-bits of 0x2220 points at this other
* mysterious table that's clobbering important things.
*
* We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
* smashed to pieces on us, so reserve 0x30000-0x40000 too..
* 0x21000-0x40000: padding and some unknown crap
*/
ret = nouveau_gpuobj_new(dev, NULL, 0x20000, 0, NVOBJ_FLAG_ZERO_ALLOC,
&priv->ramfc);
if (ret)
return ret;
ret = nouveau_ramht_new(dev, priv->ramht, &dev_priv->ramht);
ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
if (ret)
return ret;
priv->created = true;
return 0;
}
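Both the old init path and the new constructor size the PRAMIN reservation the same way: a per-chipset graphics-context size, plus 16KiB of slack, multiplied by 32 channels, plus 512KiB each for the pci(e)gart table and object storage, rounded up to 4KiB. A standalone sketch of that arithmetic for one hypothetical configuration (chipset 0x43 and vs = 2 are invented, not read from hardware; for 0x43 the nv44-class test is false, so the 0x4a40 rule applies and the 0x4980 branch is omitted here):

/* Sketch: the reserved-size calculation from the constructor above. */
#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int
main(void)
{
        unsigned chipset = 0x43, vs = 2;     /* hypothetical example values */
        uint32_t reserved;

        if (chipset == 0x40)
                reserved = 0x6aa0 * vs;
        else if (chipset < 0x43)
                reserved = 0x4f00 * vs;
        else
                reserved = 0x4a40 * vs;      /* non-nv44-class case for 0x43 */

        reserved += 16 * 1024;
        reserved *= 32;                      /* per-channel */
        reserved += 512 * 1024;              /* pci(e)gart table */
        reserved += 512 * 1024;              /* object storage */
        reserved  = ROUND_UP(reserved, 4096);

        printf("reserved PRAMIN: 0x%x bytes (%u KiB)\n",
               (unsigned)reserved, (unsigned)(reserved / 1024));
        return 0;
}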
void
nv40_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv04_instmem_priv *priv = dev_priv->engine.instmem.priv;
	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
	nouveau_gpuobj_ref(NULL, &priv->ramfc);
	nouveau_gpuobj_ref(NULL, &priv->ramro);
	nouveau_gpuobj_ref(NULL, &priv->ramht);
	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);
	kfree(priv);
	dev_priv->engine.instmem.priv = NULL;
}
static u32
nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
{
	struct nv04_instmem_priv *priv = (void *)object;
	return ioread32_native(priv->iomem + addr);
}
int
nv40_instmem_suspend(struct drm_device *dev)
{
	return 0;
}
static void
nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	struct nv04_instmem_priv *priv = (void *)object;
	iowrite32_native(data, priv->iomem + addr);
}
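Unlike nv04, which reaches PRAMIN through BAR0 + 0x700000, the nv40 accessors go through the PRAMIN BAR ioremapped in the constructor (BAR2 when it has a non-zero length, otherwise BAR3), using native-endian MMIO reads and writes. A small sketch of that selection and a round-trip, where bar_len[] and the heap buffer are stand-ins for pci_resource_len() and ioremap(), not real PCI calls:

/* Sketch only: models the BAR pick and a 32-bit access through the mapping. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        size_t bar_len[4] = { 0, 0, 16 << 20, 16 << 20 }; /* pretend resources */
        int bar = bar_len[2] ? 2 : 3;                     /* same rule as the ctor */
        uint32_t *iomem = calloc(bar_len[bar] / 4, 4);    /* stand-in for ioremap */

        if (!iomem)
                return 1;
        iomem[0x10 / 4] = 0x12345678;                     /* ~ iowrite32_native */
        printf("PRAMIN via BAR%d, +0x10 = 0x%08x\n", bar, iomem[0x10 / 4]);
        free(iomem);
        return 0;
}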
void
nv40_instmem_resume(struct drm_device *dev)
{
}
int
nv40_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
u32 size, u32 align)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_mm_node *ramin = NULL;
do {
if (drm_mm_pre_get(&dev_priv->ramin_heap))
return -ENOMEM;
spin_lock(&dev_priv->ramin_lock);
ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
if (ramin == NULL) {
spin_unlock(&dev_priv->ramin_lock);
return -ENOMEM;
}
ramin = drm_mm_get_block_atomic(ramin, size, align);
spin_unlock(&dev_priv->ramin_lock);
} while (ramin == NULL);
gpuobj->node = ramin;
gpuobj->vinst = ramin->start;
return 0;
}
void
nv40_instmem_put(struct nouveau_gpuobj *gpuobj)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
spin_lock(&dev_priv->ramin_lock);
drm_mm_put_block(gpuobj->node);
gpuobj->node = NULL;
spin_unlock(&dev_priv->ramin_lock);
}
int
nv40_instmem_map(struct nouveau_gpuobj *gpuobj)
{
gpuobj->pinst = gpuobj->vinst;
return 0;
}
void
nv40_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}
void
nv40_instmem_flush(struct drm_device *dev)
{
}
struct nouveau_oclass
nv40_instmem_oclass = {
.handle = NV_SUBDEV(INSTMEM, 0x40),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_instmem_ctor,
.dtor = nv04_instmem_dtor,
.init = _nouveau_instmem_init,
.fini = _nouveau_instmem_fini,
.rd32 = nv40_instmem_rd32,
.wr32 = nv40_instmem_wr32,
},
};
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include <subdev/vm.h>
struct nvc0_instmem_priv {
struct nouveau_gpuobj *bar1_pgd;
struct nouveau_channel *bar1;
struct nouveau_gpuobj *bar3_pgd;
struct nouveau_channel *bar3;
};
int
nvc0_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
dev_priv->ramin_available = false;
return 0;
}
void
nvc0_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
dev_priv->ramin_available = true;
}
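The resume path above rebinds the BAR1/BAR3 channels by writing their instance-block addresses, shifted down 12 bits and tagged with the high valid bits, into 0x001704 and 0x001714. A sketch of how those register values are formed; the instance-block addresses below are invented examples, only the bit layout mirrors the code:

/* Sketch: forming the 0x001704/0x001714 values from example addresses. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t bar1_ramin = 0x20000ULL << 12;  /* hypothetical, 4KiB aligned */
        uint64_t bar3_ramin = 0x20010ULL << 12;

        uint32_t r1704 = 0x80000000u | (uint32_t)(bar1_ramin >> 12);
        uint32_t r1714 = 0xc0000000u | (uint32_t)(bar3_ramin >> 12);

        printf("0x001704 <- 0x%08x\n", r1704);
        printf("0x001714 <- 0x%08x\n", r1714);
        return 0;
}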
static void
nvc0_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan;
chan = *pchan;
*pchan = NULL;
if (!chan)
return;
nouveau_vm_ref(NULL, &chan->vm, NULL);
if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
}
static int
nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
struct nouveau_channel **pchan,
struct nouveau_gpuobj *pgd, u64 vm_size)
{
struct nouveau_channel *chan;
int ret;
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
chan->dev = dev;
ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
if (ret) {
nvc0_channel_del(&chan);
return ret;
}
ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
if (ret) {
nvc0_channel_del(&chan);
return ret;
}
ret = nouveau_vm_ref(vm, &chan->vm, NULL);
if (ret) {
nvc0_channel_del(&chan);
return ret;
}
nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
*pchan = chan;
return 0;
}
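nvc0_channel_new() seeds the channel's instance block with the page-directory address at 0x200/0x204 and the VM limit minus one at 0x208/0x20c, each split into low and high 32-bit halves. A standalone sketch of that split; pgd_addr and vm_size are example values, and lower_32_bits/upper_32_bits are re-created here for userspace:

/* Sketch: the four words written at 0x200..0x20c above. */
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(v) ((uint32_t)(v))
#define upper_32_bits(v) ((uint32_t)((uint64_t)(v) >> 32))

int
main(void)
{
        uint64_t pgd_addr = 0x1f0200000ULL;      /* hypothetical pgd->vinst */
        uint64_t vm_size  = 1ULL << 40;          /* example 1 TiB address space */

        printf("0x0200: 0x%08x\n", lower_32_bits(pgd_addr));
        printf("0x0204: 0x%08x\n", upper_32_bits(pgd_addr));
        printf("0x0208: 0x%08x\n", lower_32_bits(vm_size - 1));
        printf("0x020c: 0x%08x\n", upper_32_bits(vm_size - 1));
        return 0;
}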
int
nvc0_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct pci_dev *pdev = dev->pdev;
struct nvc0_instmem_priv *priv;
struct nouveau_vm *vm = NULL;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
pinstmem->priv = priv;
/* BAR3 VM */
ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
&dev_priv->bar3_vm);
if (ret)
goto error;
ret = nouveau_gpuobj_new(dev, NULL,
(pci_resource_len(pdev, 3) >> 12) * 8, 0,
NVOBJ_FLAG_DONT_MAP |
NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->bar3_vm->pgt[0].obj[0]);
if (ret)
goto error;
dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
if (ret)
goto error;
ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
if (ret)
goto error;
nouveau_vm_ref(NULL, &vm, NULL);
ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
if (ret)
goto error;
/* BAR1 VM */
ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
if (ret)
goto error;
ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
if (ret)
goto error;
ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
if (ret)
goto error;
nouveau_vm_ref(NULL, &vm, NULL);
ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
if (ret)
goto error;
/* channel vm */
ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
&dev_priv->chan_vm);
if (ret)
goto error;
nvc0_instmem_resume(dev);
return 0;
error:
nvc0_instmem_takedown(dev);
return ret;
}
void
nvc0_instmem_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
struct nouveau_vm *vm = NULL;
nvc0_instmem_suspend(dev);
nv_wr32(dev, 0x1704, 0x00000000);
nv_wr32(dev, 0x1714, 0x00000000);
nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
nvc0_channel_del(&priv->bar1);
nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
nvc0_channel_del(&priv->bar3);
nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
dev_priv->engine.instmem.priv = NULL;
kfree(priv);
}
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include "nv04.h"
#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE ( 4 * 1024)
/*******************************************************************************
* VM map/unmap callbacks
******************************************************************************/
static void
nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = 0x00008 + (pte * 4);
while (cnt) {
u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
u32 phys = (u32)*list++;
while (cnt && page--) {
nv_wo32(pgt, pte, phys | 3);
phys += NV04_PDMA_PAGE;
pte += 4;
cnt -= 1;
}
}
}
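nv04_vm_map_sg() above emits one 32-bit PTE per 4KiB GART page: the bus address with the low bits 0x3 set, written starting 8 bytes into the page-table object to skip the DMA-object header. A userspace sketch of the same loop; FAKE_PAGE_SIZE and the example bus addresses are assumptions, and the word array stands in for nv_wo32() on the gpuobj:

/* Sketch of the nv04 PCIGART PTE fill. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE 4096u
#define NV04_PDMA_PAGE 4096u

static void
sketch_map_sg(uint32_t *pgt, uint32_t pte, uint32_t cnt, const uint64_t *list)
{
        uint32_t off = (0x00008 + pte * 4) / 4;  /* word index past the header */

        while (cnt) {
                uint32_t page = FAKE_PAGE_SIZE / NV04_PDMA_PAGE;
                uint32_t phys = (uint32_t)*list++;

                while (cnt && page--) {
                        pgt[off++] = phys | 3;   /* low bits 0x3, as in the code */
                        phys += NV04_PDMA_PAGE;
                        cnt--;
                }
        }
}

int
main(void)
{
        uint32_t pgt[16] = { 0 };
        uint64_t pages[2] = { 0x10000000, 0x10400000 }; /* example bus addresses */

        sketch_map_sg(pgt, 0, 2, pages);
        printf("pte0 = 0x%08x, pte1 = 0x%08x\n", pgt[2], pgt[3]);
        return 0;
}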
static void
nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
pte = 0x00008 + (pte * 4);
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
pte += 4;
}
}
static void
nv04_vm_flush(struct nouveau_vm *vm)
{
}
/*******************************************************************************
* VM object
******************************************************************************/
int
nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
struct nouveau_vm **pvm)
{
return -EINVAL;
}
/*******************************************************************************
* VMMGR subdev
******************************************************************************/
static int
nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_vmmgr_priv *priv;
struct nouveau_gpuobj *dma;
int ret;
ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
"pcigart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
priv->base.create = nv04_vm_create;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
priv->base.map_sg = nv04_vm_map_sg;
priv->base.unmap = nv04_vm_unmap;
priv->base.flush = nv04_vm_flush;
ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
&priv->vm);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL,
(NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
8, 16, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
dma = priv->vm->pgt[0].obj[0];
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
return 0;
}
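The constructor allocates a single page-table object sized for the whole 128MiB aperture: one 4-byte PTE per 4KiB page plus the 8-byte header whose two words (0x0002103d and the aperture limit) are written at the end of the ctor. A quick standalone check of that size:

/* Sketch: size of the nv04 GART page-table object allocated above. */
#include <stdio.h>

#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE (  4 * 1024)

int
main(void)
{
        unsigned ptes = NV04_PDMA_SIZE / NV04_PDMA_PAGE;
        unsigned size = ptes * 4 + 8;

        printf("%u PTEs, object size %u bytes\n", ptes, size);
        printf("header: 0x%08x 0x%08x\n", 0x0002103du,
               (unsigned)(NV04_PDMA_SIZE - 1));
        return 0;
}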
void
nv04_vmmgr_dtor(struct nouveau_object *object)
{
struct nv04_vmmgr_priv *priv = (void *)object;
if (priv->vm) {
nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &priv->vm, NULL);
}
if (priv->page) {
pci_unmap_page(nv_device(priv)->pdev, priv->null,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(priv->page);
}
nouveau_vmmgr_destroy(&priv->base);
}
struct nouveau_oclass
nv04_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_vmmgr_ctor,
.dtor = nv04_vmmgr_dtor,
.init = _nouveau_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
#ifndef __NV04_VMMGR_PRIV__
#define __NV04_VMMGR_PRIV__
#include <subdev/vm.h>
struct nv04_vmmgr_priv {
struct nouveau_vmmgr base;
struct nouveau_vm *vm;
struct page *page;
dma_addr_t null;
};
#endif
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include <subdev/timer.h>
#include <subdev/vm.h>
#include "nv04.h"
#define NV41_GART_SIZE (512 * 1024 * 1024)
#define NV41_GART_PAGE ( 4 * 1024)
/*******************************************************************************
* VM map/unmap callbacks
******************************************************************************/
static void
nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = pte * 4;
while (cnt) {
u32 page = PAGE_SIZE / NV41_GART_PAGE;
u64 phys = (u64)*list++;
while (cnt && page--) {
nv_wo32(pgt, pte, (phys >> 7) | 1);
phys += NV41_GART_PAGE;
pte += 4;
cnt -= 1;
}
}
}
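nv41_vm_map_sg() uses a denser PTE than nv04: the bus address shifted right by 7 with bit 0 set, still one PTE per 4KiB GART page, so a single CPU page fills PAGE_SIZE/4KiB consecutive entries. A sketch of that encoding under an assumed 64KiB PAGE_SIZE; phys is an invented bus address:

/* Sketch of the nv41 PCIEGART PTE encoding. */
#include <stdint.h>
#include <stdio.h>

#define NV41_GART_PAGE (4 * 1024)

int
main(void)
{
        uint64_t phys = 0x80000000ULL;            /* example bus address */
        unsigned page_size = 64 * 1024;           /* pretend 64KiB PAGE_SIZE */
        unsigned n = page_size / NV41_GART_PAGE;  /* 16 PTEs per CPU page */

        for (unsigned i = 0; i < n; i++, phys += NV41_GART_PAGE) {
                if (i < 2 || i == n - 1)
                        printf("pte[%2u] = 0x%08x\n", i,
                               (uint32_t)((phys >> 7) | 1));
        }
        return 0;
}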
static void
nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
pte = pte * 4;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
pte += 4;
}
}
static void
nv41_vm_flush(struct nouveau_vm *vm)
{
struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
mutex_lock(&nv_subdev(priv)->mutex);
nv_wr32(priv, 0x100810, 0x00000022);
if (!nv_wait(priv, 0x100810, 0x00000100, 0x00000100)) {
nv_warn(priv, "flush timeout, 0x%08x\n",
nv_rd32(priv, 0x100810));
}
nv_wr32(priv, 0x100810, 0x00000000);
mutex_unlock(&nv_subdev(priv)->mutex);
}
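The flush handshake above is: write 0x22 to 0x100810, wait for bit 8 (0x100) to come back, then write 0, all under the subdev mutex. Below is a userspace model of that sequence; fake_regs, fake_wr32/fake_rd32 and fake_wait() are stand-ins for MMIO and nv_wait(), and the "hardware" completion is simulated inside the poll loop:

/* Sketch only: models the 0x100810 flush handshake. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[0x200000 / 4];

static void fake_wr32(uint32_t reg, uint32_t v) { fake_regs[reg / 4] = v; }
static uint32_t fake_rd32(uint32_t reg)         { return fake_regs[reg / 4]; }

static bool
fake_wait(uint32_t reg, uint32_t mask, uint32_t val, int tries)
{
        while (tries--) {
                if ((fake_rd32(reg) & mask) == val)
                        return true;
                fake_regs[reg / 4] |= 0x100;     /* pretend the flush completed */
        }
        return false;
}

int
main(void)
{
        fake_wr32(0x100810, 0x00000022);
        if (!fake_wait(0x100810, 0x00000100, 0x00000100, 10))
                printf("flush timeout, 0x%08x\n", fake_rd32(0x100810));
        fake_wr32(0x100810, 0x00000000);
        printf("flush done, 0x100810 = 0x%08x\n", fake_rd32(0x100810));
        return 0;
}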
/*******************************************************************************
* VMMGR subdev
******************************************************************************/
static int
nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_vmmgr_priv *priv;
int ret;
ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
"pciegart", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
priv->base.create = nv04_vm_create;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
priv->base.map_sg = nv41_vm_map_sg;
priv->base.unmap = nv41_vm_unmap;
priv->base.flush = nv41_vm_flush;
ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
&priv->vm);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL,
(NV41_GART_SIZE / NV41_GART_PAGE) * 4,
16, NVOBJ_FLAG_ZERO_ALLOC,
&priv->vm->pgt[0].obj[0]);
priv->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
return 0;
}
static int
nv41_vmmgr_init(struct nouveau_object *object)
{
struct nv04_vmmgr_priv *priv = (void *)object;
struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0];
int ret;
ret = nouveau_vmmgr_init(&priv->base);
if (ret)
return ret;
nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
nv_wr32(priv, 0x100820, 0x00000000);
return 0;
}
struct nouveau_oclass
nv41_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0x41),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv41_vmmgr_ctor,
.dtor = nv04_vmmgr_dtor,
.init = nv41_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
......@@ -22,11 +22,18 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include <core/device.h>
#include <core/gpuobj.h>
#include "nouveau_drv.h"
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
struct nv50_vmmgr_priv {
struct nouveau_vmmgr base;
spinlock_t lock;
};
void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2])
......@@ -35,11 +42,11 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
u32 coverage = 0;
if (pgt[0]) {
phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
coverage = (pgt[0]->size >> 3) << 12;
} else
if (pgt[1]) {
phys = 0x00000001 | pgt[1]->vinst; /* present */
phys = 0x00000001 | pgt[1]->addr; /* present */
coverage = (pgt[1]->size >> 3) << 16;
}
......@@ -73,15 +80,14 @@ void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
u32 comp = (mem->memtype & 0x180) >> 7;
u32 block, target;
int i;
/* IGPs don't have real VRAM, re-target to stolen system memory */
target = 0;
if (nvfb_vram_sys_base(dev_priv->dev)) {
phys += nvfb_vram_sys_base(dev_priv->dev);
if (nouveau_fb(vma->vm->vmm)->ram.stolen) {
phys += nouveau_fb(vma->vm->vmm)->ram.stolen;
target = 3;
}
......@@ -145,33 +151,81 @@ nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
void
nv50_vm_flush(struct nouveau_vm *vm)
{
struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nouveau_engine *engine;
int i;
pinstmem->flush(vm->dev);
/* BAR */
if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
nv50_vm_flush_engine(vm->dev, 6);
return;
#if 0
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
if (atomic_read(&vm->engref[i])) {
engine = nouveau_engine(vm->vmm, i);
if (engine && engine->tlb_flush)
engine->tlb_flush(engine);
}
for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
if (atomic_read(&vm->engref[i]))
dev_priv->eng[i]->tlb_flush(vm->dev, i);
}
#else
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x06); /* bar */
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x05); /* fifo */
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x00); /* gr */
#endif
}
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
nv50_vm_flush_engine(struct nouveau_subdev *subdev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
unsigned long flags;
spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
spin_lock_irqsave(&priv->lock, flags);
nv_wr32(subdev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000))
nv_error(subdev, "vm flush timeout: engine %d\n", engine);
spin_unlock_irqrestore(&priv->lock, flags);
}
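nv50_vm_flush_engine() serialises TLB flushes behind the new per-vmmgr spinlock and kicks them by writing the engine id into the upper half of 0x100c80 with bit 0 as the trigger, then polling bit 0 until it clears. The values written for the three engines flushed by nv50_vm_flush() work out as below:

/* Sketch: 0x100c80 trigger values for the bar/fifo/gr flushes above. */
#include <stdio.h>

int
main(void)
{
        const int engines[] = { 0x06, 0x05, 0x00 };
        const char *names[] = { "bar", "fifo", "gr" };

        for (int i = 0; i < 3; i++)
                printf("%-4s: 0x100c80 <- 0x%08x\n", names[i],
                       (engines[i] << 16) | 1);
        return 0;
}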
static int
nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_offset, struct nouveau_vm **pvm)
{
u32 block = (1 << (vmm->pgt_bits + 12));
if (block > length)
block = length;
return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm);
}
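nv50_vm_create() picks the per-page-table block size from pgt_bits: with pgt_bits = 29 - 12 set in the constructor that follows, each page table spans 1 << 29 bytes (512 MiB), clamped to the VM length for smaller address spaces. A standalone check of that arithmetic; the example lengths are arbitrary:

/* Sketch: the block size passed to nouveau_vm_create() above. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint32_t pgt_bits = 29 - 12;
        uint64_t lengths[] = { 1ULL << 40, 256ULL << 20 }; /* example VM sizes */

        for (int i = 0; i < 2; i++) {
                uint64_t block = 1ULL << (pgt_bits + 12);
                if (block > lengths[i])
                        block = lengths[i];
                printf("length %lluMiB -> block %lluMiB\n",
                       (unsigned long long)(lengths[i] >> 20),
                       (unsigned long long)(block >> 20));
        }
        return 0;
}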
static int
nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv50_vmmgr_priv *priv;
int ret;
ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
priv->base.pgt_bits = 29 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 16;
priv->base.create = nv50_vm_create;
priv->base.map_pgt = nv50_vm_map_pgt;
priv->base.map = nv50_vm_map;
priv->base.map_sg = nv50_vm_map_sg;
priv->base.unmap = nv50_vm_unmap;
priv->base.flush = nv50_vm_flush;
spin_lock_init(&priv->lock);
return 0;
}
struct nouveau_oclass
nv50_vmmgr_oclass = {
.handle = NV_SUBDEV(VM, 0x50),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_vmmgr_ctor,
.dtor = _nouveau_vmmgr_dtor,
.init = _nouveau_vmmgr_init,
.fini = _nouveau_vmmgr_fini,
},
};
......@@ -145,6 +145,9 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
/* allocate hw channel id */
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
if (dev_priv->card_type == NV_50 && chan->id == 0)
continue;
if (!dev_priv->channels.ptr[chan->id]) {
nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
break;
......
......@@ -96,16 +96,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
drm_mm_takedown(&chan->notifier_heap);
}
static void
nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
struct nouveau_gpuobj *gpuobj)
{
NV_DEBUG(dev, "\n");
if (gpuobj->priv)
drm_mm_put_block(gpuobj->priv);
}
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
int size, uint32_t start, uint32_t end,
......@@ -147,8 +137,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
return ret;
}
nobj->dtor = nouveau_notifier_gpuobj_dtor;
nobj->priv = mem;
ret = nouveau_ramht_insert(chan, handle, nobj);
nouveau_gpuobj_ref(NULL, &nobj);
......