Commit be761d5e authored by Dave Airlie

Merge remote branch 'nouveau/drm-nouveau-fixes' of /ssd/git/drm-nouveau-next into drm-fixes

* 'nouveau/drm-nouveau-fixes' of /ssd/git/drm-nouveau-next:
  drm/nouveau: fix allocation of notifier object
  drm/nouveau: fix notifier memory corruption bug
  drm/nouveau: fix pinning of notifier block
  drm/nouveau: populate ttm_alloced with false, when it's not
  drm/nouveau: fix nv30 pcie boards
  drm/nouveau: split ramin_lock into two locks, one hardirq safe
parents 12dfc843 e4ac93bf
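
A note on the locking changes in the diff below: the fixes replace plain spin_lock() on the old ramin_lock with spin_lock_irqsave() on a new vm_lock, so the lock can also be taken safely from hardirq context. A minimal sketch of the pattern, in self-contained kernel-style C; the lock and function names here are illustrative stand-ins, not the driver's actual code:

	#include <linux/spinlock.h>

	/* illustrative stand-in for dev_priv->vm_lock */
	static DEFINE_SPINLOCK(demo_vm_lock);

	static void demo_guarded_flush(void)
	{
		unsigned long flags;

		/* Saving and restoring the IRQ state means this section
		 * cannot be preempted on the same CPU by an interrupt
		 * handler that also takes the lock, which with a plain
		 * spin_lock() would deadlock. */
		spin_lock_irqsave(&demo_vm_lock, flags);
		/* ... program and poll the flush registers here ... */
		spin_unlock_irqrestore(&demo_vm_lock, flags);
	}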
@@ -83,7 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan)
 		return ret;
 
 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
 				     &chan->m2mf_ntfy);
 	if (ret)
 		return ret;
@@ -682,6 +682,9 @@ struct drm_nouveau_private {
 	/* For PFIFO and PGRAPH. */
 	spinlock_t context_switch_lock;
 
+	/* VM/PRAMIN flush, legacy PRAMIN aperture */
+	spinlock_t vm_lock;
+
 	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
 	struct nouveau_ramht  *ramht;
 	struct nouveau_gpuobj *ramfc;
@@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info)
 		OUT_RING  (chan, 0);
 	}
 
-	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
 	FIRE_RING(chan);
 	mutex_unlock(&chan->mutex);
 
 	ret = -EBUSY;
 	for (i = 0; i < 100000; i++) {
-		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
 			ret = 0;
 			break;
 		}
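
The two changed lines above are the notifier corruption fix: chan->m2mf_ntfy is a byte offset into the notifier buffer, while nouveau_bo_rd32()/nouveau_bo_wr32() take an index in 32-bit words, so the byte offset must be divided by 4 before the word index is added. A sketch of the distinction, using hypothetical helpers over a plain mapping rather than the driver's bo accessors:

	#include <stdint.h>

	/* Word-indexed accessors, standing in for nouveau_bo_rd32()/_wr32():
	 * 'index' counts 32-bit words from the start of the mapping. */
	static inline uint32_t buf_rd32(const uint32_t *map, unsigned int index)
	{
		return map[index];
	}

	static inline void buf_wr32(uint32_t *map, unsigned int index, uint32_t val)
	{
		map[index] = val;
	}

	/* The notifier lives at a byte offset, so convert to a word index
	 * first.  Passing the byte offset straight in, as the old code did,
	 * lands roughly four times too far into the buffer object. */
	static inline void notifier_arm(uint32_t *map, uint32_t ntfy_byte_off)
	{
		buf_wr32(map, ntfy_byte_off / 4 + 3, 0xffffffff);
	}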
@@ -398,7 +398,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
 		dma_bits = 40;
 	} else
 	if (drm_pci_device_is_pcie(dev) &&
-	    dev_priv->chipset != 0x40 &&
+	    dev_priv->chipset  > 0x40 &&
 	    dev_priv->chipset != 0x45) {
 		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
 			dma_bits = 39;
@@ -35,19 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *ntfy = NULL;
-	uint32_t flags;
+	uint32_t flags, ttmpl;
 	int ret;
 
-	if (nouveau_vram_notify)
+	if (nouveau_vram_notify) {
 		flags = NOUVEAU_GEM_DOMAIN_VRAM;
-	else
+		ttmpl = TTM_PL_FLAG_VRAM;
+	} else {
 		flags = NOUVEAU_GEM_DOMAIN_GART;
+		ttmpl = TTM_PL_FLAG_TT;
+	}
 
 	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
 	if (ret)
 		return ret;
 
-	ret = nouveau_bo_pin(ntfy, flags);
+	ret = nouveau_bo_pin(ntfy, ttmpl);
 	if (ret)
 		goto out_err;
 
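
The pinning fix above hinges on two flag namespaces that are easy to conflate: nouveau_gem_new() takes NOUVEAU_GEM_DOMAIN_* values, while nouveau_bo_pin() expects TTM placement flags (TTM_PL_FLAG_VRAM / TTM_PL_FLAG_TT). The old code fed the GEM domain straight into the pin call; tracking a separate ttmpl gives each API the flags it actually defines. A toy illustration of the hazard, with made-up values rather than the real header definitions:

	#include <stdio.h>

	/* Made-up values for illustration only; the real definitions live
	 * in the nouveau UAPI and TTM headers and need not line up. */
	#define DEMO_GEM_DOMAIN_VRAM 0x2  /* GEM namespace: "place in VRAM" */
	#define DEMO_TTM_PL_FLAG_TT  0x2  /* TTM namespace: "place in GART" */

	int main(void)
	{
		unsigned int flags = DEMO_GEM_DOMAIN_VRAM;

		/* Same bit pattern, different meaning once it crosses APIs:
		 * a pin routine reading TTM flags sees a GART request. */
		if (flags & DEMO_TTM_PL_FLAG_TT)
			printf("pinned to GART, not the VRAM we asked for\n");
		return 0;
	}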
@@ -1039,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
 {
 	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
 	struct drm_device *dev = gpuobj->dev;
+	unsigned long flags;
 
 	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
 		u64 ptr = gpuobj->vinst + offset;
 		u32 base = ptr >> 16;
 		u32 val;
 
-		spin_lock(&dev_priv->ramin_lock);
+		spin_lock_irqsave(&dev_priv->vm_lock, flags);
 		if (dev_priv->ramin_base != base) {
 			dev_priv->ramin_base = base;
 			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
 		}
 		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
-		spin_unlock(&dev_priv->ramin_lock);
+		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 		return val;
 	}
 
@@ -1063,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
 {
 	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
 	struct drm_device *dev = gpuobj->dev;
+	unsigned long flags;
 
 	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
 		u64 ptr = gpuobj->vinst + offset;
 		u32 base = ptr >> 16;
 
-		spin_lock(&dev_priv->ramin_lock);
+		spin_lock_irqsave(&dev_priv->vm_lock, flags);
 		if (dev_priv->ramin_base != base) {
 			dev_priv->ramin_base = base;
 			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
 		}
 		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
-		spin_unlock(&dev_priv->ramin_lock);
+		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 		return;
 	}
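
Background for the two hunks above: when a gpuobj has no direct BAR mapping (pinst == ~0), it is reached through a sliding window: register 0x001700 selects which 64 KiB-aligned chunk of instance memory appears at offset 0x700000, and the low 16 bits of the address index into that window. The window base, and the cached ramin_base, are global state shared by every caller, which is why the whole slide-then-access sequence must run under one lock. A simplified model of the scheme, with illustrative names and register callbacks rather than the driver's API:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Simplified model of the PRAMIN sliding window. */
	struct demo_window {
		spinlock_t lock;	/* plays the role of dev_priv->vm_lock */
		u32 base;		/* cached copy of register 0x001700 */
	};

	static u32 demo_window_read(struct demo_window *w, u64 addr,
				    u32 (*rd32)(u32 reg),
				    void (*wr32)(u32 reg, u32 val))
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(&w->lock, flags);
		if (w->base != (u32)(addr >> 16)) {
			/* Slide the 64 KiB window so it covers 'addr'. */
			w->base = addr >> 16;
			wr32(0x001700, w->base);
		}
		val = rd32(0x700000 + (addr & 0xffff));
		spin_unlock_irqrestore(&w->lock, flags);
		return val;
	}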
@@ -55,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 				be->func->clear(be);
 				return -EFAULT;
 			}
+			nvbe->ttm_alloced[nvbe->nr_pages] = false;
 		}
 
 		nvbe->nr_pages++;
@@ -427,7 +428,7 @@ nouveau_sgdma_init(struct drm_device *dev)
 	u32 aper_size, align;
 	int ret;
 
-	if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+	if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
 		aper_size = 512 * 1024 * 1024;
 	else
 		aper_size = 64 * 1024 * 1024;
@@ -457,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
 		dev_priv->gart_info.func = &nv50_sgdma_backend;
 	} else
 	if (drm_pci_device_is_pcie(dev) &&
-	    dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
 		if (nv44_graph_class(dev)) {
 			dev_priv->gart_info.func = &nv44_sgdma_backend;
 			align = 512 * 1024;
@@ -608,6 +608,7 @@ nouveau_card_init(struct drm_device *dev)
 	spin_lock_init(&dev_priv->channels.lock);
 	spin_lock_init(&dev_priv->tile.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
+	spin_lock_init(&dev_priv->vm_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
 	ret = engine->display.early_init(dev);
@@ -404,23 +404,25 @@ void
 nv50_instmem_flush(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	nv_wr32(dev, 0x00330c, 0x00000001);
 	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	nv_wr32(dev, 0x070000, 0x00000001);
 	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
@@ -174,10 +174,11 @@ void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
 	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
 		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
@@ -104,11 +104,12 @@ nvc0_vm_flush(struct nouveau_vm *vm)
 	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 	struct drm_device *dev = vm->dev;
 	struct nouveau_vm_pgd *vpgd;
+	unsigned long flags;
 	u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
 
 	pinstmem->flush(vm->dev);
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
 		/* looks like maybe a "free flush slots" counter, the
 		 * faster you write to 0x100cbc to more it decreases
@@ -125,5 +126,5 @@ nvc0_vm_flush(struct nouveau_vm *vm)
 			nv_rd32(dev, 0x100c80), engine);
 		}
 	}
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }