Commit 46d967ae authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Radeon and Nouveau fixes:

   So nouveau had a few regressions introduced; Ben and Maarten finally
   tracked down the one that was causing problems on my MacBook Pro.
   Nvidia also gave some info on an engine we were using incorrectly, so
   disable our use of it, and there is one regression with PCI hotplug
   affecting Optimus users.

   Radeon has an oops fix, a sync fix, and one workaround to avoid broken
   functionality on 32-bit x86; this needs better root-causing and a
   better fix, but the bandaid is a lot safer at this point"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: kernel panic in drm_calc_vbltimestamp_from_scanoutpos with 3.18.0-rc6
  drm/radeon: Ignore RADEON_GEM_GTT_WC on 32-bit x86
  drm/radeon: sync all BOs involved in a CS v2
  nouveau: move the hotplug ignore to correct place.
  drm/nouveau/gf116: remove copy1 engine
  drm/nouveau: prevent stale fence->channel pointers, and protect with rcu
  drm/nouveau/fifo/g84-: ack non-stall interrupt before handling it
parents 9044f940 00d6a9b6
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = nva3_disp_oclass;
 		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
...
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 	}

 	if (status & 0x40000000) {
-		nouveau_fifo_uevent(&priv->base);
 		nv_wr32(priv, 0x002100, 0x40000000);
+		nouveau_fifo_uevent(&priv->base);
 		status &= ~0x40000000;
 	}
 }
...
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
 	u32 inte = nv_rd32(priv, 0x002628);
 	u32 unkn;

+	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+
 	for (unkn = 0; unkn < 8; unkn++) {
 		u32 ints = (intr >> (unkn * 0x04)) & inte;
 		if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
 			nv_mask(priv, 0x002628, ints, 0);
 		}
 	}
-
-	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
 }

 static void
...
@@ -952,8 +952,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
 	}

 	if (stat & 0x80000000) {
-		nve0_fifo_intr_engine(priv);
 		nv_wr32(priv, 0x002100, 0x80000000);
+		nve0_fifo_intr_engine(priv);
 		stat &= ~0x80000000;
 	}
...
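All three fifo hunks above make the same change: the non-stall interrupt is now acknowledged in the status register (0x002100, or 0x0025a8 for the per-engine units) before the event is handled, not after. Acknowledging last can drop an event that latches while the handler runs. Below is a minimal standalone sketch of the race, with hypothetical read_status()/ack()/handle_events() helpers standing in for the MMIO accessors:

#include <stdint.h>

#define EVENT_PENDING 0x40000000u

/* Stubs standing in for the MMIO accessors and the event handler. */
static uint32_t read_status(void);
static void ack(uint32_t bits);
static void handle_events(void);

/* Racy order (what the old code did): handle, then ack.  An event
 * that latches while handle_events() runs is cleared by the ack
 * below without ever being processed. */
static void isr_lossy(void)
{
	if (read_status() & EVENT_PENDING) {
		handle_events();
		ack(EVENT_PENDING);
	}
}

/* Fixed order: ack first, so an event arriving during handling
 * re-latches the pending bit and raises a fresh interrupt. */
static void isr_safe(void)
{
	if (read_status() & EVENT_PENDING) {
		ack(EVENT_PENDING);
		handle_events();
	}
}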
@@ -629,7 +629,6 @@ int nouveau_pmops_suspend(struct device *dev)

 	pci_save_state(pdev);
 	pci_disable_device(pdev);
-	pci_ignore_hotplug(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 	return 0;
 }
@@ -933,6 +932,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	ret = nouveau_do_suspend(drm_dev, true);
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
+	pci_ignore_hotplug(pdev);
 	pci_set_power_state(pdev, PCI_D3cold);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 	return ret;
...
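The two hunks above relocate pci_ignore_hotplug() from the system-suspend path to the runtime-suspend path: only when the Optimus dGPU is runtime-powered down into D3cold can its port report a spurious unplug, while an ordinary system suspend should keep observing real hotplug events. Condensed from the hunk above (a sketch with assumed context; error handling and the preceding nouveau_do_suspend() call omitted), the runtime path now ends like this:

#include <linux/pci.h>

/* Sketch of the runtime-suspend tail after this fix; pdev setup is
 * assumed to have happened already. */
static int runtime_suspend_tail(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_ignore_hotplug(pdev);	/* dGPU power-off may fake an unplug */
	pci_set_power_state(pdev, PCI_D3cold);
	return 0;
}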
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
 	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
 }

-static void
+static int
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
+	int drop = 0;
+
 	fence_signal_locked(&fence->base);
 	list_del(&fence->head);
+	rcu_assign_pointer(fence->channel, NULL);

 	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
 		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

 		if (!--fctx->notify_ref)
-			nvif_notify_put(&fctx->notify);
+			drop = 1;
 	}

 	fence_put(&fence->base);
+	return drop;
 }

 static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence *fence;

-	nvif_notify_fini(&fctx->notify);
 	spin_lock_irq(&fctx->lock);
 	while (!list_empty(&fctx->pending)) {
 		fence = list_entry(fctx->pending.next, typeof(*fence), head);

-		nouveau_fence_signal(fence);
-		fence->channel = NULL;
+		if (nouveau_fence_signal(fence))
+			nvif_notify_put(&fctx->notify);
 	}
 	spin_unlock_irq(&fctx->lock);
+
+	nvif_notify_fini(&fctx->notify);
+	fctx->dead = 1;
+
+	/*
+	 * Ensure that all accesses to fence->channel complete before freeing
+	 * the channel.
+	 */
+	synchronize_rcu();
 }

 static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
 	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
 }

-static void
+static int
 nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence *fence;
+	int drop = 0;
 	u32 seq = fctx->read(chan);

 	while (!list_empty(&fctx->pending)) {
 		fence = list_entry(fctx->pending.next, typeof(*fence), head);

 		if ((int)(seq - fence->base.seqno) < 0)
-			return;
+			break;

-		nouveau_fence_signal(fence);
+		drop |= nouveau_fence_signal(fence);
 	}
+
+	return drop;
 }

 static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
 	struct nouveau_fence_chan *fctx =
 		container_of(notify, typeof(*fctx), notify);
 	unsigned long flags;
+	int ret = NVIF_NOTIFY_KEEP;

 	spin_lock_irqsave(&fctx->lock, flags);
 	if (!list_empty(&fctx->pending)) {
 		struct nouveau_fence *fence;
+		struct nouveau_channel *chan;

 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
-		nouveau_fence_update(fence->channel, fctx);
+		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+		if (nouveau_fence_update(fence->channel, fctx))
+			ret = NVIF_NOTIFY_DROP;
 	}
 	spin_unlock_irqrestore(&fctx->lock, flags);

-	/* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
-	return NVIF_NOTIFY_KEEP;
+	return ret;
 }

 void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 	if (!ret) {
 		fence_get(&fence->base);
 		spin_lock_irq(&fctx->lock);
-		nouveau_fence_update(chan, fctx);
+
+		if (nouveau_fence_update(chan, fctx))
+			nvif_notify_put(&fctx->notify);
+
 		list_add_tail(&fence->head, &fctx->pending);
 		spin_unlock_irq(&fctx->lock);
 	}
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
 	if (fence->base.ops == &nouveau_fence_ops_legacy ||
 	    fence->base.ops == &nouveau_fence_ops_uevent) {
 		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+		struct nouveau_channel *chan;
 		unsigned long flags;

 		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 			return true;

 		spin_lock_irqsave(&fctx->lock, flags);
-		nouveau_fence_update(fence->channel, fctx);
+		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+		if (chan && nouveau_fence_update(chan, fctx))
+			nvif_notify_put(&fctx->notify);
 		spin_unlock_irqrestore(&fctx->lock, flags);
 	}
 	return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 		struct nouveau_channel *prev = NULL;
+		bool must_wait = true;

 		f = nouveau_local_fence(fence, chan->drm);
-		if (f)
-			prev = f->channel;
+		if (f) {
+			rcu_read_lock();
+			prev = rcu_dereference(f->channel);
+			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+				must_wait = false;
+			rcu_read_unlock();
+		}

-		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+		if (must_wait)
 			ret = fence_wait(fence, intr);

 		return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 	for (i = 0; i < fobj->shared_count && !ret; ++i) {
 		struct nouveau_channel *prev = NULL;
+		bool must_wait = true;

 		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

 		f = nouveau_local_fence(fence, chan->drm);
-		if (f)
-			prev = f->channel;
+		if (f) {
+			rcu_read_lock();
+			prev = rcu_dereference(f->channel);
+			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+				must_wait = false;
+			rcu_read_unlock();
+		}

-		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+		if (must_wait)
 			ret = fence_wait(fence, intr);
-
-		if (ret)
-			break;
 	}

 	return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

-	return fence->channel ? fctx->name : "dead channel";
+	return !fctx->dead ? fctx->name : "dead channel";
 }

 /*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
-	struct nouveau_channel *chan = fence->channel;
+	struct nouveau_channel *chan;
+	bool ret = false;
+
+	rcu_read_lock();
+	chan = rcu_dereference(fence->channel);
+	if (chan)
+		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+	rcu_read_unlock();

-	return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+	return ret;
 }

 static bool nouveau_fence_no_signaling(struct fence *f)
...
@@ -14,7 +14,7 @@ struct nouveau_fence {
 	bool sysmem;

-	struct nouveau_channel *channel;
+	struct nouveau_channel __rcu *channel;
 	unsigned long timeout;
 };

@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
 	char name[32];

 	struct nvif_notify notify;
-	int notify_ref;
+	int notify_ref, dead;
 };

 struct nouveau_fence_priv {
...
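Taken together, the fence hunks turn fence->channel into an RCU-protected pointer (note the __rcu annotation in the header hunk above): the signalling side retires it with rcu_assign_pointer() under fctx->lock, lockless readers use rcu_read_lock()/rcu_dereference(), lock holders use rcu_dereference_protected(), and teardown calls synchronize_rcu() before the channel can be freed. Below is a self-contained sketch of those four rules, using simplified hypothetical types rather than the driver's own:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Condensed, hypothetical model of the fence->channel rules used above. */
struct chan { int dummy; };
struct fence_obj {
	struct chan __rcu *channel;
};

static DEFINE_SPINLOCK(fctx_lock);

/* Writer, called with fctx_lock held: retire the pointer on signal. */
static void fence_retire_channel(struct fence_obj *f)
{
	rcu_assign_pointer(f->channel, NULL);
}

/* Lockless reader: dereference only inside the read-side section. */
static bool fence_channel_alive(struct fence_obj *f)
{
	struct chan *c;
	bool alive;

	rcu_read_lock();
	c = rcu_dereference(f->channel);
	alive = (c != NULL);
	rcu_read_unlock();
	return alive;
}

/* Reader that already holds the lock excluding writers. */
static struct chan *fence_channel_locked(struct fence_obj *f)
{
	return rcu_dereference_protected(f->channel,
					 lockdep_is_held(&fctx_lock));
}

/* Teardown: after retiring all pointers, wait for in-flight readers
 * to drain before the channel memory may be freed (the hunk's
 * synchronize_rcu()). */
static void channel_teardown(void)
{
	synchronize_rcu();
}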
@@ -251,22 +251,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-	int i, r = 0;
+	struct radeon_cs_reloc *reloc;
+	int r;

-	for (i = 0; i < p->nrelocs; i++) {
+	list_for_each_entry(reloc, &p->validated, tv.head) {
 		struct reservation_object *resv;

-		if (!p->relocs[i].robj)
-			continue;
-
-		resv = p->relocs[i].robj->tbo.resv;
+		resv = reloc->robj->tbo.resv;

 		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
-					       p->relocs[i].tv.shared);
+					       reloc->tv.shared);
 		if (r)
-			break;
+			return r;
 	}
-	return r;
+	return 0;
 }

 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
...
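The radeon_cs_sync_rings() rewrite above replaces index-based iteration over p->relocs, which had to skip entries without a backing object, with a walk of the p->validated list, which holds exactly the BOs involved in the CS, and it fails fast by returning the first error instead of threading r through the loop condition. A minimal sketch of that list_for_each_entry() shape, with hypothetical item and do_sync() names:

#include <linux/list.h>

struct item {
	struct list_head head;	/* linkage, like tv.head in the hunk */
};

static int do_sync(struct item *it);	/* hypothetical per-item work */

/* Fail fast: return the first error rather than carrying it out of
 * the loop, mirroring the rewritten radeon_cs_sync_rings(). */
static int sync_all(struct list_head *validated)
{
	struct item *it;
	int r;

	list_for_each_entry(it, validated, head) {
		r = do_sync(it);
		if (r)
			return r;
	}
	return 0;
}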
@@ -795,6 +795,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
 	/* Get associated drm_crtc: */
 	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+	if (!drmcrtc)
+		return -EINVAL;

 	/* Helper routine in DRM core does all the work: */
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
...
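One subtlety in the oops fix above: the NULL check only works because base is the first member of radeon_crtc, so when mode_info.crtcs[crtc] is NULL the member address &...->base is also NULL; with a nonzero member offset the test would miss. (Forming such an address is strictly undefined in ISO C, but the kernel relies on the usual flat-pointer arithmetic here, much as container_of() does.) A standalone illustration with a hypothetical pair of structs:

#include <stddef.h>
#include <stdio.h>

struct inner { int x; };
struct outer {
	struct inner base;	/* offset 0: &p->base aliases p itself */
	int other;
};

int main(void)
{
	struct outer *o = NULL;
	/* No dereference happens here; the compiler just computes
	 * (char *)o + offsetof(struct outer, base). */
	struct inner *b = &o->base;

	printf("b is NULL: %d (offsetof base = %zu)\n",
	       b == NULL, offsetof(struct outer, base));
	return 0;
}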
@@ -213,6 +213,13 @@ int radeon_bo_create(struct radeon_device *rdev,
 	if (!(rdev->flags & RADEON_IS_PCIE))
 		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

+#ifdef CONFIG_X86_32
+	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+	 */
+	bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);
...
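The final hunk is the bandaid the pull message refers to: rather than root-causing why write-combined CPU mappings of GTT misbehave on 32-bit x86, it masks RADEON_GEM_GTT_WC out of every BO's flags at creation time, degrading those allocations to cached mappings. In isolation the compile-time gating pattern looks like this (flag value hypothetical):

#include <stdint.h>

#define GEM_GTT_WC 0x4u		/* hypothetical flag bit */

/* Strip a feature flag on configurations where it is known bad;
 * callers on 32-bit x86 builds simply never see the WC bit. */
static uint32_t sanitize_bo_flags(uint32_t flags)
{
#ifdef CONFIG_X86_32
	flags &= ~GEM_GTT_WC;
#endif
	return flags;
}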