Commit db218549 authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.15-rc10-2' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A fairly urgent nouveau regression fix for broken irqs across
  suspend/resume came in. This was broken before but a patch in 4.15 has
  made it much more obviously broken and now s/r fails a lot more often.

  The fix stops freeing the irq across s/r, which should never have
  been done anyway.

  Also two vc4 fixes: one for a NULL pointer dereference and one for
  some misrendering / flickering on screen"

* tag 'drm-fixes-for-v4.15-rc10-2' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau: Move irq setup/teardown to pci ctor/dtor
  drm/vc4: Fix NULL pointer dereference in vc4_save_hang_state()
  drm/vc4: Flush the caches before the bin jobs, as well.
parents 993ca206 baa35cc3
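
The nouveau patch moves request_irq() out of the per-resume init path into
one-time setup and frees the irq only in the destructor, with the interrupt
handler bailing out once teardown has begun. Below is a minimal sketch of that
lifetime pattern; the my_dev/my_intr names are placeholders, not the nouveau
code, and the shared-IRQ caveat is the same one the patch's own comment refers
to.

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_dev {
        struct pci_dev *pdev;
        int irq;        /* -1 once the handler has been torn down */
};

static irqreturn_t my_intr(int irq, void *arg)
{
        struct my_dev *dev = arg;

        /* free_irq() on a shared line can invoke the handler one last
         * time (e.g. under CONFIG_DEBUG_SHIRQ); bail out quietly once
         * teardown has started.
         */
        if (dev->irq < 0)
                return IRQ_HANDLED;

        /* ... normal interrupt handling ... */
        return IRQ_HANDLED;
}

/* One-time setup (constructor path): the IRQ lives as long as the
 * device, not as long as one suspend/resume cycle.
 */
static int my_dev_ctor_irq(struct my_dev *dev)
{
        int ret = request_irq(dev->pdev->irq, my_intr, IRQF_SHARED,
                              "my_dev", dev);
        if (ret)
                return ret;
        dev->irq = dev->pdev->irq;
        return 0;
}

/* Final teardown (destructor path): mark the IRQ as gone before
 * free_irq(), so a late handler invocation is a no-op.
 */
static void my_dev_dtor_irq(struct my_dev *dev)
{
        if (dev->irq >= 0) {
                int irq = dev->irq;

                dev->irq = -1;
                free_irq(irq, dev);
        }
}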
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
         struct nvkm_pci *pci = arg;
         struct nvkm_device *device = pci->subdev.device;
         bool handled = false;
+
+        if (pci->irq < 0)
+                return IRQ_HANDLED;
+
         nvkm_mc_intr_unarm(device);
         if (pci->msi)
                 pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 {
         struct nvkm_pci *pci = nvkm_pci(subdev);
 
-        if (pci->irq >= 0) {
-                free_irq(pci->irq, pci);
-                pci->irq = -1;
-        }
-
         if (pci->agp.bridge)
                 nvkm_agp_fini(pci);
@@ -108,8 +107,20 @@ static int
 nvkm_pci_oneinit(struct nvkm_subdev *subdev)
 {
         struct nvkm_pci *pci = nvkm_pci(subdev);
-        if (pci_is_pcie(pci->pdev))
-                return nvkm_pcie_oneinit(pci);
+        struct pci_dev *pdev = pci->pdev;
+        int ret;
+
+        if (pci_is_pcie(pci->pdev)) {
+                ret = nvkm_pcie_oneinit(pci);
+                if (ret)
+                        return ret;
+        }
+
+        ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+        if (ret)
+                return ret;
+
+        pci->irq = pdev->irq;
         return 0;
 }
@@ -117,7 +128,6 @@ static int
 nvkm_pci_init(struct nvkm_subdev *subdev)
 {
         struct nvkm_pci *pci = nvkm_pci(subdev);
-        struct pci_dev *pdev = pci->pdev;
         int ret;
 
         if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
         if (pci->func->init)
                 pci->func->init(pci);
 
-        ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
-        if (ret)
-                return ret;
-
-        pci->irq = pdev->irq;
-
         /* Ensure MSI interrupts are armed, for the case where there are
          * already interrupts pending (for whatever reason) at load time.
          */
         if (pci->msi)
                 pci->func->msi_rearm(pci);
 
-        return ret;
+        return 0;
 }
 
 static void *
 nvkm_pci_dtor(struct nvkm_subdev *subdev)
 {
         struct nvkm_pci *pci = nvkm_pci(subdev);
 
         nvkm_agp_dtor(pci);
 
+        if (pci->irq >= 0) {
+                /* free_irq() will call the handler, so we use pci->irq == -1
+                 * to signal that it's been torn down and should be a noop.
+                 */
+                int irq = pci->irq;
+
+                pci->irq = -1;
+                free_irq(irq, pci);
+        }
+
         if (pci->msi)
                 pci_disable_msi(pci->pdev);
 
         return nvkm_pci(subdev);
 }
...
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
         struct vc4_exec_info *exec[2];
         struct vc4_bo *bo;
         unsigned long irqflags;
-        unsigned int i, j, unref_list_count, prev_idx;
+        unsigned int i, j, k, unref_list_count;
 
         kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
         if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
                 return;
         }
 
-        prev_idx = 0;
+        k = 0;
         for (i = 0; i < 2; i++) {
                 if (!exec[i])
                         continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
                         WARN_ON(!refcount_read(&bo->usecnt));
                         refcount_inc(&bo->usecnt);
                         drm_gem_object_get(&exec[i]->bo[j]->base);
-                        kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+                        kernel_state->bo[k++] = &exec[i]->bo[j]->base;
                 }
 
                 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
                          * because they are naturally unpurgeable.
                          */
                         drm_gem_object_get(&bo->base.base);
-                        kernel_state->bo[j + prev_idx] = &bo->base.base;
-                        j++;
+                        kernel_state->bo[k++] = &bo->base.base;
                 }
-                prev_idx = j + 1;
         }
 
+        WARN_ON_ONCE(k != state->bo_count);
+
         if (exec[0])
                 state->start_bin = exec[0]->ct0ca;
         if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
                   VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
 }
 
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+        struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+        V3D_WRITE(V3D_L2CACTL,
+                  V3D_L2CACTL_L2CCLR);
+
+        V3D_WRITE(V3D_SLCACTL,
+                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
 /* Sets the registers for the next job to be actually be executed in
  * the hardware.
  *
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
         if (!exec)
                 return;
 
+        /* A previous RCL may have written to one of our textures, and
+         * our full cache flush at bin time may have occurred before
+         * that RCL completed.  Flush the texture cache now, but not
+         * the instructions or uniforms (since we don't write those
+         * from an RCL).
+         */
+        vc4_flush_texture_caches(dev);
+
         submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
...
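
The vc4_save_hang_state() fix replaces per-exec offset arithmetic (j +
prev_idx, with prev_idx = j + 1 between execs) with a single running cursor:
the old bookkeeping advanced the offset one slot too far after the first exec,
leaving a NULL hole in kernel_state->bo[] that was later dereferenced and
letting the last write land one entry past the allocation. A small,
self-contained sketch of the single-cursor pattern, using illustrative names
rather than the driver's:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Copy entries from several variable-length sources into one flat
 * array using a single running cursor, instead of recomputing a
 * per-source offset (the scheme that went wrong in the old code).
 */
static size_t gather(const int *srcs[], const size_t lens[],
                     size_t nsrcs, int *out, size_t out_len)
{
        size_t k = 0;   /* one write cursor across every source */
        size_t i, j;

        for (i = 0; i < nsrcs; i++)
                for (j = 0; j < lens[i]; j++)
                        out[k++] = srcs[i][j];

        /* Mirrors the WARN_ON_ONCE() added by the patch: the cursor
         * must land exactly on the precomputed total.
         */
        assert(k == out_len);
        return k;
}

int main(void)
{
        const int bin[] = { 1, 2, 3 };          /* e.g. exec[0]'s BOs */
        const int render[] = { 4, 5 };          /* e.g. exec[1]'s BOs */
        const int *srcs[] = { bin, render };
        const size_t lens[] = { 3, 2 };
        int out[5];
        size_t i, n;

        n = gather(srcs, lens, 2, out, 5);
        for (i = 0; i < n; i++)
                printf("%d ", out[i]);
        printf("\n");
        return 0;
}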