Commit 3afdd8a0 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2014-09-10' of git://anongit.freedesktop.org/drm-intel into drm-fixes

more fixes for 3.17, almost all Cc: stable material.

* tag 'drm-intel-fixes-2014-09-10' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Wait for vblank before enabling the TV encoder
  drm/i915: Evict CS TLBs between batches
  drm/i915: Fix irq enable tracking in driver load
  drm/i915: Fix EIO/wedged handling in gem fault handler
  drm/i915: Prevent recursive deadlock on releasing a busy userptr
parents 69e672fd 7a98948f
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	intel_power_domains_init_hw(dev_priv);
 
+	/*
+	 * We enable some interrupt sources in our postinstall hooks, so mark
+	 * interrupts as enabled _before_ actually enabling them to avoid
+	 * special cases in our ordering checks.
+	 */
+	dev_priv->pm._irqs_disabled = false;
+
 	ret = drm_irq_install(dev, dev->pdev->irq);
 	if (ret)
 		goto cleanup_gem_stolen;
 
-	dev_priv->pm._irqs_disabled = false;
-
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
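Why the move matters: drm_irq_install() runs the driver's irq postinstall hook, and that hook already enables some interrupt sources, so the _irqs_disabled bookkeeping has to be flipped before the install call or the driver's own ordering checks would trip during postinstall. A minimal user-space sketch of that invariant; irqs_disabled_flag, enable_irq_source() and postinstall() are illustrative stand-ins, not the driver's names:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_disabled_flag = true;	/* mirrors dev_priv->pm._irqs_disabled */

/* Stand-in for enabling one interrupt source; asserts the ordering rule. */
static void enable_irq_source(const char *name)
{
	assert(!irqs_disabled_flag);	/* would fire if the flag were still set */
	printf("enabled %s\n", name);
}

/* Stand-in for the irq postinstall hook that the install step invokes. */
static void postinstall(void)
{
	enable_irq_source("hotplug");
	enable_irq_source("pipestat");
}

int main(void)
{
	irqs_disabled_flag = false;	/* mark enabled _before_ installing ... */
	postinstall();			/* ... since install already enables sources */
	return 0;
}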
...
@@ -184,6 +184,7 @@ enum hpd_pin {
 		if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
 
 	struct i915_gtt gtt; /* VM representing the global address space */
 
 	struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-	DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+	DECLARE_HASHTABLE(mm_structs, 7);
+	struct mutex mm_lock;
 
 	/* Kernel Modesetting */
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
 		unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-		struct mm_struct *mm;
-		struct i915_mmu_object *mn;
+		struct i915_mm_struct *mm;
+		struct i915_mmu_object *mmu_object;
 		struct work_struct *work;
 	} userptr;
 };
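Together these two header hunks carry the shape of the userptr deadlock fix: the per-device mmu_notifiers hashtable (and its CONFIG_MMU_NOTIFIER guard) becomes a table of refcounted per-process i915_mm_struct entries looked up under mm_lock, and each object points at that shared state instead of at a raw mm_struct. A sketch of the lookup-or-create half of the pattern against the kernel's <linux/hashtable.h> API; the node and kref member names are assumptions here, the real definitions live in the collapsed diff below:

#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/mutex.h>

struct i915_mm_struct {
	struct mm_struct *mm;	/* the process address space being tracked */
	struct hlist_node node;	/* entry in dev_priv->mm_structs (assumed name) */
	struct kref kref;	/* shared by every userptr object of this mm */
};

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Only walk the bucket this mm_struct pointer hashes into. */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static struct i915_mm_struct *
i915_mm_struct_get(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	mutex_lock(&dev_priv->mm_lock);	/* serialises find against create/free */
	mm = __i915_mm_struct_find(dev_priv, real);
	if (mm)
		kref_get(&mm->kref);	/* one shared notifier per process */
	mutex_unlock(&dev_priv->mm_lock);

	return mm;
}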
...
@@ -1590,10 +1590,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 out:
 	switch (ret) {
 	case -EIO:
-		/* If this -EIO is due to a gpu hang, give the reset code a
-		 * chance to clean up the mess. Otherwise return the proper
-		 * SIGBUS. */
-		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+		/*
+		 * We eat errors when the gpu is terminally wedged to avoid
+		 * userspace unduly crashing (gl has no provisions for mmaps to
+		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+		 * and so needs to be reported.
+		 */
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
 			ret = VM_FAULT_SIGBUS;
 			break;
 		}
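The flipped condition inverts the -EIO policy: a terminally wedged GPU has its error eaten (execution falls through to the retry path, since GL has no way to handle a failing mmap), while any other -EIO, e.g. a swap-in failure, is now correctly reported as SIGBUS. As a compressed, purely illustrative truth table; the real code falls through the switch rather than calling a helper:

/* Illustrative only: in the driver the wedged case falls through to the
 * -EAGAIN/-ERESTARTSYS arm of the switch, which yields VM_FAULT_NOPAGE. */
static int fault_disposition(int err, bool terminally_wedged)
{
	if (err == -EIO && !terminally_wedged)
		return VM_FAULT_SIGBUS;	/* not our error (e.g. swap-in failed) */
	return VM_FAULT_NOPAGE;		/* eaten; the access is simply retried */
}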
...
This diff is collapsed.
@@ -334,16 +334,20 @@
 #define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 #define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
 #define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+
+#define COLOR_BLT_CMD			(2<<29 | 0x40<<22 | (5-2))
 #define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|4)
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define   BLT_WRITE_A			(2<<20)
+#define   BLT_WRITE_RGB			(1<<20)
+#define   BLT_WRITE_RGBA		(BLT_WRITE_RGB | BLT_WRITE_A)
 #define   BLT_DEPTH_8			(0<<24)
 #define   BLT_DEPTH_16_565		(1<<24)
 #define   BLT_DEPTH_16_1555		(2<<24)
 #define   BLT_DEPTH_32			(3<<24)
-#define   BLT_ROP_GXCOPY		(0xcc<<16)
+#define   BLT_ROP_SRC_COPY		(0xcc<<16)
+#define   BLT_ROP_COLOR_COPY		(0xf0<<16)
 #define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
 #define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
 #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
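These blitter opcodes pack a client id in bits 31:29, an opcode in bits 28:22 and (dword length - 2) in the low bits, so the new COLOR_BLT_CMD works out to (2<<29)|(0x40<<22)|3 = 0x50000003, a 5-dword 2D command, consistent with the five blit dwords the eviction sequence below emits. A self-contained check of that arithmetic:

#include <assert.h>
#include <stdio.h>

#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))	/* as defined above */

int main(void)
{
	/* client 2 (2D) in bits 31:29 | opcode 0x40 in 28:22 | length 5 - 2 */
	printf("COLOR_BLT_CMD = 0x%08x\n", (unsigned)COLOR_BLT_CMD);
	assert((unsigned)COLOR_BLT_CMD == 0x50000003u);
	return 0;
}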
...
@@ -1363,53 +1363,65 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
 #define I830_BATCH_LIMIT (256*1024)
+#define I830_TLB_ENTRIES (2)
+#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 			 u64 offset, u32 len,
 			 unsigned flags)
 {
+	u32 cs_offset = ring->scratch.gtt_offset;
 	int ret;
 
-	if (flags & I915_DISPATCH_PINNED) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			return ret;
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
 
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-		intel_ring_emit(ring, offset + len - 8);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	} else {
-		u32 cs_offset = ring->scratch.gtt_offset;
+	/* Evict the invalid PTE TLBs */
+	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(ring, cs_offset);
+	intel_ring_emit(ring, 0xdeadbeef);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
+	if ((flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(ring, 9+3);
+		ret = intel_ring_begin(ring, 6 + 2);
 		if (ret)
 			return ret;
 
-		/* Blit the batch (which has now all relocs applied) to the stable batch
-		 * scratch bo area (so that the CS never stumbles over its tlb
-		 * invalidation bug) ... */
-		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
-				XY_SRC_COPY_BLT_WRITE_ALPHA |
-				XY_SRC_COPY_BLT_WRITE_RGB);
-		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+		/* Blit the batch (which has now all relocs applied) to the
+		 * stable batch scratch bo area (so that the CS never
+		 * stumbles over its tlb invalidation bug) ...
+		 */
+		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
 		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 0);
 		intel_ring_emit(ring, 4096);
 		intel_ring_emit(ring, offset);
 		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		/* ... and execute it. */
-		intel_ring_emit(ring, MI_BATCH_BUFFER);
-		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-		intel_ring_emit(ring, cs_offset + len - 8);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		offset = cs_offset;
 	}
 
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_BATCH_BUFFER);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+	intel_ring_emit(ring, offset + len - 8);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
 	return 0;
 }
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
-		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate batch bo\n");
 			return -ENOMEM;
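The geometry of the eviction blit, spelled out: dword 2 of a COLOR_BLT packs height<<16 | width-in-bytes, so I830_TLB_ENTRIES << 16 | 4 writes one 4-byte row into each of two rows at a 4096-byte pitch, touching one scratch page per CS TLB entry so the stale PTEs are reloaded; the 6 + 2 in the copy path is likewise the six SRC_COPY dwords plus MI_FLUSH/MI_NOOP. The scratch bo therefore has to cover both the copied batch and the pages the eviction blit dirties, hence the max(). A small user-space check of that sizing arithmetic (MAX stands in for the kernel's max()):

#include <assert.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))	/* stand-in for the kernel's max() */

#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE MAX(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)

int main(void)
{
	/* the batch copy dominates: 256KiB > 2 pages of eviction rows */
	assert(I830_WA_SIZE == I830_BATCH_LIMIT);
	printf("wa bo: %d bytes; eviction blit: %d rows x 4 bytes, pitch 4096\n",
	       I830_WA_SIZE, I830_TLB_ENTRIES);
	return 0;
}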
...
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
 
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
+	intel_wait_for_vblank(encoder->base.dev,
+			      to_intel_crtc(encoder->base.crtc)->pipe);
+
 	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 }
...