Commit 98c7b423 authored by Dave Airlie

Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes: (this pull is the one with the bad patch dropped)
First pile of fixes for 3.6 already, and I'm afraid it's a bit larger than
what I'd wish for. But I've moved all the feature-y stuff to -next, so
this really is all -fixes. Most of it is handling fallout from the hw
context stuff, discovered now that mesa git has started using them for
real. Otherwise all just small fixes:
- unbreak modeset=0 on gen6+ (regressed in next)
- const mismatch fix for ->mode_fixup
- simplify overly clever lvds modeset code (current code can totally
  confuse backlights, resulting in broken panels until a full power draw
  restores them).
- fix some fallout from the flushing_list disabling (regression only
  introduced in -next)
- DP link train improvements (this also kills the last 3.2 dp regression
  afaik)
- bugfix for the new ddc VGA detection on newer platforms
- minor backlight fixes (one of them a -next regression)
- only enable the required PM interrupts (to avoid waking up the cpu
  unnecessarily)
- some really minor bits (workaround clarification, make Coverity happy,
  hsw init fix)
* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel: (23 commits)
  drm/i915: unbreak lastclose for failed driver init
  drm/i915: Set the context before setting up regs for the context.
  drm/i915: constify mode in crtc_mode_fixup
  drm/i915/lvds: ditch ->prepare special case
  drm/i915: dereferencing an error pointer
  drm/i915: fix invalid reference handling of the default ctx obj
  drm/i915: Add -EIO to the list of known errors for __wait_seqno
  drm/i915: Flush the context object from the CPU caches upon switching
  drm/i915: Make the lock for pageflips interruptible
  drm/i915: don't forget the PCH backlight registers
  drm/i915: Insert a flush between batches if the breadcrumb was dropped
  drm/i915: missing error case in init status page
  drm/i915: mask tiled bit when updating ILK sprites
  drm/i915: try to train DP even harder
  drm/i915: kill intel_ddc_probe
  drm/i915: check whether we actually received an edid in detect_ddc
  drm/i915: fix up PCH backlight #define mixup
  drm/i915: Add comments to explain the BSD tail write workaround
  drm/i915: Disable the BLT on pre-production SNB hardware
  drm/i915: initialize power wells in modeset_init_hw
  ...
parents 2536f7dc e8aeaee7
@@ -676,6 +676,7 @@ static void i915_ring_error_state(struct seq_file *m,
         seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
         seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
         if (INTEL_INFO(dev)->gen >= 6) {
+                seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
                 seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
                 seq_printf(m, "  SYNC_0: 0x%08x\n",
                            error->semaphore_mboxes[ring][0]);
...
@@ -1781,7 +1781,13 @@ void i915_driver_lastclose(struct drm_device * dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
 
-        if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
+        /* On gen6+ we refuse to init without kms enabled, but then the drm core
+         * goes right around and calls lastclose. Check for this and don't clean
+         * up anything. */
+        if (!dev_priv)
+                return;
+
+        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                 intel_fb_restore_mode(dev);
                 vga_switcheroo_process_delayed_switch();
                 return;
...
@@ -190,6 +190,7 @@ struct drm_i915_error_state {
         u32 instdone[I915_NUM_RINGS];
         u32 acthd[I915_NUM_RINGS];
         u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+        u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
         /* our own tracking of ring head and tail */
         u32 cpu_ring_head[I915_NUM_RINGS];
         u32 cpu_ring_tail[I915_NUM_RINGS];
...
@@ -2003,6 +2003,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
         }
 
         switch (end) {
+        case -EIO:
         case -EAGAIN: /* Wedged */
         case -ERESTARTSYS: /* Signal */
                 return (int)end;
@@ -3726,6 +3727,22 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
         }
 }
 
+static bool
+intel_enable_blt(struct drm_device *dev)
+{
+        if (!HAS_BLT(dev))
+                return false;
+
+        /* The blitter was dysfunctional on early prototypes */
+        if (IS_GEN6(dev) && dev->pdev->revision < 8) {
+                DRM_INFO("BLT not supported on this pre-production hardware;"
+                         " graphics performance will be degraded.\n");
+                return false;
+        }
+
+        return true;
+}
+
 int
 i915_gem_init_hw(struct drm_device *dev)
 {
@@ -3749,7 +3766,7 @@ i915_gem_init_hw(struct drm_device *dev)
                         goto cleanup_render_ring;
         }
 
-        if (HAS_BLT(dev)) {
+        if (intel_enable_blt(dev)) {
                 ret = intel_init_blt_ring_buffer(dev);
                 if (ret)
                         goto cleanup_bsd_ring;
...
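The new intel_enable_blt() above gates blitter-ring init on the PCI revision ID, since early pre-production SNB parts shipped with a broken blitter. A minimal userspace sketch of the same gating decision (the function and its main() driver are illustrative, not part of the patch):

/* Sketch: decide whether to expose an optional engine from the device
 * generation and PCI revision, mirroring the intel_enable_blt() check. */
#include <stdbool.h>
#include <stdio.h>

static bool enable_blt(int gen, unsigned int pci_revision)
{
        /* Gen6 parts with revision < 8 are treated as pre-production;
         * their blitter is assumed unusable, so fall back gracefully. */
        if (gen == 6 && pci_revision < 8) {
                fprintf(stderr,
                        "BLT disabled on pre-production hardware (rev %u)\n",
                        pci_revision);
                return false;
        }
        return true;
}

int main(void)
{
        printf("gen6 rev 7: %s\n", enable_blt(6, 7) ? "enabled" : "disabled");
        printf("gen6 rev 9: %s\n", enable_blt(6, 9) ? "enabled" : "disabled");
        return 0;
}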
@@ -112,8 +112,8 @@ static int get_context_size(struct drm_device *dev)
                 ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
                 break;
         case 7:
-                reg = I915_READ(GEN7_CTX_SIZE);
-                ret = GEN7_CTX_TOTAL_SIZE(reg) * 64;
+                reg = I915_READ(GEN7_CXT_SIZE);
+                ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
                 break;
         default:
                 BUG();
@@ -374,6 +374,17 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
         if (ret)
                 return ret;
 
+        /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+         * that thanks to write = false in this call and us not setting any gpu
+         * write domains when putting a context object onto the active list
+         * (when switching away from it), this won't block.
+         * XXX: We need a real interface to do this instead of trickery. */
+        ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+        if (ret) {
+                i915_gem_object_unpin(to->obj);
+                return ret;
+        }
+
         if (!to->obj->has_global_gtt_mapping)
                 i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
@@ -408,8 +419,11 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
                 from_obj->dirty = 1;
                 BUG_ON(from_obj->ring != to->ring);
                 i915_gem_object_unpin(from_obj);
+                drm_gem_object_unreference(&from_obj->base);
         }
 
+        drm_gem_object_reference(&to->obj->base);
         ring->last_context_obj = to->obj;
         to->is_initialized = true;
@@ -459,20 +473,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
         if (from_obj == to->obj)
                 return 0;
 
-        ret = do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
-        if (ret)
-                return ret;
-
-        /* Just to make the code a little cleaner we take the object reference
-         * after the switch was successful. It would be more intuitive to ref
-         * the 'to' object before the switch but we know the refcount must be >0
-         * if context_get() succeeded, and we hold struct mutex. So it's safe to
-         * do this here/now
-         */
-        drm_gem_object_reference(&to->obj->base);
-        if (from_obj != NULL)
-                drm_gem_object_unreference(&from_obj->base);
-
-        return ret;
+        return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -496,11 +497,13 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
         ctx = create_hw_context(dev, file_priv);
         mutex_unlock(&dev->struct_mutex);
 
+        if (IS_ERR(ctx))
+                return PTR_ERR(ctx);
+
         args->ctx_id = ctx->id;
         DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
-        return PTR_RET(ctx);
+        return 0;
 }
 
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
...
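The context-create ioctl fix above reads ctx->id only after checking IS_ERR(ctx), because create_hw_context() reports failure by returning an error pointer rather than NULL. A simplified, self-contained re-implementation of that kernel convention (ERR_PTR/IS_ERR/PTR_ERR are re-created here for illustration; the real definitions live in the kernel headers):

/* Sketch of the kernel's ERR_PTR/IS_ERR/PTR_ERR convention: an errno is
 * smuggled through a pointer return value, and callers must test IS_ERR()
 * before dereferencing. Simplified userspace re-implementation. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct ctx { int id; };

static struct ctx *create_ctx(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);   /* no object was allocated */

        struct ctx *c = malloc(sizeof(*c));
        if (!c)
                return ERR_PTR(-ENOMEM);
        c->id = 42;
        return c;
}

int main(void)
{
        struct ctx *c = create_ctx(1);

        if (IS_ERR(c)) {                   /* check before touching c->id */
                printf("create failed: %ld\n", PTR_ERR(c));
                return 1;
        }
        printf("ctx id %d\n", c->id);
        free(c);
        return 0;
}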
@@ -885,11 +885,16 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                         return ret;
         }
 
-        /* Unconditionally invalidate gpu caches. */
-        ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS, 0);
+        /* Unconditionally invalidate gpu caches and ensure that we do flush
+         * any residual writes from the previous batch.
+         */
+        ret = i915_gem_flush_ring(ring,
+                                  I915_GEM_GPU_DOMAINS,
+                                  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
         if (ret)
                 return ret;
 
+        ring->gpu_caches_dirty = false;
+
         return 0;
 }
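The hunk above makes the pre-batch invalidate also flush whatever the previous batch may have left behind, tracked by a per-ring gpu_caches_dirty flag that is cleared once the flush has been emitted. A toy sketch of that dirty-flag bookkeeping (all names and the print-only flush are stand-ins, not driver code):

/* Sketch: lazily flush writes from the previous batch by carrying a
 * dirty flag on the ring, as the execbuffer path now does. */
#include <stdbool.h>
#include <stdio.h>

#define GPU_DOMAINS 0xffffffffu  /* stand-in for I915_GEM_GPU_DOMAINS */

struct ring {
        bool gpu_caches_dirty;
};

/* Stand-in for i915_gem_flush_ring(): invalidate is always the full GPU
 * mask, flush only when the previous batch left writes pending. */
static void flush_ring(struct ring *ring,
                       unsigned invalidate_domains, unsigned flush_domains)
{
        printf("invalidate=%#x flush=%#x\n", invalidate_domains, flush_domains);
}

static void move_to_gpu(struct ring *ring)
{
        flush_ring(ring, GPU_DOMAINS,
                   ring->gpu_caches_dirty ? GPU_DOMAINS : 0);
        ring->gpu_caches_dirty = false;  /* residual writes are now flushed */
}

int main(void)
{
        struct ring r = { .gpu_caches_dirty = true };

        move_to_gpu(&r);  /* flushes the residual writes */
        move_to_gpu(&r);  /* nothing dirty, invalidate only */
        return 0;
}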
@@ -1223,6 +1228,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 }
         }
 
+        ret = i915_switch_context(ring, file, ctx_id);
+        if (ret)
+                goto err;
+
         if (ring == &dev_priv->ring[RCS] &&
             mode != dev_priv->relative_constants_mode) {
                 ret = intel_ring_begin(ring, 4);
@@ -1244,10 +1253,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         goto err;
         }
 
-        ret = i915_switch_context(ring, file, ctx_id);
-        if (ret)
-                goto err;
-
         trace_i915_gem_ring_dispatch(ring, seqno);
 
         exec_start = batch_obj->gtt_offset + args->batch_start_offset;
...
@@ -1066,6 +1066,7 @@ static void i915_record_ring_state(struct drm_device *dev,
         struct drm_i915_private *dev_priv = dev->dev_private;
 
         if (INTEL_INFO(dev)->gen >= 6) {
+                error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
                 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
                 error->semaphore_mboxes[ring->id][0]
                         = I915_READ(RING_SYNC_0(ring->mmio_base));
...
@@ -690,10 +690,10 @@
 #define   GEN6_BLITTER_FBC_NOTIFY                       (1<<3)
 
 #define GEN6_BSD_SLEEP_PSMI_CONTROL     0x12050
-#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK      (1 << 16)
-#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE          (1 << 0)
-#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE           0
-#define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR                   (1 << 3)
+#define   GEN6_BSD_SLEEP_MSG_DISABLE    (1 << 0)
+#define   GEN6_BSD_SLEEP_FLUSH_DISABLE  (1 << 2)
+#define   GEN6_BSD_SLEEP_INDICATOR      (1 << 3)
+#define   GEN6_BSD_GO_INDICATOR         (1 << 4)
 
 #define GEN6_BSD_HWSTAM                 0x12098
 #define GEN6_BSD_IMR                    0x120a8
@@ -1482,15 +1482,19 @@
                                         GEN6_CXT_RENDER_SIZE(cxt_reg) + \
                                         GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
                                         GEN6_CXT_PIPELINE_SIZE(cxt_reg))
-#define GEN7_CTX_SIZE           0x21a8
-#define GEN7_CTX_RENDER_SIZE(ctx_reg)   ((ctx_reg >> 16) & 0x3f)
-#define GEN7_CTX_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
-#define GEN7_CTX_GT1_SIZE(ctx_reg)      ((ctx_reg >> 6) & 0x7)
-#define GEN7_CTX_VFSTATE_SIZE(ctx_reg)  ((ctx_reg >> 0) & 0x3f)
-#define GEN7_CTX_TOTAL_SIZE(ctx_reg)    (GEN7_CTX_RENDER_SIZE(ctx_reg) + \
-                                         GEN7_CTX_EXTENDED_SIZE(ctx_reg) + \
-                                         GEN7_CTX_GT1_SIZE(ctx_reg) + \
-                                         GEN7_CTX_VFSTATE_SIZE(ctx_reg))
+#define GEN7_CXT_SIZE           0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg)    ((ctx_reg >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg)     ((ctx_reg >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg)   ((ctx_reg >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg)      ((ctx_reg >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg)  ((ctx_reg >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg)    (GEN7_CXT_POWER_SIZE(ctx_reg) + \
+                                         GEN7_CXT_RING_SIZE(ctx_reg) + \
+                                         GEN7_CXT_RENDER_SIZE(ctx_reg) + \
+                                         GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+                                         GEN7_CXT_GT1_SIZE(ctx_reg) + \
+                                         GEN7_CXT_VFSTATE_SIZE(ctx_reg))
 
 /*
  * Overlay regs
@@ -1896,7 +1900,7 @@
 /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
  * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
 #define BLC_PWM_PCH_CTL1        0xc8250
-#define   BLM_PCH_PWM_ENABLE                    (1 << 30)
+#define   BLM_PCH_PWM_ENABLE                    (1 << 31)
 #define   BLM_PCH_OVERRIDE_ENABLE               (1 << 30)
 #define   BLM_PCH_POLARITY                      (1 << 29)
 #define BLC_PWM_PCH_CTL2        0xc8254
...
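The corrected GEN7_CXT_* macros above unpack several bit fields from a single 32-bit register and sum them; get_context_size() then multiplies the result by 64, so the fields count 64-byte units. A standalone sketch of the same shift-and-mask decoding (the sample register value is made up):

/* Sketch: decode the GEN7 context-size fields exactly as the corrected
 * GEN7_CXT_*_SIZE macros do (shift, then mask), for an arbitrary value. */
#include <stdio.h>
#include <stdint.h>

#define GEN7_CXT_POWER_SIZE(reg)        (((reg) >> 25) & 0x7f)
#define GEN7_CXT_RING_SIZE(reg)         (((reg) >> 22) & 0x7)
#define GEN7_CXT_RENDER_SIZE(reg)       (((reg) >> 16) & 0x3f)
#define GEN7_CXT_EXTENDED_SIZE(reg)     (((reg) >> 9) & 0x7f)
#define GEN7_CXT_GT1_SIZE(reg)          (((reg) >> 6) & 0x7)
#define GEN7_CXT_VFSTATE_SIZE(reg)      (((reg) >> 0) & 0x3f)

int main(void)
{
        uint32_t reg = 0x12345678;      /* made-up register value */
        uint32_t lines = GEN7_CXT_POWER_SIZE(reg) + GEN7_CXT_RING_SIZE(reg) +
                         GEN7_CXT_RENDER_SIZE(reg) + GEN7_CXT_EXTENDED_SIZE(reg) +
                         GEN7_CXT_GT1_SIZE(reg) + GEN7_CXT_VFSTATE_SIZE(reg);

        /* get_context_size() multiplies by 64: the fields count cache lines. */
        printf("total context size: %u bytes\n", lines * 64);
        return 0;
}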
@@ -330,39 +330,34 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
         struct intel_crt *crt = intel_attached_crt(connector);
         struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
-
-        /* CRT should always be at 0, but check anyway */
-        if (crt->base.type != INTEL_OUTPUT_ANALOG)
-                return false;
-
-        if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
         struct edid *edid;
-        bool is_digital = false;
         struct i2c_adapter *i2c;
 
+        BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
         i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
         edid = drm_get_edid(connector, i2c);
+
+        if (edid) {
+                bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+
                 /*
                  * This may be a DVI-I connector with a shared DDC
                  * link between analog and digital outputs, so we
                  * have to check the EDID input spec of the attached device.
-                 *
-                 * On the other hand, what should we do if it is a broken EDID?
                  */
-                if (edid != NULL) {
-                        is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
-                        connector->display_info.raw_edid = NULL;
-                        kfree(edid);
-                }
-
                 if (!is_digital) {
                         DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
                         return true;
-                } else {
-                        DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
                 }
+
+                DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+        } else {
+                DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
         }
 
+        kfree(edid);
+
         return false;
 }
...
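detect_ddc now fetches the EDID itself and rejects the CRT when the EDID describes a digital sink, which covers DVI-I connectors that share the DDC link. In a raw 128-byte base EDID block the video-input-definition byte sits at offset 20 and bit 7 of it marks a digital input, which is what DRM_EDID_INPUT_DIGITAL tests; a small standalone check of that bit (the fake EDID here is hand-rolled for illustration):

/* Sketch: decide digital vs. analog from a raw 128-byte EDID block.
 * Offset 20 is the "video input definition" byte; bit 7 set means a
 * digital sink (what DRM_EDID_INPUT_DIGITAL checks in the driver). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EDID_INPUT_OFFSET       20
#define EDID_INPUT_DIGITAL      (1 << 7)

static bool edid_is_digital(const uint8_t *edid, size_t len)
{
        if (len < 128)
                return false;   /* not a valid base EDID block */
        return edid[EDID_INPUT_OFFSET] & EDID_INPUT_DIGITAL;
}

int main(void)
{
        uint8_t edid[128];

        memset(edid, 0, sizeof(edid));
        edid[EDID_INPUT_OFFSET] = 0x80; /* fake digital input descriptor */

        printf("digital sink: %s\n",
               edid_is_digital(edid, sizeof(edid)) ? "yes" : "no");
        return 0;
}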
@@ -3573,7 +3573,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
 }
 
 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
-                                  struct drm_display_mode *mode,
+                                  const struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
 {
         struct drm_device *dev = crtc->dev;
@@ -6486,7 +6486,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         intel_fb = to_intel_framebuffer(fb);
         obj = intel_fb->obj;
 
-        mutex_lock(&dev->struct_mutex);
+        ret = i915_mutex_lock_interruptible(dev);
+        if (ret)
+                goto cleanup;
 
         /* Reference the objects for the scheduled work. */
         drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6521,6 +6523,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         drm_gem_object_unreference(&obj->base);
         mutex_unlock(&dev->struct_mutex);
 
+cleanup:
         spin_lock_irqsave(&dev->event_lock, flags);
         intel_crtc->unpin_work = NULL;
         spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -7174,6 +7177,11 @@ static void i915_disable_vga(struct drm_device *dev)
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
+        /* We attempt to init the necessary power wells early in the initialization
+         * time, so the subsystems that expect power to be enabled can work.
+         */
+        intel_init_power_wells(dev);
+
         intel_prepare_ddi(dev);
 
         intel_init_clock_gating(dev);
...
@@ -1771,7 +1771,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                 for (i = 0; i < intel_dp->lane_count; i++)
                         if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                                 break;
-                if (i == intel_dp->lane_count) {
+                if (i == intel_dp->lane_count && voltage_tries == 5) {
                         ++loop_tries;
                         if (loop_tries == 5) {
                                 DRM_DEBUG_KMS("too many full retries, give up\n");
...
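The one-line change above only counts a full training restart once every lane reports max swing and the same voltage has already been retried five times, instead of restarting as soon as max swing is hit. A tiny sketch of just that guard (everything around it is faked; only the condition mirrors the patch):

/* Sketch: the guard that now gates a "full restart" of DP clock recovery. */
#include <stdbool.h>
#include <stdio.h>

static bool should_restart_training(bool all_lanes_at_max_swing,
                                    int voltage_tries)
{
        /* Before the fix only the max-swing test was required; now the same
         * voltage must also have been retried five times. */
        return all_lanes_at_max_swing && voltage_tries == 5;
}

int main(void)
{
        printf("%d\n", should_restart_training(true, 1));  /* 0: keep trying */
        printf("%d\n", should_restart_training(true, 5));  /* 1: restart from scratch */
        return 0;
}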
@@ -342,7 +342,6 @@ struct intel_fbc_work {
 };
 
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
 
 extern void intel_attach_force_audio_property(struct drm_connector *connector);
 extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
@@ -496,6 +495,7 @@ extern void intel_update_fbc(struct drm_device *dev);
 extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 extern void intel_gpu_ips_teardown(void);
 
+extern void intel_init_power_wells(struct drm_device *dev);
 extern void intel_enable_gt_powersave(struct drm_device *dev);
 extern void intel_disable_gt_powersave(struct drm_device *dev);
 extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
...
@@ -409,12 +409,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
 {
         struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
-        /*
-         * Prior to Ironlake, we must disable the pipe if we want to adjust
-         * the panel fitter. However at all other times we can just reset
-         * the registers regardless.
-         */
-        if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
-                intel_lvds_disable(intel_lvds);
+        intel_lvds_disable(intel_lvds);
 }
...
@@ -32,34 +32,6 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-/**
- * intel_ddc_probe
- *
- */
-bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
-{
-        struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
-        u8 out_buf[] = { 0x0, 0x0};
-        u8 buf[2];
-        struct i2c_msg msgs[] = {
-                {
-                        .addr = DDC_ADDR,
-                        .flags = 0,
-                        .len = 1,
-                        .buf = out_buf,
-                },
-                {
-                        .addr = DDC_ADDR,
-                        .flags = I2C_M_RD,
-                        .len = 1,
-                        .buf = buf,
-                }
-        };
-
-        return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
-                            msgs, 2) == 2;
-}
-
 /**
  * intel_ddc_get_modes - get modelist from monitor
  * @connector: DRM connector device to use
...
@@ -289,11 +289,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
         intel_panel_actually_set_backlight(dev, 0);
 
         if (INTEL_INFO(dev)->gen >= 4) {
-                uint32_t reg;
+                uint32_t reg, tmp;
 
                 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
 
                 I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+
+                if (HAS_PCH_SPLIT(dev)) {
+                        tmp = I915_READ(BLC_PWM_PCH_CTL1);
+                        tmp &= ~BLM_PCH_PWM_ENABLE;
+                        I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+                }
         }
 }
@@ -333,6 +339,13 @@ void intel_panel_enable_backlight(struct drm_device *dev,
                 I915_WRITE(reg, tmp);
                 POSTING_READ(reg);
                 I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
+
+                if (HAS_PCH_SPLIT(dev)) {
+                        tmp = I915_READ(BLC_PWM_PCH_CTL1);
+                        tmp |= BLM_PCH_PWM_ENABLE;
+                        tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
+                        I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+                }
         }
 }
...
@@ -2491,14 +2491,7 @@ static void gen6_enable_rps(struct drm_device *dev)
         gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
         /* requires MSI enabled */
-        I915_WRITE(GEN6_PMIER,
-                   GEN6_PM_MBOX_EVENT |
-                   GEN6_PM_THERMAL_EVENT |
-                   GEN6_PM_RP_DOWN_TIMEOUT |
-                   GEN6_PM_RP_UP_THRESHOLD |
-                   GEN6_PM_RP_DOWN_THRESHOLD |
-                   GEN6_PM_RP_UP_EI_EXPIRED |
-                   GEN6_PM_RP_DOWN_EI_EXPIRED);
+        I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
         spin_lock_irq(&dev_priv->rps_lock);
         WARN_ON(dev_priv->pm_iir != 0);
         I915_WRITE(GEN6_PMIMR, 0);
@@ -3939,11 +3932,6 @@ void intel_init_pm(struct drm_device *dev)
                 else
                         dev_priv->display.get_fifo_size = i830_get_fifo_size;
         }
-
-        /* We attempt to init the necessary power wells early in the initialization
-         * time, so the subsystems that expect power to be enabled can work.
-         */
-        intel_init_power_wells(dev);
 }
 
 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
...
@@ -972,6 +972,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
         ring->status_page.gfx_addr = obj->gtt_offset;
         ring->status_page.page_addr = kmap(obj->pages[0]);
         if (ring->status_page.page_addr == NULL) {
+                ret = -ENOMEM;
                 goto err_unpin;
         }
         ring->status_page.obj = obj;
@@ -1270,20 +1271,31 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
         drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
         /* Every tail move must follow the sequence below */
+
+        /* Disable notification that the ring is IDLE. The GT
+         * will then assume that it is busy and bring it out of rc6.
+         */
         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
-                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
-        I915_WRITE(GEN6_BSD_RNCID, 0x0);
+                   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+        /* Clear the context id. Here be magic! */
+        I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+        /* Wait for the ring not to be idle, i.e. for it to wake up. */
         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
-                      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+                      GEN6_BSD_SLEEP_INDICATOR) == 0,
                      50))
-                DRM_ERROR("timed out waiting for IDLE Indicator\n");
+                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
+        /* Now that the ring is fully powered up, update the tail */
         I915_WRITE_TAIL(ring, value);
+        POSTING_READ(RING_TAIL(ring->mmio_base));
+
+        /* Let the ring send IDLE messages to the GT again,
+         * and so let it sleep to conserve power when idle.
+         */
         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
-                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+                   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
...
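The rewritten tail-write workaround relies on the driver's masked-bit helpers: in registers like GEN6_BSD_SLEEP_PSMI_CONTROL the upper 16 bits of a write select which of the lower 16 bits actually change, so a single bit can be set or cleared without a read-modify-write. A small standalone model of that write semantics (the register model is a toy; only the _MASKED_BIT_* helpers match the driver's definitions):

/* Sketch: how masked-bit registers behave. Bits 31:16 of the written value
 * select which of bits 15:0 actually change, so one bit can be flipped
 * without reading the register first. */
#include <stdio.h>
#include <stdint.h>

#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)

#define SLEEP_MSG_DISABLE       (1 << 0)  /* e.g. GEN6_BSD_SLEEP_MSG_DISABLE */

/* Toy model of the hardware's write behaviour for such a register. */
static void masked_write(uint32_t *reg, uint32_t val)
{
        uint32_t mask = val >> 16;

        *reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t psmi_ctl = 0x0000000c; /* some other bits already set */

        masked_write(&psmi_ctl, _MASKED_BIT_ENABLE(SLEEP_MSG_DISABLE));
        printf("after enable:  %#010x\n", psmi_ctl);  /* bit 0 set, rest untouched */

        masked_write(&psmi_ctl, _MASKED_BIT_DISABLE(SLEEP_MSG_DISABLE));
        printf("after disable: %#010x\n", psmi_ctl);  /* bit 0 cleared again */
        return 0;
}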
@@ -234,6 +234,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
         dvscntr &= ~DVS_PIXFORMAT_MASK;
         dvscntr &= ~DVS_RGB_ORDER_XBGR;
         dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+        dvscntr &= ~DVS_TILED;
 
         switch (fb->pixel_format) {
         case DRM_FORMAT_XBGR8888:
...