Commit 64aa7e34 authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2015-02-11' of git://anongit.freedesktop.org/drm-intel into drm-next

Here's a batch of i915 fixes for drm-next, with more cc: stable material
than fixes specific to drm-next.

* tag 'drm-intel-next-fixes-2015-02-11' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Clamp efficient frequency to valid range
  drm/i915: Really ignore long HPD pulses on eDP
  drm/i915: Correct the base value while updating LP_OUTPUT_HOLD in MIPI_PORT_CTRL
  drm/i915: Insert a command barrier on BLT/BSD cache flushes
  drm/i915: Drop vblank wait from intel_dp_link_down
  drm/i915: Squelch overzealous uncore reset WARN_ON
  drm/i915: Take runtime pm reference on hangcheck_info
  drm/i915: Correct the IOSF Dev_FN field for IOSF transfers
  drm/i915: Prevent use-after-free in invalidate_range_start callback
parents 85840c76 46efa4ab
@@ -1223,8 +1223,11 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
         struct drm_info_node *node = m->private;
-        struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+        struct drm_device *dev = node->minor->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *ring;
+        u64 acthd[I915_NUM_RINGS];
+        u32 seqno[I915_NUM_RINGS];
         int i;
 
         if (!i915.enable_hangcheck) {
@@ -1232,6 +1235,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                 return 0;
         }
 
+        intel_runtime_pm_get(dev_priv);
+
+        for_each_ring(ring, dev_priv, i) {
+                seqno[i] = ring->get_seqno(ring, false);
+                acthd[i] = intel_ring_get_active_head(ring);
+        }
+
+        intel_runtime_pm_put(dev_priv);
+
         if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
                 seq_printf(m, "Hangcheck active, fires in %dms\n",
                            jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
@@ -1242,14 +1254,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
         for_each_ring(ring, dev_priv, i) {
                 seq_printf(m, "%s:\n", ring->name);
                 seq_printf(m, "\tseqno = %x [current %x]\n",
-                           ring->hangcheck.seqno, ring->get_seqno(ring, false));
-                seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
-                seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+                           ring->hangcheck.seqno, seqno[i]);
                 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                            (long long)ring->hangcheck.acthd,
-                           (long long)intel_ring_get_active_head(ring));
+                           (long long)acthd[i]);
                 seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
                            (long long)ring->hangcheck.max_acthd);
+                seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
+                seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
         }
 
         return 0;
...
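The i915_hangcheck_info() change above illustrates a common ordering: take a runtime PM reference, sample the hardware state into local variables, drop the reference, and only then format the output, so the device is not held awake while seq_printf() runs. Below is a minimal userspace sketch of that ordering; pm_get(), pm_put() and read_hw() are hypothetical stand-ins for intel_runtime_pm_get(), intel_runtime_pm_put() and the per-ring register reads, not the driver's actual API.

#include <stdio.h>

/* Hypothetical stand-ins: the real driver takes/drops a wakeref and
 * reads ACTHD/seqno registers here. */
static void pm_get(void) { /* device must stay awake from here on */ }
static void pm_put(void) { /* device may runtime-suspend again */ }
static unsigned int read_hw(int ring) { return 0x1000u + (unsigned int)ring; }

#define NUM_RINGS 3

int main(void)
{
    unsigned int acthd[NUM_RINGS];
    int i;

    /* Sample everything that needs the device awake in one short window. */
    pm_get();
    for (i = 0; i < NUM_RINGS; i++)
        acthd[i] = read_hw(i);
    pm_put();

    /* Formatting happens after the wakeref has been dropped. */
    for (i = 0; i < NUM_RINGS; i++)
        printf("ring %d: ACTHD = 0x%08x\n", i, acthd[i]);

    return 0;
}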
@@ -113,7 +113,10 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
                         continue;
 
                 obj = mo->obj;
-                drm_gem_object_reference(&obj->base);
+
+                if (!kref_get_unless_zero(&obj->base.refcount))
+                        continue;
+
                 spin_unlock(&mn->lock);
 
                 cancel_userptr(obj);
@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                 it = interval_tree_iter_first(&mn->objects, start, end);
                 if (it != NULL) {
                         obj = container_of(it, struct i915_mmu_object, it)->obj;
-                        drm_gem_object_reference(&obj->base);
+
+                        /* The mmu_object is released late when destroying the
+                         * GEM object so it is entirely possible to gain a
+                         * reference on an object in the process of being freed
+                         * since our serialisation is via the spinlock and not
+                         * the struct_mutex - and consequently use it after it
+                         * is freed and then double free it.
+                         */
+                        if (!kref_get_unless_zero(&obj->base.refcount)) {
+                                spin_unlock(&mn->lock);
+                                serial = 0;
+                                continue;
+                        }
+
                         serial = mn->serial;
                 }
                 spin_unlock(&mn->lock);
...
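The new comment above spells out the race: with only the notifier spinlock held (not struct_mutex), the object may already be on its way to being freed, so unconditionally taking another reference can resurrect a dying object and later double-free it. kref_get_unless_zero() only grabs a reference while the count is still non-zero. Below is a rough userspace analogue of that check using C11 atomics; struct obj and get_unless_zero() are illustrative, not the kernel's kref implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
    atomic_int refcount;
};

/* Take a reference only if the object is not already being destroyed
 * (refcount has not reached zero); same intent as kref_get_unless_zero(). */
static bool get_unless_zero(struct obj *o)
{
    int old = atomic_load(&o->refcount);

    while (old != 0) {
        /* On failure, old is reloaded with the current count and rechecked. */
        if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
            return true;    /* reference taken */
    }
    return false;           /* object is dying, leave it alone */
}

int main(void)
{
    struct obj live = { .refcount = 1 };
    struct obj dying = { .refcount = 0 };

    printf("live:  %s\n", get_unless_zero(&live) ? "got ref" : "skipped");
    printf("dying: %s\n", get_unless_zero(&dying) ? "got ref" : "skipped");
    return 0;
}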
@@ -3521,8 +3521,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
         enum port port = intel_dig_port->port;
         struct drm_device *dev = intel_dig_port->base.base.dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
-        struct intel_crtc *intel_crtc =
-                to_intel_crtc(intel_dig_port->base.base.crtc);
         uint32_t DP = intel_dp->DP;
 
         if (WARN_ON(HAS_DDI(dev)))
@@ -3547,8 +3545,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
         if (HAS_PCH_IBX(dev) &&
             I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
-                struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
-
                 /* Hardware workaround: leaving our transcoder select
                  * set to transcoder B while it's off will prevent the
                  * corresponding HDMI output on transcoder A.
@@ -3559,18 +3555,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                  */
                 DP &= ~DP_PIPEB_SELECT;
                 I915_WRITE(intel_dp->output_reg, DP);
-                POSTING_READ(intel_dp->output_reg);
-
-                /* Changes to enable or select take place the vblank
-                 * after being written.
-                 */
-                if (WARN_ON(crtc == NULL)) {
-                        /* We should never try to disable a port without a crtc
-                         * attached. For paranoia keep the code around for a
-                         * bit. */
-                        POSTING_READ(intel_dp->output_reg);
-                        msleep(50);
-                } else
-                        intel_wait_for_vblank(dev, intel_crtc->pipe);
         }
 
         DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -4446,7 +4431,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                  */
                 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
                               port_name(intel_dig_port->port));
-                return false;
+                return IRQ_HANDLED;
         }
 
         DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
...
@@ -360,12 +360,11 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
                 I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
                 usleep_range(2500, 3000);
 
-                val = I915_READ(MIPI_PORT_CTRL(port));
-
                 /* Enable MIPI PHY transparent latch
                  * Common bit for both MIPI Port A & MIPI Port C
                  * No similar bit in MIPI Port C reg
                  */
+                val = I915_READ(MIPI_PORT_CTRL(PORT_A));
                 I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
                 usleep_range(1000, 1500);
@@ -543,10 +542,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
                                                 == 0x00000), 30))
                         DRM_ERROR("DSI LP not going Low\n");
 
-                val = I915_READ(MIPI_PORT_CTRL(port));
                 /* Disable MIPI PHY transparent latch
                  * Common bit for both MIPI Port A & MIPI Port C
                  */
+                val = I915_READ(MIPI_PORT_CTRL(PORT_A));
                 I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD);
                 usleep_range(1000, 1500);
...
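The MIPI_PORT_CTRL fix above is a read-modify-write base-value bug: LP_OUTPUT_HOLD lives in the Port A register, so the value being modified must be read back from MIPI_PORT_CTRL(PORT_A), the same register that is written, rather than from MIPI_PORT_CTRL(port). Here is a generic sketch of the corrected pattern; read_reg(), write_reg() and the bit position are made up for illustration, not the driver's I915_READ/I915_WRITE.

#include <stdint.h>
#include <stdio.h>

#define LP_OUTPUT_HOLD (1u << 16)       /* illustrative bit position */

static uint32_t regs[2] = { 0x0, 0x1 }; /* fake PORT_A / PORT_C registers */

static uint32_t read_reg(int port)              { return regs[port]; }
static void     write_reg(int port, uint32_t v) { regs[port] = v; }

int main(void)
{
    /* Correct read-modify-write: the base value comes from the same
     * register that is written back, so its other bits are preserved. */
    uint32_t val = read_reg(0);             /* PORT_A */
    write_reg(0, val | LP_OUTPUT_HOLD);

    printf("PORT_A = 0x%08x\n", (unsigned int)regs[0]);
    return 0;
}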
@@ -1211,15 +1211,17 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
         cmd = MI_FLUSH_DW + 1;
 
-        if (ring == &dev_priv->ring[VCS]) {
-                if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-                        cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
-                                MI_FLUSH_DW_STORE_INDEX |
-                                MI_FLUSH_DW_OP_STOREDW;
-        } else {
-                if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
-                        cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
-                                MI_FLUSH_DW_OP_STOREDW;
-        }
+        /* We always require a command barrier so that subsequent
+         * commands, such as breadcrumb interrupts, are strictly ordered
+         * wrt the contents of the write cache being flushed to memory
+         * (and thus being coherent from the CPU).
+         */
+        cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+        if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+                cmd |= MI_INVALIDATE_TLB;
+                if (ring == &dev_priv->ring[VCS])
+                        cmd |= MI_INVALIDATE_BSD;
+        }
 
         intel_logical_ring_emit(ringbuf, cmd);
...
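Both gen8_emit_flush() above and the legacy gen6 flush paths further down now compose the MI_FLUSH_DW command the same way: the post-sync dword write (MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW) is set unconditionally so every flush doubles as a command barrier, and only the TLB/BSD invalidation bits remain conditional. A small self-contained sketch of that flag composition follows; the bit values are placeholders, the real encodings live in the driver's register headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real MI_* encodings are defined by the driver. */
#define MI_FLUSH_DW             (1u << 0)
#define MI_FLUSH_DW_STORE_INDEX (1u << 1)
#define MI_FLUSH_DW_OP_STOREDW  (1u << 2)
#define MI_INVALIDATE_TLB       (1u << 3)
#define MI_INVALIDATE_BSD       (1u << 4)

static uint32_t build_flush_cmd(bool invalidate, bool is_bsd_ring)
{
    uint32_t cmd = MI_FLUSH_DW;

    /* Always request the post-sync dword write so the flush also acts
     * as a barrier for whatever commands are emitted after it. */
    cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

    /* Only the invalidation bits stay conditional. */
    if (invalidate) {
        cmd |= MI_INVALIDATE_TLB;
        if (is_bsd_ring)
            cmd |= MI_INVALIDATE_BSD;
    }
    return cmd;
}

int main(void)
{
    printf("plain flush:      0x%02x\n", (unsigned int)build_flush_cmd(false, false));
    printf("invalidate (BSD): 0x%02x\n", (unsigned int)build_flush_cmd(true, true));
    return 0;
}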
@@ -4005,7 +4005,10 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
                                     &ddcc_status);
                 if (0 == ret)
                         dev_priv->rps.efficient_freq =
-                                (ddcc_status >> 8) & 0xff;
+                                clamp_t(u8,
+                                        ((ddcc_status >> 8) & 0xff),
+                                        dev_priv->rps.min_freq,
+                                        dev_priv->rps.max_freq);
         }
 
         /* Preserve min/max settings in case of re-init */
...
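clamp_t(u8, val, min, max) simply bounds the byte read out of the DDCC status word to the [min_freq, max_freq] range, so a bogus reported efficient frequency can never escape the valid interval. A standalone C equivalent of that clamp; clamp_u8() here is a hypothetical helper with the same effect as the kernel macro, and the register value and limits are made up.

#include <stdint.h>
#include <stdio.h>

/* Clamp a value into [lo, hi]; same effect as the kernel's clamp_t(u8, ...). */
static uint8_t clamp_u8(uint8_t val, uint8_t lo, uint8_t hi)
{
    if (val < lo)
        return lo;
    if (val > hi)
        return hi;
    return val;
}

int main(void)
{
    uint32_t ddcc_status = 0xff00;          /* made-up register value */
    uint8_t min_freq = 11, max_freq = 22;   /* made-up RPS limits */

    /* Byte 1 of the status word carries the reported efficient frequency. */
    uint8_t efficient = clamp_u8((ddcc_status >> 8) & 0xff,
                                 min_freq, max_freq);

    printf("efficient_freq = %u\n", (unsigned int)efficient);  /* prints 22 */
    return 0;
}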
@@ -2240,6 +2240,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
         cmd = MI_FLUSH_DW;
         if (INTEL_INFO(ring->dev)->gen >= 8)
                 cmd += 1;
+
+        /* We always require a command barrier so that subsequent
+         * commands, such as breadcrumb interrupts, are strictly ordered
+         * wrt the contents of the write cache being flushed to memory
+         * (and thus being coherent from the CPU).
+         */
+        cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
         /*
          * Bspec vol 1c.5 - video engine command streamer:
          * "If ENABLED, all TLBs will be invalidated once the flush
@@ -2247,8 +2255,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
          * Post-Sync Operation field is a value of 1h or 3h."
          */
         if (invalidate & I915_GEM_GPU_DOMAINS)
-                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
-                        MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+
         intel_ring_emit(ring, cmd);
         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
         if (INTEL_INFO(ring->dev)->gen >= 8) {
@@ -2344,6 +2352,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
         cmd = MI_FLUSH_DW;
         if (INTEL_INFO(ring->dev)->gen >= 8)
                 cmd += 1;
+
+        /* We always require a command barrier so that subsequent
+         * commands, such as breadcrumb interrupts, are strictly ordered
+         * wrt the contents of the write cache being flushed to memory
+         * (and thus being coherent from the CPU).
+         */
+        cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
         /*
          * Bspec vol 1c.3 - blitter engine command streamer:
          * "If ENABLED, all TLBs will be invalidated once the flush
@@ -2351,8 +2367,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
          * Post-Sync Operation field is a value of 1h or 3h."
          */
         if (invalidate & I915_GEM_DOMAIN_RENDER)
-                cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
-                        MI_FLUSH_DW_OP_STOREDW;
+                cmd |= MI_INVALIDATE_TLB;
         intel_ring_emit(ring, cmd);
         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
         if (INTEL_INFO(ring->dev)->gen >= 8) {
...
@@ -82,7 +82,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
         mutex_lock(&dev_priv->dpio_lock);
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
                         SB_CRRDDA_NP, addr, &val);
         mutex_unlock(&dev_priv->dpio_lock);
 
@@ -94,7 +94,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
         mutex_lock(&dev_priv->dpio_lock);
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
                         SB_CRWRDA_NP, addr, &val);
         mutex_unlock(&dev_priv->dpio_lock);
 }
@@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
 {
         u32 val = 0;
 
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
                         SB_CRRDDA_NP, reg, &val);
 
         return val;
@@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
 
 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
                         SB_CRWRDA_NP, reg, &val);
 }
 
@@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
         mutex_lock(&dev_priv->dpio_lock);
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
                         SB_CRRDDA_NP, addr, &val);
         mutex_unlock(&dev_priv->dpio_lock);
 
@@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
 u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
 {
         u32 val = 0;
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
                         SB_CRRDDA_NP, reg, &val);
         return val;
 }
 
 void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
                         SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
 {
         u32 val = 0;
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
                         SB_CRRDDA_NP, reg, &val);
         return val;
 }
 
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
                         SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
 {
         u32 val = 0;
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
                         SB_CRRDDA_NP, reg, &val);
         return val;
 }
 
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
                         SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
 {
         u32 val = 0;
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
                         SB_CRRDDA_NP, reg, &val);
         return val;
 }
 
 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
                         SB_CRWRDA_NP, reg, &val);
 }
...
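The sideband fix is purely a change in the device/function value handed to vlv_sideband_rw(): PCI_DEVFN(slot, func) packs the pair as (slot << 3) | func, so PCI_DEVFN(2, 0) encodes 0x10 while PCI_DEVFN(0, 0) encodes 0x00, which is what these IOSF transfers are switched to above. A quick standalone check of that encoding; the macro below mirrors the definition in include/linux/pci.h.

#include <stdio.h>

/* Same encoding as the kernel's PCI_DEVFN() in include/linux/pci.h. */
#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
    printf("PCI_DEVFN(2, 0) = 0x%02x\n", (unsigned int)PCI_DEVFN(2, 0));  /* 0x10 */
    printf("PCI_DEVFN(0, 0) = 0x%02x\n", (unsigned int)PCI_DEVFN(0, 0));  /* 0x00 */
    return 0;
}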
@@ -166,7 +166,8 @@ fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_do
         struct intel_uncore_forcewake_domain *d;
         enum forcewake_domain_id id;
 
-        WARN_ON(dev_priv->uncore.fw_domains == 0);
+        if (dev_priv->uncore.fw_domains == 0)
+                return;
 
         for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
                 fw_domain_reset(d);
@@ -997,6 +998,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
 
+        if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+                return;
+
         if (IS_GEN9(dev)) {
                 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1069,6 +1073,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
                 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                FORCEWAKE, FORCEWAKE_ACK);
         }
+
+        /* All future platforms are expected to require complex power gating */
+        WARN_ON(dev_priv->uncore.fw_domains == 0);
 }
 
 void intel_uncore_init(struct drm_device *dev)
...