Commit ec403b89 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2015-10-01' of git://anongit.freedesktop.org/drm-intel into drm-fixes

a few i915 fixes for v4.3.

* tag 'drm-intel-fixes-2015-10-01' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Call non-locking version of drm_kms_helper_poll_enable(), v2
  drm: Add a non-locking version of drm_kms_helper_poll_enable(), v2
  drm/i915: Consider HW CSB write pointer before resetting the sw read pointer
  drm/i915/skl: Don't call intel_prepare_ddi when encoder list isn't yet initialized.
parents 62886a36 b94be972
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 }
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+/**
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
+ *
+ * This is like drm_kms_helper_poll_enable() however it is to be
+ * called from a context where the mode_config mutex is locked
+ * already.
+ */
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 {
 	bool poll = false;
 	struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
 	if (poll)
 		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
 
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
 							       uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 	/* Re-enable polling in case the global poll config changed. */
 	if (drm_kms_helper_poll != dev->mode_config.poll_running)
-		__drm_kms_helper_poll_enable(dev);
+		drm_kms_helper_poll_enable_locked(dev);
 
 	dev->mode_config.poll_running = drm_kms_helper_poll;
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
 	mutex_lock(&dev->mode_config.mutex);
-	__drm_kms_helper_poll_enable(dev);
+	drm_kms_helper_poll_enable_locked(dev);
 	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
...
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 	/* Enable polling and queue hotplug re-enabling. */
 	if (hpd_disabled) {
-		drm_kms_helper_poll_enable(dev);
+		drm_kms_helper_poll_enable_locked(dev);
 		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
 				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
 	}
...
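The two helpers differ only in locking: drm_kms_helper_poll_enable() takes dev->mode_config.mutex itself, while drm_kms_helper_poll_enable_locked() expects the caller to already hold it, as is the case in intel_hpd_irq_storm_disable() above. A minimal sketch of the intended calling pattern (the wrapper function name here is invented for illustration):

	/* Sketch only: a caller that already holds mode_config.mutex must use the
	 * _locked variant; calling drm_kms_helper_poll_enable() here would
	 * deadlock trying to take the same mutex again. */
	static void example_reenable_polling(struct drm_device *dev)
	{
		mutex_lock(&dev->mode_config.mutex);
		/* ... update connector/hotplug state under the lock ... */
		drm_kms_helper_poll_enable_locked(dev);
		mutex_unlock(&dev->mode_config.mutex);
	}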
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
 	read_pointer = ring->next_context_status_buffer;
-	write_pointer = status_pointer & 0x07;
+	write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
 	if (read_pointer > write_pointer)
-		write_pointer += 6;
+		write_pointer += GEN8_CSB_ENTRIES;
 
 	spin_lock(&ring->execlist_lock);
 
 	while (read_pointer < write_pointer) {
 		read_pointer++;
 		status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-				(read_pointer % 6) * 8);
+				(read_pointer % GEN8_CSB_ENTRIES) * 8);
 		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-				(read_pointer % 6) * 8 + 4);
+				(read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
 
 		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
 			continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 	spin_unlock(&ring->execlist_lock);
 
 	WARN(submit_contexts > 2, "More than two context complete events?\n");
-	ring->next_context_status_buffer = write_pointer % 6;
+	ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
 	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+		   _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+				 ((u32)ring->next_context_status_buffer &
+				  GEN8_CSB_PTR_MASK) << 8));
 }
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 next_context_status_buffer_hw;
 
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
 	POSTING_READ(RING_MODE_GEN7(ring));
-	ring->next_context_status_buffer = 0;
+
+	/*
+	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
+	 * zero, we need to read the write pointer from hardware and use its
+	 * value because "this register is power context save restored".
+	 * Effectively, these states have been observed:
+	 *
+	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+	 * BDW  | CSB regs not reset       | CSB regs reset       |
+	 * CHT  | CSB regs not reset       | CSB regs not reset   |
+	 */
+	next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+						   & GEN8_CSB_PTR_MASK);
+
+	/*
+	 * When the CSB registers are reset (also after power-up / gpu reset),
+	 * CSB write pointer is set to all 1's, which is not valid, use '5' in
+	 * this special case, so the first element read is CSB[0].
+	 */
+	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+	ring->next_context_status_buffer = next_context_status_buffer_hw;
 	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
...
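For intuition, the GEN8_CSB_ENTRIES adjustment above handles wrap-around of the context status buffer ring: when the hardware write pointer has wrapped behind the software read pointer, bumping it by one full ring lets the read_pointer % GEN8_CSB_ENTRIES indexing walk the pending entries in order. A standalone arithmetic sketch (values invented for illustration; only the two constants mirror the new intel_lrc.h defines):

	#include <stdio.h>

	#define GEN8_CSB_ENTRIES 6
	#define GEN8_CSB_PTR_MASK 0x07

	int main(void)
	{
		unsigned int read_pointer = 4;	/* last CSB entry already consumed */
		unsigned int write_pointer = 1;	/* hardware pointer has wrapped */

		if (read_pointer > write_pointer)
			write_pointer += GEN8_CSB_ENTRIES;	/* 1 -> 7, so the loop runs */

		while (read_pointer < write_pointer) {
			read_pointer++;
			printf("process CSB[%u]\n", read_pointer % GEN8_CSB_ENTRIES);
		}
		/* Prints CSB[5], CSB[0], CSB[1]; the driver then stores the read
		 * pointer back modulo the ring size, i.e. 7 % 6 = 1. */
		return 0;
	}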
@@ -25,6 +25,8 @@
 #define _INTEL_LRC_H_
 
 #define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
 #define RING_ELSP(ring)				((ring)->mmio_base+0x230)
...
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 	}
 
 	if (power_well->data == SKL_DISP_PW_1) {
-		intel_prepare_ddi(dev);
+		if (!dev_priv->power_domains.initializing)
+			intel_prepare_ddi(dev);
 		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
 	}
 }
...
@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
 #endif