Commit cb17aa52 authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2018-03-27' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Display fixes for booting with MST hub lid closed and display
  freezing after hibernation (fd.o bugs 105470 & 105196)
- Fix for a very rare interrupt handling race resulting in GPU hang

* tag 'drm-intel-next-fixes-2018-03-27' of git://anongit.freedesktop.org/drm/drm-intel:
  drm/i915: Fix hibernation with ACPI S0 target state
  drm/i915/execlists: Use a locked clear_bit() for synchronisation with interrupt
  drm/i915: Specify which engines to reset following semaphore/event lockups
  drm/i915/dp: Write to SET_POWER dpcd to enable MST hub.
parents 2b4f44ee 300efa9e
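
The SET_POWER fix listed above is not part of the hunks shown below. For context, the DPCD handshake it relies on looks roughly like the following sketch. DP_SET_POWER, DP_SET_POWER_D0, drm_dp_dpcd_writeb() and usleep_range() are real kernel interfaces; wake_mst_hub() and its placement are illustrative, and this only compiles inside a kernel tree, so treat it as a sketch rather than the actual i915 change.

/*
 * Illustrative sketch only -- not the actual i915 patch.
 * wake_mst_hub() is a hypothetical helper.
 */
#include <drm/drm_dp_helper.h>
#include <linux/delay.h>

static int wake_mst_hub(struct drm_dp_aux *aux)
{
	int ret;

	/* Move the sink/hub from D3 to D0 before MST topology probing. */
	ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, DP_SET_POWER_D0);
	if (ret < 0)
		return ret;

	/* The DP spec gives sinks up to 1 ms to exit the power-save state. */
	usleep_range(1000, 2000);

	return 0;
}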
@@ -1611,15 +1611,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
-	bool fw_csr;
 	int ret;
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
 	intel_display_set_init_power(dev_priv, false);
 
-	fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
-		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
 	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
@@ -1627,8 +1624,11 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	 * also enable deeper system power states that would be blocked if the
 	 * firmware was inactive.
 	 */
-	if (!fw_csr)
+	if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
+	    dev_priv->csr.dmc_payload == NULL) {
 		intel_power_domains_suspend(dev_priv);
+		dev_priv->power_domains_suspended = true;
+	}
 
 	ret = 0;
 	if (IS_GEN9_LP(dev_priv))
@@ -1640,8 +1640,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	if (ret) {
 		DRM_ERROR("Suspend complete failed: %d\n", ret);
-		if (!fw_csr)
+		if (dev_priv->power_domains_suspended) {
 			intel_power_domains_init_hw(dev_priv, true);
+			dev_priv->power_domains_suspended = false;
+		}
 
 		goto out;
 	}
@@ -1662,8 +1664,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
 		pci_set_power_state(pdev, PCI_D3hot);
 
-	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
-
 out:
 	enable_rpm_wakeref_asserts(dev_priv);
@@ -1830,8 +1830,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	intel_uncore_resume_early(dev_priv);
 
 	if (IS_GEN9_LP(dev_priv)) {
-		if (!dev_priv->suspended_to_idle)
-			gen9_sanitize_dc_state(dev_priv);
+		gen9_sanitize_dc_state(dev_priv);
 		bxt_disable_dc9(dev_priv);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		hsw_disable_pc8(dev_priv);
@@ -1839,8 +1838,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_uncore_sanitize(dev_priv);
 
-	if (IS_GEN9_LP(dev_priv) ||
-	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+	if (dev_priv->power_domains_suspended)
 		intel_power_domains_init_hw(dev_priv, true);
 	else
 		intel_display_set_init_power(dev_priv, true);
@@ -1850,7 +1848,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	enable_rpm_wakeref_asserts(dev_priv);
 
 out:
-	dev_priv->suspended_to_idle = false;
+	dev_priv->power_domains_suspended = false;
 
 	return ret;
 }
@@ -2119,7 +2119,7 @@ struct drm_i915_private {
 	u32 bxt_phy_grc;
 
 	u32 suspend_count;
-	bool suspended_to_idle;
+	bool power_domains_suspended;
 	struct i915_suspend_saved_registers regfile;
 	struct vlv_s0ix_state vlv_s0ix_state;
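The i915_drv.c and i915_drv.h hunks above all implement one idea: instead of re-deriving at resume time whether the power domains were suspended (the old fw_csr / suspended_to_idle logic, which broke once hibernation changed the ACPI target state), the suspend path now records the fact in power_domains_suspended and resume simply consults that record. A minimal standalone sketch of this record-don't-recompute pattern follows; every name in it is illustrative, not the i915 code.

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool power_domains_suspended;	/* set only when we really suspended them */
};

static void power_domains_suspend(struct dev_state *s)
{
	(void)s;
	puts("power domains suspended");
}

static void power_domains_init_hw(struct dev_state *s)
{
	(void)s;
	puts("power domains re-initialised");
}

/* hypothetical stand-in for the old fw_csr predicate */
static bool firmware_handles_save_restore(bool hibernation)
{
	return !hibernation;
}

static void suspend_late(struct dev_state *s, bool hibernation)
{
	if (!firmware_handles_save_restore(hibernation)) {
		power_domains_suspend(s);
		s->power_domains_suspended = true;	/* record what we did */
	}
}

static void resume_early(struct dev_state *s)
{
	/* consult the record instead of recomputing the predicate */
	if (s->power_domains_suspended)
		power_domains_init_hw(s);
	s->power_domains_suspended = false;
}

int main(void)
{
	struct dev_state s = { .power_domains_suspended = false };

	suspend_late(&s, true);		/* hibernation: domains really go down */
	resume_early(&s);		/* flag guarantees the matching re-init */
	return 0;
}

The point of the design is symmetry: the resume path can never disagree with what suspend actually did, because it no longer re-evaluates conditions that may have changed in between.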
@@ -831,7 +831,8 @@ static void execlists_submission_tasklet(unsigned long data)
 	struct drm_i915_private *dev_priv = engine->i915;
 	bool fw = false;
 
-	/* We can skip acquiring intel_runtime_pm_get() here as it was taken
+	/*
+	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
 	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
 	 * not be relinquished until the device is idle (see
 	 * i915_gem_idle_work_handler()). As a precaution, we make sure
@@ -840,7 +841,8 @@ static void execlists_submission_tasklet(unsigned long data)
 	 */
 	GEM_BUG_ON(!dev_priv->gt.awake);
 
-	/* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+	/*
+	 * Prefer doing test_and_clear_bit() as a two stage operation to avoid
 	 * imposing the cost of a locked atomic transaction when submitting a
 	 * new request (outside of the context-switch interrupt).
 	 */
@@ -856,17 +858,10 @@ static void execlists_submission_tasklet(unsigned long data)
 			execlists->csb_head = -1; /* force mmio read of CSB ptrs */
 		}
 
-		/* The write will be ordered by the uncached read (itself
-		 * a memory barrier), so we do not need another in the form
-		 * of a locked instruction. The race between the interrupt
-		 * handler and the split test/clear is harmless as we order
-		 * our clear before the CSB read. If the interrupt arrived
-		 * first between the test and the clear, we read the updated
-		 * CSB and clear the bit. If the interrupt arrives as we read
-		 * the CSB or later (i.e. after we had cleared the bit) the bit
-		 * is set and we do a new loop.
-		 */
-		__clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+		/* Clear before reading to catch new interrupts */
+		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+		smp_mb__after_atomic();
+
 		if (unlikely(execlists->csb_head == -1)) { /* following a reset */
 			if (!fw) {
 				intel_uncore_forcewake_get(dev_priv,
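The deleted comment argued that an unlocked __clear_bit() was safe because the subsequent uncached CSB read would order the clear. The fix stops relying on that implicit ordering and makes it explicit: an atomic clear_bit() followed by smp_mb__after_atomic() guarantees the clear is globally visible before the status buffer is sampled, closing the rare window in which an interrupt's update could be lost. Below is a standalone C11 model of that clear-before-read handshake; it is illustrative only, and atomic_exchange() stands in for the kernel's clear_bit() + barrier pair.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool irq_posted;		/* models engine->irq_posted bit */
static atomic_int csb_tail;		/* models the CSB write pointer */

/* Interrupt side: publish new CSB state, then set the posted flag.
 * The seq_cst store orders the publish before the flag becomes visible. */
static void *interrupt_handler(void *arg)
{
	(void)arg;
	atomic_store_explicit(&csb_tail, 1, memory_order_relaxed);
	atomic_store(&irq_posted, true);
	return NULL;
}

/* Tasklet side: clear the flag with full ordering *before* sampling the
 * CSB. Either we observe the freshly published tail, or the interrupt's
 * store to irq_posted lands after our clear and we go around the loop
 * again -- the same guarantee clear_bit() + smp_mb__after_atomic() gives. */
static void tasklet(void)
{
	while (atomic_exchange(&irq_posted, false)) {
		int tail = atomic_load_explicit(&csb_tail, memory_order_relaxed);

		printf("processed CSB up to tail %d\n", tail);
	}
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, interrupt_handler, NULL);
	pthread_join(irq, NULL);	/* interrupt has posted its update */
	tasklet();			/* clear first, then read: nothing lost */
	return 0;
}

Compile with -pthread. The invariant the loop preserves is that the flag is never false while unprocessed CSB state exists: any update published before the clear is seen by the read that follows it, and any update published after the clear re-arms the flag.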