Commit dd0a1aa1 authored by Jeff McGee, committed by Daniel Vetter

drm/i915: Restore rps/rc6 on reset

A check of rps/rc6 state after i915_reset determined that the ring
MAX_IDLE registers were returned to their hardware defaults and that
the GEN6_PMIMR register was set to mask all interrupts. This change
restores those values to their pre-reset states by re-initializing
rps/rc6 in i915_reset. A full re-initialization was chosen over a
targeted set of restore operations for simplicity and maintainability.
Note that the re-initialization is skipped for Ironlake, due to
previous concerns that it does not respond well to re-init after reset.
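
In practice the re-init reduces to the guard added to i915_reset(),
shown here condensed from the hunk below for reference:

    /* Condensed from the i915_reset() hunk below: re-run the full
     * rps/rc6 setup once the drm irq handler has been re-installed.
     * Ironlake (gen 5) is skipped per the concerns noted above. */
    if (INTEL_INFO(dev)->gen > 5) {
            mutex_lock(&dev->struct_mutex);
            intel_enable_gt_powersave(dev);
            mutex_unlock(&dev->struct_mutex);
    }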

Also updated the rps initialization sequence to preserve existing
min/max values in the case of a re-init. We assume the values were
validated when they were set and do not perform further range checking
there. The debugfs interface for changing min/max was updated with
range checking to ensure this condition (such checking is already
present in the sysfs interface).
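
The preserve-on-re-init logic amounts to the following sketch,
condensed from the gen6_enable_rps() hunk below (the valleyview path
is analogous; the new debugfs range check is visible in the
i915_max_freq_set()/i915_min_freq_set() hunks):

    /* Hardware limits are re-read on every init, but a non-zero
     * min/max set earlier (e.g. via sysfs) is left untouched. */
    if (dev_priv->rps.max_delay == 0)
            dev_priv->rps.max_delay = hw_max;
    if (dev_priv->rps.min_delay == 0)
            dev_priv->rps.min_delay = hw_min;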

v2: fix rps logging to output hw_max and hw_min, not rps.max_delay
    and rps.min_delay which don't strictly represent hardware limits.
    Add igt testcase to signed-off-by section.

Testcase: igt/pm_rps/reset
Signed-off-by: Jeff McGee <jeff.mcgee@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 1f70999f
@@ -3223,6 +3223,7 @@ i915_max_freq_set(void *data, u64 val)
 {
         struct drm_device *dev = data;
         struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 rp_state_cap, hw_max, hw_min;
         int ret;

         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3241,14 +3242,29 @@ i915_max_freq_set(void *data, u64 val)
          */
         if (IS_VALLEYVIEW(dev)) {
                 val = vlv_freq_opcode(dev_priv, val);
-                dev_priv->rps.max_delay = val;
-                valleyview_set_rps(dev, val);
+
+                hw_max = valleyview_rps_max_freq(dev_priv);
+                hw_min = valleyview_rps_min_freq(dev_priv);
         } else {
                 do_div(val, GT_FREQUENCY_MULTIPLIER);
-                dev_priv->rps.max_delay = val;
-                gen6_set_rps(dev, val);
+
+                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+                hw_max = dev_priv->rps.hw_max;
+                hw_min = (rp_state_cap >> 16) & 0xff;
+        }
+
+        if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+                mutex_unlock(&dev_priv->rps.hw_lock);
+                return -EINVAL;
         }
+
+        dev_priv->rps.max_delay = val;
+
+        if (IS_VALLEYVIEW(dev))
+                valleyview_set_rps(dev, val);
+        else
+                gen6_set_rps(dev, val);
+
         mutex_unlock(&dev_priv->rps.hw_lock);

         return 0;
@@ -3288,6 +3304,7 @@ i915_min_freq_set(void *data, u64 val)
 {
         struct drm_device *dev = data;
         struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 rp_state_cap, hw_max, hw_min;
         int ret;

         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3306,13 +3323,29 @@ i915_min_freq_set(void *data, u64 val)
          */
         if (IS_VALLEYVIEW(dev)) {
                 val = vlv_freq_opcode(dev_priv, val);
-                dev_priv->rps.min_delay = val;
-                valleyview_set_rps(dev, val);
+
+                hw_max = valleyview_rps_max_freq(dev_priv);
+                hw_min = valleyview_rps_min_freq(dev_priv);
         } else {
                 do_div(val, GT_FREQUENCY_MULTIPLIER);
-                dev_priv->rps.min_delay = val;
-                gen6_set_rps(dev, val);
+
+                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+                hw_max = dev_priv->rps.hw_max;
+                hw_min = (rp_state_cap >> 16) & 0xff;
+        }
+
+        if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+                mutex_unlock(&dev_priv->rps.hw_lock);
+                return -EINVAL;
         }
+
+        dev_priv->rps.min_delay = val;
+
+        if (IS_VALLEYVIEW(dev))
+                valleyview_set_rps(dev, val);
+        else
+                gen6_set_rps(dev, val);
+
         mutex_unlock(&dev_priv->rps.hw_lock);

         return 0;
...
@@ -728,6 +728,17 @@ int i915_reset(struct drm_device *dev)
                 drm_irq_uninstall(dev);
                 drm_irq_install(dev);

+                /* rps/rc6 re-init is necessary to restore state lost after the
+                 * reset and the re-install of drm irq. Skip for ironlake per
+                 * previous concerns that it doesn't respond well to some forms
+                 * of re-init after reset. */
+                if (INTEL_INFO(dev)->gen > 5) {
+                        mutex_lock(&dev->struct_mutex);
+                        intel_enable_gt_powersave(dev);
+                        mutex_unlock(&dev->struct_mutex);
+                }
+
                 intel_hpd_init(dev);
         } else {
                 mutex_unlock(&dev->struct_mutex);
...
@@ -3322,7 +3322,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring;
-        u32 rp_state_cap;
+        u32 rp_state_cap, hw_max, hw_min;
         u32 gt_perf_status;
         u32 rc6vids, pcu_mbox, rc6_mask = 0;
         u32 gtfifodbg;
@@ -3351,13 +3351,20 @@ static void gen6_enable_rps(struct drm_device *dev)
         gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);

         /* In units of 50MHz */
-        dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
-        dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+        dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
+        hw_min = (rp_state_cap >> 16) & 0xff;
         dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
         dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
         dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
         dev_priv->rps.cur_delay = 0;

+        /* Preserve min/max settings in case of re-init */
+        if (dev_priv->rps.max_delay == 0)
+                dev_priv->rps.max_delay = hw_max;
+        if (dev_priv->rps.min_delay == 0)
+                dev_priv->rps.min_delay = hw_min;
+
         /* disable the counters and set deterministic thresholds */
         I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3586,7 +3593,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_ring_buffer *ring;
-        u32 gtfifodbg, val, rc6_mode = 0;
+        u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0;
         int i;

         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -3648,21 +3655,27 @@ static void valleyview_enable_rps(struct drm_device *dev)
                          vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                          dev_priv->rps.cur_delay);

-        dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
-        dev_priv->rps.hw_max = dev_priv->rps.max_delay;
+        dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
         DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                         vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
-                         dev_priv->rps.max_delay);
+                         vlv_gpu_freq(dev_priv, hw_max),
+                         hw_max);

         dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
         DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
                          vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                          dev_priv->rps.rpe_delay);

-        dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
+        hw_min = valleyview_rps_min_freq(dev_priv);
         DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                         vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
-                         dev_priv->rps.min_delay);
+                         vlv_gpu_freq(dev_priv, hw_min),
+                         hw_min);
+
+        /* Preserve min/max settings in case of re-init */
+        if (dev_priv->rps.max_delay == 0)
+                dev_priv->rps.max_delay = hw_max;
+        if (dev_priv->rps.min_delay == 0)
+                dev_priv->rps.min_delay = hw_min;

         DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
                          vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
...