Commit b39fb297 authored by Ben Widawsky's avatar Ben Widawsky Committed by Daniel Vetter

drm/i915: Rename and comment all the RPS *stuff*

The names of the struct members for RPS are stupid. Every time I need to
do anything in this code I have to spend a significant amount of time to
remember what it all means. By renaming the variables (and adding the
comments) I hope to clear up the situation. Indeed doing this makes some
upcoming patches more readable.

I've avoided ILK because it's possible that the naming used for Ironlake
matches what is in the docs. I believe the ILK power docs were never
published, and I am too lazy to dig them up.

v2: leave rp0, and rp1 in the names. It is useful to have these limits
available at times. min_freq and max_freq (which may be equal to rp0, or
rp1 depending on the platform) represent the actual HW min and max.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 1f05c944
...@@ -1026,7 +1026,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) ...@@ -1026,7 +1026,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq * GT_FREQUENCY_MULTIPLIER); max_freq * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "Max overclocked frequency: %dMHz\n", seq_printf(m, "Max overclocked frequency: %dMHz\n",
dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
} else if (IS_VALLEYVIEW(dev)) { } else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts, val; u32 freq_sts, val;
...@@ -1498,8 +1498,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ...@@ -1498,8 +1498,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
for (gpu_freq = dev_priv->rps.min_delay; for (gpu_freq = dev_priv->rps.min_freq_softlimit;
gpu_freq <= dev_priv->rps.max_delay; gpu_freq <= dev_priv->rps.max_freq_softlimit;
gpu_freq++) { gpu_freq++) {
ia_freq = gpu_freq; ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv, sandybridge_pcode_read(dev_priv,
...@@ -3449,9 +3449,9 @@ i915_max_freq_get(void *data, u64 *val) ...@@ -3449,9 +3449,9 @@ i915_max_freq_get(void *data, u64 *val)
return ret; return ret;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
else else
*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return 0; return 0;
...@@ -3488,16 +3488,16 @@ i915_max_freq_set(void *data, u64 val) ...@@ -3488,16 +3488,16 @@ i915_max_freq_set(void *data, u64 val)
do_div(val, GT_FREQUENCY_MULTIPLIER); do_div(val, GT_FREQUENCY_MULTIPLIER);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max; hw_max = dev_priv->rps.max_freq;
hw_min = (rp_state_cap >> 16) & 0xff; hw_min = (rp_state_cap >> 16) & 0xff;
} }
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL; return -EINVAL;
} }
dev_priv->rps.max_delay = val; dev_priv->rps.max_freq_softlimit = val;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val); valleyview_set_rps(dev, val);
...@@ -3530,9 +3530,9 @@ i915_min_freq_get(void *data, u64 *val) ...@@ -3530,9 +3530,9 @@ i915_min_freq_get(void *data, u64 *val)
return ret; return ret;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
else else
*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return 0; return 0;
...@@ -3569,16 +3569,16 @@ i915_min_freq_set(void *data, u64 val) ...@@ -3569,16 +3569,16 @@ i915_min_freq_set(void *data, u64 val)
do_div(val, GT_FREQUENCY_MULTIPLIER); do_div(val, GT_FREQUENCY_MULTIPLIER);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max; hw_max = dev_priv->rps.max_freq;
hw_min = (rp_state_cap >> 16) & 0xff; hw_min = (rp_state_cap >> 16) & 0xff;
} }
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL; return -EINVAL;
} }
dev_priv->rps.min_delay = val; dev_priv->rps.min_freq_softlimit = val;
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val); valleyview_set_rps(dev, val);
......
...@@ -980,14 +980,24 @@ struct intel_gen6_power_mgmt { ...@@ -980,14 +980,24 @@ struct intel_gen6_power_mgmt {
struct work_struct work; struct work_struct work;
u32 pm_iir; u32 pm_iir;
u8 cur_delay; /* Frequencies are stored in potentially platform dependent multiples.
u8 min_delay; * In other words, *_freq needs to be multiplied by X to be interesting.
u8 max_delay; * Soft limits are those which are used for the dynamic reclocking done
u8 rpe_delay; * by the driver (raise frequencies under heavy loads, and lower for
u8 rp1_delay; * lighter loads). Hard limits are those imposed by the hardware.
u8 rp0_delay; *
u8 hw_max; * A distinction is made for overclocking, which is never enabled by
u8 min_freq; * default, and is considered to be above the hard limit if it's
* possible at all.
*/
u8 cur_freq; /* Current frequency (cached, may not == HW) */
u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/frequency */
u8 rp0_freq; /* Non-overclocked max frequency. */
bool rp_up_masked; bool rp_up_masked;
bool rp_down_masked; bool rp_down_masked;
......
...@@ -1075,7 +1075,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv, ...@@ -1075,7 +1075,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
u32 pm_iir, int new_delay) u32 pm_iir, int new_delay)
{ {
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (new_delay >= dev_priv->rps.max_delay) { if (new_delay >= dev_priv->rps.max_freq_softlimit) {
/* Mask UP THRESHOLD Interrupts */ /* Mask UP THRESHOLD Interrupts */
I915_WRITE(GEN6_PMINTRMSK, I915_WRITE(GEN6_PMINTRMSK,
I915_READ(GEN6_PMINTRMSK) | I915_READ(GEN6_PMINTRMSK) |
...@@ -1090,7 +1090,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv, ...@@ -1090,7 +1090,7 @@ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
dev_priv->rps.rp_down_masked = false; dev_priv->rps.rp_down_masked = false;
} }
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (new_delay <= dev_priv->rps.min_delay) { if (new_delay <= dev_priv->rps.min_freq_softlimit) {
/* Mask DOWN THRESHOLD Interrupts */ /* Mask DOWN THRESHOLD Interrupts */
I915_WRITE(GEN6_PMINTRMSK, I915_WRITE(GEN6_PMINTRMSK,
I915_READ(GEN6_PMINTRMSK) | I915_READ(GEN6_PMINTRMSK) |
...@@ -1136,38 +1136,39 @@ static void gen6_pm_rps_work(struct work_struct *work) ...@@ -1136,38 +1136,39 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2; adj *= 2;
else else
adj = 1; adj = 1;
new_delay = dev_priv->rps.cur_delay + adj; new_delay = dev_priv->rps.cur_freq + adj;
/* /*
* For better performance, jump directly * For better performance, jump directly
* to RPe if we're below it. * to RPe if we're below it.
*/ */
if (new_delay < dev_priv->rps.rpe_delay) if (new_delay < dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.rpe_delay; new_delay = dev_priv->rps.efficient_freq;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay) if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.rpe_delay; new_delay = dev_priv->rps.efficient_freq;
else else
new_delay = dev_priv->rps.min_delay; new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0; adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0) if (adj < 0)
adj *= 2; adj *= 2;
else else
adj = -1; adj = -1;
new_delay = dev_priv->rps.cur_delay + adj; new_delay = dev_priv->rps.cur_freq + adj;
} else { /* unknown event */ } else { /* unknown event */
new_delay = dev_priv->rps.cur_delay; new_delay = dev_priv->rps.cur_freq;
} }
/* sysfs frequency interfaces may have snuck in while servicing the /* sysfs frequency interfaces may have snuck in while servicing the
* interrupt * interrupt
*/ */
new_delay = clamp_t(int, new_delay, new_delay = clamp_t(int, new_delay,
dev_priv->rps.min_delay, dev_priv->rps.max_delay); dev_priv->rps.min_freq_softlimit,
dev_priv->rps.max_freq_softlimit);
gen6_set_pm_mask(dev_priv, pm_iir, new_delay); gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv->dev))
valleyview_set_rps(dev_priv->dev, new_delay); valleyview_set_rps(dev_priv->dev, new_delay);
......
...@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, ...@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff); ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else { } else {
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
} }
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, ...@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
return snprintf(buf, PAGE_SIZE, "%d\n", return snprintf(buf, PAGE_SIZE, "%d\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay)); vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
} }
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
...@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay); ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
else else
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
...@@ -334,13 +334,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -334,13 +334,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER; val /= GT_FREQUENCY_MULTIPLIER;
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max; hw_max = dev_priv->rps.max_freq;
non_oc_max = (rp_state_cap & 0xff); non_oc_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16); hw_min = ((rp_state_cap & 0xff0000) >> 16);
} }
if (val < hw_min || val > hw_max || if (val < hw_min || val > hw_max ||
val < dev_priv->rps.min_delay) { val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL; return -EINVAL;
} }
...@@ -349,9 +349,9 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -349,9 +349,9 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
DRM_DEBUG("User requested overclocking to %d\n", DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER); val * GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.max_delay = val; dev_priv->rps.max_freq_softlimit = val;
if (dev_priv->rps.cur_delay > val) { if (dev_priv->rps.cur_freq > val) {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val); valleyview_set_rps(dev, val);
else else
...@@ -360,7 +360,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -360,7 +360,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need gen6_set_rps to process the new max_delay and /* We still need gen6_set_rps to process the new max_delay and
* update the interrupt limits even though frequency request is * update the interrupt limits even though frequency request is
* unchanged. */ * unchanged. */
gen6_set_rps(dev, dev_priv->rps.cur_delay); gen6_set_rps(dev, dev_priv->rps.cur_freq);
} }
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -379,9 +379,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute ...@@ -379,9 +379,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay); ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
else else
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret); return snprintf(buf, PAGE_SIZE, "%d\n", ret);
...@@ -414,18 +414,18 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -414,18 +414,18 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER; val /= GT_FREQUENCY_MULTIPLIER;
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = dev_priv->rps.hw_max; hw_max = dev_priv->rps.max_freq;
hw_min = ((rp_state_cap & 0xff0000) >> 16); hw_min = ((rp_state_cap & 0xff0000) >> 16);
} }
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL; return -EINVAL;
} }
dev_priv->rps.min_delay = val; dev_priv->rps.min_freq_softlimit = val;
if (dev_priv->rps.cur_delay < val) { if (dev_priv->rps.cur_freq < val) {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val); valleyview_set_rps(dev, val);
else else
...@@ -434,7 +434,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -434,7 +434,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need gen6_set_rps to process the new min_delay and /* We still need gen6_set_rps to process the new min_delay and
* update the interrupt limits even though frequency request is * update the interrupt limits even though frequency request is
* unchanged. */ * unchanged. */
gen6_set_rps(dev, dev_priv->rps.cur_delay); gen6_set_rps(dev, dev_priv->rps.cur_freq);
} }
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment