Commit f7d6040a authored by Linus Torvalds

Merge tag 'pm-4.10-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These add a quirk to intel_pstate to work around a firmware setting
  that leads to frequency scaling issues (discovered recently) on some
  Intel Kaby Lake processors, fix up the recently added brcmstb-avs
  cpufreq driver and avoid false-positive warnings from the runtime PM
  framework triggered by recent changes in i915.

  Specifics:

   - Add an intel_pstate driver quirk to work around a firmware setting
     that leads to frequency scaling issues on desktop Intel Kaby Lake
     processors in some configurations if the hardware-managed P-states
     (HWP) feature is in use (Srinivas Pandruvada)

   - Fix up the recently added brcmstb-avs cpufreq driver: fix a bug
     related to system suspend and change the sysfs interface to match
     the user space expectations (Markus Mayer)

   - Modify the runtime PM framework to avoid false-positive warnings
     from the might_sleep_if() assertions in it (Rafael Wysocki)"

* tag 'pm-4.10-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / runtime: Avoid false-positive warnings from might_sleep_if()
  cpufreq: intel_pstate: Disable energy efficiency optimization
  cpufreq: brcmstb-avs-cpufreq: properly retrieve P-state upon suspend
  cpufreq: brcmstb-avs-cpufreq: extend sysfs entry brcm_avs_pmap
parents 50dcb6cd cbf304e4
drivers/base/power/runtime.c
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_idle(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
 
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);
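For context on the runtime PM change above: moving the might_sleep_if() assertions below the RPM_GET_PUT fast path (and, in __pm_runtime_resume(), skipping them when the device is already RPM_ACTIVE) means the debug warning only fires when the call can actually reach a code path that may sleep. The fragment below is an illustrative sketch, not part of this merge; foo_device, foo_handle_event() and the locking scheme are made up, and it merely shows the kind of call pattern that used to warn spuriously: a synchronous put issued in atomic context while another reference keeps the usage count above zero, so only the counter is decremented and nothing can sleep.

/* Hypothetical driver fragment -- illustration only, not from this merge. */
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/device.h>

struct foo_device {			/* made-up driver data */
	struct device *dev;
	spinlock_t lock;
};

static void foo_handle_event(struct foo_device *foo)
{
	unsigned long flags;

	/*
	 * Another reference (e.g. taken at probe) is assumed to be held,
	 * so the usage count stays above zero across the put below.
	 */
	pm_runtime_get_sync(foo->dev);

	spin_lock_irqsave(&foo->lock, flags);	/* atomic context */
	/* ... program the hardware ... */
	pm_runtime_put_sync(foo->dev);		/* only drops the counter; cannot sleep */
	spin_unlock_irqrestore(&foo->lock, flags);

	/*
	 * Before this fix, the put above tripped might_sleep_if() even
	 * though the early usage-count check meant rpm_idle() was never
	 * reached; with the assertion relocated, no warning is emitted.
	 */
}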
drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy,
 static int brcm_avs_suspend(struct cpufreq_policy *policy)
 {
 	struct private_data *priv = policy->driver_data;
+	int ret;
+
+	ret = brcm_avs_get_pmap(priv, &priv->pmap);
+	if (ret)
+		return ret;
 
-	return brcm_avs_get_pmap(priv, &priv->pmap);
+	/*
+	 * We can't use the P-state returned by brcm_avs_get_pmap(), since
+	 * that's the initial P-state from when the P-map was downloaded to the
+	 * AVS co-processor, not necessarily the P-state we are running at now.
+	 * So, we get the current P-state explicitly.
+	 */
+	return brcm_avs_get_pstate(priv, &priv->pmap.state);
 }
 
 static int brcm_avs_resume(struct cpufreq_policy *policy)
@@ -954,9 +965,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
 	brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
 	brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
 
-	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
 		pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
-		mdiv_p3, mdiv_p4);
+		mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
 }
 
 static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
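The show_brcm_avs_pmap() change simply appends two more fields, pmap.mode and pmap.state, to the existing space-separated output, so the line now carries eleven values: p1, p2, ndiv, pdiv, the five mdiv_p* dividers, mode and state. A minimal user-space reader is sketched below; the sysfs path is assumed to be the usual per-policy cpufreq location and is not defined by this merge.

/* Illustration only: parse the extended brcm_avs_pmap sysfs output. */
#include <stdio.h>

int main(void)
{
	unsigned int p1, p2, ndiv, pdiv, m0, m1, m2, m3, m4, mode, state;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/brcm_avs_pmap", "r");

	if (!f)
		return 1;
	/* The last two fields (mode, state) are the ones added by this patch. */
	if (fscanf(f, "%x %x %u %u %u %u %u %u %u %u %u",
		   &p1, &p2, &ndiv, &pdiv, &m0, &m1, &m2, &m3, &m4,
		   &mode, &state) == 11)
		printf("mode=%u state=%u\n", mode, state);
	fclose(f);
	return 0;
}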
drivers/cpufreq/intel_pstate.c
@@ -1235,6 +1235,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
 
+#define MSR_IA32_POWER_CTL_BIT_EE	19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+	u64 power_ctl;
+	int ret;
+
+	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+	if (ret)
+		return;
+
+	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+		pr_info("Disabling energy efficiency optimization\n");
+		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+	}
+}
+
 static int atom_get_min_pstate(void)
 {
 	u64 value;
@@ -1845,6 +1864,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	{}
 };
 
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+	{}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
@@ -1875,6 +1899,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu->cpu = cpunum;
 
 	if (hwp_active) {
+		const struct x86_cpu_id *id;
+
+		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+		if (id)
+			intel_pstate_disable_ee(cpunum);
+
 		intel_pstate_hwp_enable(cpu);
 		pid_params.sample_rate_ms = 50;
 		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
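For reference, the intel_pstate quirk sets bit 19 of MSR_IA32_POWER_CTL (MSR 0x1fc) on the matched Kaby Lake desktop parts; with that bit set, the energy-efficiency optimization that causes the frequency scaling problem is disabled. A rough way to confirm the result from user space is to read the MSR through the msr driver. The sketch below assumes the msr module is loaded and root privileges; it only reads the register and does not modify it.

/* Illustration only: check the EE-disable bit in IA32_POWER_CTL on CPU 0. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_IA32_POWER_CTL	0x1fc
#define POWER_CTL_BIT_EE	19

int main(void)
{
	uint64_t power_ctl;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;
	/* The msr device is addressed by MSR number via the file offset. */
	if (pread(fd, &power_ctl, sizeof(power_ctl), MSR_IA32_POWER_CTL) == sizeof(power_ctl))
		printf("EE optimization %s\n",
		       (power_ctl >> POWER_CTL_BIT_EE) & 1 ? "disabled" : "enabled");
	close(fd);
	return 0;
}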