Commit 659ed6e2 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm-5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix up recent intel_idle driver changes and fix some ARM cpufreq
  driver issues.

  Specifics:

   - Fix issues with the Qualcomm's cpufreq driver (Dmitry Baryshkov,
     Vladimir Zapolskiy).

   - Fix memory leak with the Sun501 driver (Xiaobing Luo).

   - Make intel_idle enable C1E promotion on all CPUs when C1E is
     preferred to C1 (Artem Bityutskiy).

   - Make C6 optimization on Sapphire Rapids added recently work as
     expected if both C1E and C1 are "preferred" (Artem Bityutskiy)"

* tag 'pm-5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  intel_idle: Fix SPR C6 optimization
  intel_idle: Fix the 'preferred_cstates' module parameter
  cpufreq: qcom-cpufreq-hw: Clear dcvs interrupts
  cpufreq: fix memory leak in sun50i_cpufreq_nvmem_probe
  cpufreq: qcom-cpufreq-hw: Fix throttle frequency value on EPSS platforms
  cpufreq: qcom-hw: provide online/offline operations
  cpufreq: qcom-hw: fix the opp entries refcounting
  cpufreq: qcom-hw: fix the race between LMH worker and cpuhp
  cpufreq: qcom-hw: drop affinity hint before freeing the IRQ
parents f12d31c0 edbd9772
...@@ -24,13 +24,17 @@ ...@@ -24,13 +24,17 @@
#define CLK_HW_DIV 2 #define CLK_HW_DIV 2
#define LUT_TURBO_IND 1 #define LUT_TURBO_IND 1
#define GT_IRQ_STATUS BIT(2)
#define HZ_PER_KHZ 1000 #define HZ_PER_KHZ 1000
struct qcom_cpufreq_soc_data { struct qcom_cpufreq_soc_data {
u32 reg_enable; u32 reg_enable;
u32 reg_domain_state;
u32 reg_dcvs_ctrl; u32 reg_dcvs_ctrl;
u32 reg_freq_lut; u32 reg_freq_lut;
u32 reg_volt_lut; u32 reg_volt_lut;
u32 reg_intr_clr;
u32 reg_current_vote; u32 reg_current_vote;
u32 reg_perf_state; u32 reg_perf_state;
u8 lut_row_size; u8 lut_row_size;
...@@ -280,37 +284,46 @@ static void qcom_get_related_cpus(int index, struct cpumask *m) ...@@ -280,37 +284,46 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
} }
} }
static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{ {
unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote); unsigned int lval;
if (data->soc_data->reg_current_vote)
lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
else
lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
return (val & 0x3FF) * 19200; return lval * xo_rate;
} }
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{ {
struct cpufreq_policy *policy = data->policy; struct cpufreq_policy *policy = data->policy;
int cpu = cpumask_first(policy->cpus); int cpu = cpumask_first(policy->related_cpus);
struct device *dev = get_cpu_device(cpu); struct device *dev = get_cpu_device(cpu);
unsigned long freq_hz, throttled_freq; unsigned long freq_hz, throttled_freq;
struct dev_pm_opp *opp; struct dev_pm_opp *opp;
unsigned int freq;
/* /*
* Get the h/w throttled frequency, normalize it using the * Get the h/w throttled frequency, normalize it using the
* registered opp table and use it to calculate thermal pressure. * registered opp table and use it to calculate thermal pressure.
*/ */
freq = qcom_lmh_get_throttle_freq(data); freq_hz = qcom_lmh_get_throttle_freq(data);
freq_hz = freq * HZ_PER_KHZ;
opp = dev_pm_opp_find_freq_floor(dev, &freq_hz); opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE) if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
dev_pm_opp_find_freq_ceil(dev, &freq_hz); opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
if (IS_ERR(opp)) {
dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
} else {
throttled_freq = freq_hz / HZ_PER_KHZ;
throttled_freq = freq_hz / HZ_PER_KHZ; /* Update thermal pressure (the boost frequencies are accepted) */
arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
/* Update thermal pressure (the boost frequencies are accepted) */ dev_pm_opp_put(opp);
arch_update_thermal_pressure(policy->related_cpus, throttled_freq); }
/* /*
* In the unlikely case policy is unregistered do not enable * In the unlikely case policy is unregistered do not enable
...@@ -350,6 +363,10 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data) ...@@ -350,6 +363,10 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
disable_irq_nosync(c_data->throttle_irq); disable_irq_nosync(c_data->throttle_irq);
schedule_delayed_work(&c_data->throttle_work, 0); schedule_delayed_work(&c_data->throttle_work, 0);
if (c_data->soc_data->reg_intr_clr)
writel_relaxed(GT_IRQ_STATUS,
c_data->base + c_data->soc_data->reg_intr_clr);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -365,9 +382,11 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = { ...@@ -365,9 +382,11 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = {
static const struct qcom_cpufreq_soc_data epss_soc_data = { static const struct qcom_cpufreq_soc_data epss_soc_data = {
.reg_enable = 0x0, .reg_enable = 0x0,
.reg_domain_state = 0x20,
.reg_dcvs_ctrl = 0xb0, .reg_dcvs_ctrl = 0xb0,
.reg_freq_lut = 0x100, .reg_freq_lut = 0x100,
.reg_volt_lut = 0x200, .reg_volt_lut = 0x200,
.reg_intr_clr = 0x308,
.reg_perf_state = 0x320, .reg_perf_state = 0x320,
.lut_row_size = 4, .lut_row_size = 4,
}; };
...@@ -417,16 +436,39 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index) ...@@ -417,16 +436,39 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0; return 0;
} }
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data) static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data = policy->driver_data;
struct platform_device *pdev = cpufreq_get_driver_data();
int ret;
ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
return ret;
}
static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{ {
struct qcom_cpufreq_data *data = policy->driver_data;
if (data->throttle_irq <= 0) if (data->throttle_irq <= 0)
return; return 0;
mutex_lock(&data->throttle_lock); mutex_lock(&data->throttle_lock);
data->cancel_throttle = true; data->cancel_throttle = true;
mutex_unlock(&data->throttle_lock); mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work); cancel_delayed_work_sync(&data->throttle_work);
irq_set_affinity_hint(data->throttle_irq, NULL);
return 0;
}
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
free_irq(data->throttle_irq, data); free_irq(data->throttle_irq, data);
} }
...@@ -583,6 +625,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = { ...@@ -583,6 +625,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get, .get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init, .init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit, .exit = qcom_cpufreq_hw_cpu_exit,
.online = qcom_cpufreq_hw_cpu_online,
.offline = qcom_cpufreq_hw_cpu_offline,
.register_em = cpufreq_register_em_with_opp, .register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch, .fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw", .name = "qcom-cpufreq-hw",
......
...@@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) ...@@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
return -ENOMEM; return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed); ret = sun50i_cpufreq_get_efuse(&speed);
if (ret) if (ret) {
kfree(opp_tables);
return ret; return ret;
}
snprintf(name, MAX_NAME_LEN, "speed%d", speed); snprintf(name, MAX_NAME_LEN, "speed%d", speed);
......
...@@ -69,7 +69,12 @@ static unsigned int preferred_states_mask; ...@@ -69,7 +69,12 @@ static unsigned int preferred_states_mask;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static unsigned long auto_demotion_disable_flags; static unsigned long auto_demotion_disable_flags;
static bool disable_promotion_to_c1e;
static enum {
C1E_PROMOTION_PRESERVE,
C1E_PROMOTION_ENABLE,
C1E_PROMOTION_DISABLE
} c1e_promotion = C1E_PROMOTION_PRESERVE;
struct idle_cpu { struct idle_cpu {
struct cpuidle_state *state_table; struct cpuidle_state *state_table;
...@@ -1398,8 +1403,6 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } ...@@ -1398,8 +1403,6 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
static void c1e_promotion_enable(void);
/** /**
* ivt_idle_state_table_update - Tune the idle states table for Ivy Town. * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
* *
...@@ -1578,17 +1581,14 @@ static void __init spr_idle_state_table_update(void) ...@@ -1578,17 +1581,14 @@ static void __init spr_idle_state_table_update(void)
unsigned long long msr; unsigned long long msr;
/* Check if user prefers C1E over C1. */ /* Check if user prefers C1E over C1. */
if (preferred_states_mask & BIT(2)) { if ((preferred_states_mask & BIT(2)) &&
if (preferred_states_mask & BIT(1)) !(preferred_states_mask & BIT(1))) {
/* Both can't be enabled, stick to the defaults. */ /* Disable C1 and enable C1E. */
return;
spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE; spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE; spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
/* Enable C1E using the "C1E promotion" bit. */ /* Enable C1E using the "C1E promotion" bit. */
c1e_promotion_enable(); c1e_promotion = C1E_PROMOTION_ENABLE;
disable_promotion_to_c1e = false;
} }
/* /*
...@@ -1754,7 +1754,9 @@ static int intel_idle_cpu_init(unsigned int cpu) ...@@ -1754,7 +1754,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
if (auto_demotion_disable_flags) if (auto_demotion_disable_flags)
auto_demotion_disable(); auto_demotion_disable();
if (disable_promotion_to_c1e) if (c1e_promotion == C1E_PROMOTION_ENABLE)
c1e_promotion_enable();
else if (c1e_promotion == C1E_PROMOTION_DISABLE)
c1e_promotion_disable(); c1e_promotion_disable();
return 0; return 0;
...@@ -1833,7 +1835,8 @@ static int __init intel_idle_init(void) ...@@ -1833,7 +1835,8 @@ static int __init intel_idle_init(void)
if (icpu) { if (icpu) {
cpuidle_state_table = icpu->state_table; cpuidle_state_table = icpu->state_table;
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags; auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
disable_promotion_to_c1e = icpu->disable_promotion_to_c1e; if (icpu->disable_promotion_to_c1e)
c1e_promotion = C1E_PROMOTION_DISABLE;
if (icpu->use_acpi || force_use_acpi) if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract(); intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) { } else if (!intel_idle_acpi_cst_extract()) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment