Commit f3bdd82c authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq'

Merge cpufreq fixes for 6.8-rc2:

 - Fix the handling of the scaling_max_freq and scaling_min_freq sysfs
   attributes in the AMD P-state cpufreq driver (Mario Limonciello); a
   simplified sketch of the ordering issue addressed there is included just
   before the diff below.

 - Make the intel_pstate cpufreq driver avoid unnecessary computation of
   the HWP performance level corresponding to a given frequency when that
   level is already known, which also helps to avoid artificially reducing
   the maximum CPU capacity on some systems (Rafael J. Wysocki); a worked
   rounding example is included after the diff below.

* pm-cpufreq:
  cpufreq/amd-pstate: Fix setting scaling max/min freq values
  cpufreq: intel_pstate: Refine computation of P-state for given frequency
parents d3b93fe1 22fb4f04
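The amd-pstate hunk below moves the WRITE_ONCE() updates of the cached limit values ahead of the clamp that consumes them, so a freshly written scaling_max_freq or scaling_min_freq constrains the current request instead of only the next one. A minimal standalone sketch of that ordering issue, with purely illustrative names (the driver itself operates on cpudata fields via WRITE_ONCE()/clamp_t()):

/* Illustrative sketch only -- not the driver code. */
#include <stdio.h>

struct limits {
	unsigned long min_limit_perf;
	unsigned long max_limit_perf;
};

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/*
 * Buggy ordering: clamp against the limits cached by the previous update,
 * then store the new ones, so a new limit lags by one policy update.
 */
static unsigned long request_buggy(struct limits *l, unsigned long new_max,
				   unsigned long want)
{
	unsigned long granted = clamp_ul(want, l->min_limit_perf, l->max_limit_perf);

	l->max_limit_perf = new_max;	/* too late for this request */
	return granted;
}

/* Fixed ordering: publish the new limits first, then clamp against them. */
static unsigned long request_fixed(struct limits *l, unsigned long new_max,
				   unsigned long want)
{
	l->max_limit_perf = new_max;
	return clamp_ul(want, l->min_limit_perf, l->max_limit_perf);
}

int main(void)
{
	struct limits a = { .min_limit_perf = 20, .max_limit_perf = 255 };
	struct limits b = a;

	/* The user lowers the max limit to 128 and asks for full performance. */
	printf("buggy: %lu\n", request_buggy(&a, 128, 255));	/* 255 (stale limit) */
	printf("fixed: %lu\n", request_fixed(&b, 128, 255));	/* 128 (new limit)   */
	return 0;
}

Compiled as plain C, the buggy variant grants the stale limit (255) while the fixed variant honors the new one (128).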
@@ -1232,14 +1232,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 	max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
 	min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
 
+	WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+	WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+
 	max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
 			cpudata->max_limit_perf);
 	min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
 			cpudata->max_limit_perf);
-
-	WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
-	WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-
 	value = READ_ONCE(cpudata->cppc_req_cached);
 
 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -529,6 +529,30 @@ static int intel_pstate_cppc_get_scaling(int cpu)
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
 
+static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
+					unsigned int relation)
+{
+	if (freq == cpu->pstate.turbo_freq)
+		return cpu->pstate.turbo_pstate;
+
+	if (freq == cpu->pstate.max_freq)
+		return cpu->pstate.max_pstate;
+
+	switch (relation) {
+	case CPUFREQ_RELATION_H:
+		return freq / cpu->pstate.scaling;
+	case CPUFREQ_RELATION_C:
+		return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
+	}
+
+	return DIV_ROUND_UP(freq, cpu->pstate.scaling);
+}
+
+static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
+{
+	return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
+}
+
 /**
  * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
  * @cpu: Target CPU.
@@ -546,6 +570,7 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
 	int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
 	int scaling = cpu->pstate.scaling;
+	int freq;
 
 	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
 	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
@@ -559,16 +584,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
 					 perf_ctl_scaling);
 
-	cpu->pstate.max_pstate_physical =
-			DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
-				     scaling);
+	freq = perf_ctl_max_phys * perf_ctl_scaling;
+	cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
 
-	cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+	freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+	cpu->pstate.min_freq = freq;
 	/*
 	 * Cast the min P-state value retrieved via pstate_funcs.get_min() to
 	 * the effective range of HWP performance levels.
 	 */
-	cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
+	cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
 }
 
 static inline void update_turbo_state(void)
@@ -2528,13 +2553,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
 	 * abstract values to represent performance rather than pure ratios.
 	 */
 	if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
-		int scaling = cpu->pstate.scaling;
 		int freq;
 
 		freq = max_policy_perf * perf_ctl_scaling;
-		max_policy_perf = DIV_ROUND_UP(freq, scaling);
+		max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
 		freq = min_policy_perf * perf_ctl_scaling;
-		min_policy_perf = DIV_ROUND_UP(freq, scaling);
+		min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
 	}
 
 	pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -2908,18 +2932,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
 
 	cpufreq_freq_transition_begin(policy, &freqs);
 
-	switch (relation) {
-	case CPUFREQ_RELATION_L:
-		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
-		break;
-	case CPUFREQ_RELATION_H:
-		target_pstate = freqs.new / cpu->pstate.scaling;
-		break;
-	default:
-		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
-		break;
-	}
-
+	target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
 
 	freqs.new = target_pstate * cpu->pstate.scaling;
@@ -2937,7 +2950,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	update_turbo_state();
 
-	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+	target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
 	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
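For reference, the relation-aware rounding introduced above by intel_pstate_freq_to_hwp_rel() can be illustrated with a small standalone program. This is a sketch only: the RELATION_* constants and the scaling value stand in for CPUFREQ_RELATION_L/H/C and cpu->pstate.scaling, and the real helper additionally returns turbo_pstate or max_pstate directly when the requested frequency equals pstate.turbo_freq or pstate.max_freq, so those known frequencies map exactly to their P-states instead of going through the rounding below (which, per the merge description, also helps avoid artificially reducing the maximum CPU capacity on some systems).

/* Standalone illustration of the three rounding relations -- not the driver code. */
#include <stdio.h>

#define RELATION_L 0	/* lowest level whose frequency is at or above the target  */
#define RELATION_H 1	/* highest level whose frequency is at or below the target */
#define RELATION_C 2	/* level whose frequency is closest to the target          */

static int freq_to_level(int freq_khz, int scaling, int relation)
{
	switch (relation) {
	case RELATION_H:
		return freq_khz / scaling;			/* round down */
	case RELATION_C:
		return (freq_khz + scaling / 2) / scaling;	/* round to closest */
	}
	return (freq_khz + scaling - 1) / scaling;		/* round up (RELATION_L) */
}

int main(void)
{
	int scaling = 100000;	/* kHz per performance level (illustrative) */
	int freq = 2450000;	/* 2.45 GHz target frequency */

	printf("L: %d\n", freq_to_level(freq, scaling, RELATION_L));	/* 25 */
	printf("H: %d\n", freq_to_level(freq, scaling, RELATION_H));	/* 24 */
	printf("C: %d\n", freq_to_level(freq, scaling, RELATION_C));	/* 25 */
	return 0;
}

In the patch itself, intel_cpufreq_target() passes its caller-supplied relation through, while the other call sites use intel_pstate_freq_to_hwp(), i.e. CPUFREQ_RELATION_L, matching the DIV_ROUND_UP() behavior they replace.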