Commit c9fdaba8 authored by Perry Yuan's avatar Perry Yuan Committed by Mario Limonciello

cpufreq: amd-pstate: switch boot_cpu_has() to cpu_feature_enabled()

Replace the usage of the deprecated boot_cpu_has() function with
the modern cpu_feature_enabled() function. The switch to cpu_feature_enabled()
ensures compatibility with the latest CPU feature detection mechanisms and
improves code maintainability.
Acked-by: Mario Limonciello <mario.limonciello@amd.com>
Suggested-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Perry Yuan <perry.yuan@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://lore.kernel.org/r/f1567593ac5e1d38343067e9c681a8c4b0707038.1718811234.git.perry.yuan@amd.com
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
parent 1d53f30b
...@@ -158,7 +158,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi) ...@@ -158,7 +158,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
* broken BIOS lack of nominal_freq and lowest_freq capabilities * broken BIOS lack of nominal_freq and lowest_freq capabilities
* definition in ACPI tables * definition in ACPI tables
*/ */
if (boot_cpu_has(X86_FEATURE_ZEN2)) { if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
quirks = dmi->driver_data; quirks = dmi->driver_data;
pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident); pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
return 1; return 1;
...@@ -200,7 +200,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached) ...@@ -200,7 +200,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
u64 epp; u64 epp;
int ret; int ret;
if (boot_cpu_has(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
if (!cppc_req_cached) { if (!cppc_req_cached) {
epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
&cppc_req_cached); &cppc_req_cached);
...@@ -253,7 +253,7 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) ...@@ -253,7 +253,7 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
int ret; int ret;
struct cppc_perf_ctrls perf_ctrls; struct cppc_perf_ctrls perf_ctrls;
if (boot_cpu_has(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
u64 value = READ_ONCE(cpudata->cppc_req_cached); u64 value = READ_ONCE(cpudata->cppc_req_cached);
value &= ~GENMASK_ULL(31, 24); value &= ~GENMASK_ULL(31, 24);
...@@ -752,7 +752,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf) ...@@ -752,7 +752,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
{ {
int ret; int ret;
if (boot_cpu_has(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
u64 cap1; u64 cap1;
ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1); ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
...@@ -991,7 +991,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) ...@@ -991,7 +991,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
/* It will be updated by governor */ /* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq; policy->cur = policy->cpuinfo.min_freq;
if (boot_cpu_has(X86_FEATURE_CPPC)) if (cpu_feature_enabled(X86_FEATURE_CPPC))
policy->fast_switch_possible = true; policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
...@@ -1224,7 +1224,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode) ...@@ -1224,7 +1224,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
cppc_state = mode; cppc_state = mode;
if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE) if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
return 0; return 0;
for_each_present_cpu(cpu) { for_each_present_cpu(cpu) {
...@@ -1453,7 +1453,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) ...@@ -1453,7 +1453,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
else else
policy->policy = CPUFREQ_POLICY_POWERSAVE; policy->policy = CPUFREQ_POLICY_POWERSAVE;
if (boot_cpu_has(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret) if (ret)
return ret; return ret;
...@@ -1543,7 +1543,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy) ...@@ -1543,7 +1543,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
epp = 0; epp = 0;
/* Set initial EPP value */ /* Set initial EPP value */
if (boot_cpu_has(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
value &= ~GENMASK_ULL(31, 24); value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24; value |= (u64)epp << 24;
} }
...@@ -1582,7 +1582,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata) ...@@ -1582,7 +1582,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
value = READ_ONCE(cpudata->cppc_req_cached); value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf); max_perf = READ_ONCE(cpudata->highest_perf);
if (boot_cpu_has(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else { } else {
perf_ctrls.max_perf = max_perf; perf_ctrls.max_perf = max_perf;
...@@ -1616,7 +1616,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy) ...@@ -1616,7 +1616,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
value = READ_ONCE(cpudata->cppc_req_cached); value = READ_ONCE(cpudata->cppc_req_cached);
mutex_lock(&amd_pstate_limits_lock); mutex_lock(&amd_pstate_limits_lock);
if (boot_cpu_has(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN; cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
/* Set max perf same as min perf */ /* Set max perf same as min perf */
...@@ -1819,7 +1819,7 @@ static int __init amd_pstate_init(void) ...@@ -1819,7 +1819,7 @@ static int __init amd_pstate_init(void)
*/ */
if (amd_pstate_acpi_pm_profile_undefined() || if (amd_pstate_acpi_pm_profile_undefined() ||
amd_pstate_acpi_pm_profile_server() || amd_pstate_acpi_pm_profile_server() ||
!boot_cpu_has(X86_FEATURE_CPPC)) { !cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_info("driver load is disabled, boot with specific mode to enable this\n"); pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV; return -ENODEV;
} }
...@@ -1838,7 +1838,7 @@ static int __init amd_pstate_init(void) ...@@ -1838,7 +1838,7 @@ static int __init amd_pstate_init(void)
} }
/* capability check */ /* capability check */
if (boot_cpu_has(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n"); pr_debug("AMD CPPC MSR based functionality is supported\n");
if (cppc_state != AMD_PSTATE_ACTIVE) if (cppc_state != AMD_PSTATE_ACTIVE)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment