Commit 0dcab41d authored by Tony Luck, committed by Borislav Petkov

x86/cpu: Merge Intel and AMD ppin_init() functions

The code to decide whether a system supports the PPIN (Protected
Processor Inventory Number) MSR was cloned from the Intel
implementation. Apart from the X86_FEATURE bit and the MSR numbers it is
identical.

Merge the two functions into common x86 code, but use x86_match_cpu()
instead of the switch (c->x86_model) that was used by the old Intel
code.

No functional change.
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20220131230111.2004669-2-tony.luck@intel.com
parent 7f99cb5e
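
For orientation, the sketch below is not part of this commit; it illustrates how code elsewhere in the kernel could consume the result of the merged ppin_init(): once the vendor's PPIN feature bit has been validated and set, the inventory number itself is read from the companion value MSR (MSR_PPIN on Intel, MSR_AMD_PPIN on AMD, the registers paired with the *_PPIN_CTL control MSRs). The helper name is hypothetical.

/*
 * Illustrative sketch only -- not part of this patch. Reads the PPIN
 * value once ppin_init() has validated PPIN_CTL and set the vendor's
 * feature bit; returns 0 if no readable PPIN is available.
 */
static u64 read_ppin_if_available(struct cpuinfo_x86 *c)
{
	u64 ppin = 0;

	if (cpu_has(c, X86_FEATURE_INTEL_PPIN))
		rdmsrl_safe(MSR_PPIN, &ppin);
	else if (cpu_has(c, X86_FEATURE_AMD_PPIN))
		rdmsrl_safe(MSR_AMD_PPIN, &ppin);

	return ppin;
}

A consumer like this stays vendor-agnostic apart from the feature bit and MSR number, which is exactly the pair of values the new ppin_info table carries.
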
@@ -394,35 +394,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
 }
 
-static void amd_detect_ppin(struct cpuinfo_x86 *c)
-{
-	unsigned long long val;
-
-	if (!cpu_has(c, X86_FEATURE_AMD_PPIN))
-		return;
-
-	/* When PPIN is defined in CPUID, still need to check PPIN_CTL MSR */
-	if (rdmsrl_safe(MSR_AMD_PPIN_CTL, &val))
-		goto clear_ppin;
-
-	/* PPIN is locked in disabled mode, clear feature bit */
-	if ((val & 3UL) == 1UL)
-		goto clear_ppin;
-
-	/* If PPIN is disabled, try to enable it */
-	if (!(val & 2UL)) {
-		wrmsrl_safe(MSR_AMD_PPIN_CTL, val | 2UL);
-		rdmsrl_safe(MSR_AMD_PPIN_CTL, &val);
-	}
-
-	/* If PPIN_EN bit is 1, return from here; otherwise fall through */
-	if (val & 2UL)
-		return;
-
-clear_ppin:
-	clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
-}
-
 u32 amd_get_nodes_per_socket(void)
 {
 	return nodes_per_socket;
@@ -947,7 +918,6 @@ static void init_amd(struct cpuinfo_x86 *c)
 	amd_detect_cmp(c);
 	amd_get_topology(c);
 	srat_detect_node(c);
-	amd_detect_ppin(c);
 	init_amd_cacheinfo(c);
...
@@ -88,6 +88,78 @@ EXPORT_SYMBOL_GPL(get_llc_id);
 /* L2 cache ID of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID;
 
+static struct ppin_info {
+	int	feature;
+	int	msr_ppin_ctl;
+} ppin_info[] = {
+	[X86_VENDOR_INTEL] = {
+		.feature = X86_FEATURE_INTEL_PPIN,
+		.msr_ppin_ctl = MSR_PPIN_CTL,
+	},
+	[X86_VENDOR_AMD] = {
+		.feature = X86_FEATURE_AMD_PPIN,
+		.msr_ppin_ctl = MSR_AMD_PPIN_CTL,
+	},
+};
+
+static const struct x86_cpu_id ppin_cpuids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
+
+	/* Legacy models without CPUID enumeration */
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
+
+	{}
+};
+
+static void ppin_init(struct cpuinfo_x86 *c)
+{
+	const struct x86_cpu_id *id;
+	unsigned long long val;
+	struct ppin_info *info;
+
+	id = x86_match_cpu(ppin_cpuids);
+	if (!id)
+		return;
+
+	/*
+	 * Testing the presence of the MSR is not enough. Need to check
+	 * that the PPIN_CTL allows reading of the PPIN.
+	 */
+	info = (struct ppin_info *)id->driver_data;
+	if (rdmsrl_safe(info->msr_ppin_ctl, &val))
+		goto clear_ppin;
+
+	if ((val & 3UL) == 1UL) {
+		/* PPIN locked in disabled mode */
+		goto clear_ppin;
+	}
+
+	/* If PPIN is disabled, try to enable */
+	if (!(val & 2UL)) {
+		wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
+		rdmsrl_safe(info->msr_ppin_ctl, &val);
+	}
+
+	/* Is the enable bit set? */
+	if (val & 2UL) {
+		set_cpu_cap(c, info->feature);
+		return;
+	}
+
+clear_ppin:
+	clear_cpu_cap(c, info->feature);
+}
+
 /* correctly size the local cpu masks */
 void __init setup_cpu_local_masks(void)
 {
@@ -1655,6 +1727,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
 	}
 
+	ppin_init(c);
+
 	/* Init Machine Check Exception if available. */
 	mcheck_cpu_init(c);
...
@@ -470,47 +470,6 @@ void intel_clear_lmce(void)
 	wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
 }
 
-static void intel_ppin_init(struct cpuinfo_x86 *c)
-{
-	unsigned long long val;
-
-	/*
-	 * Even if testing the presence of the MSR would be enough, we don't
-	 * want to risk the situation where other models reuse this MSR for
-	 * other purposes.
-	 */
-	switch (c->x86_model) {
-	case INTEL_FAM6_IVYBRIDGE_X:
-	case INTEL_FAM6_HASWELL_X:
-	case INTEL_FAM6_BROADWELL_D:
-	case INTEL_FAM6_BROADWELL_X:
-	case INTEL_FAM6_SKYLAKE_X:
-	case INTEL_FAM6_ICELAKE_X:
-	case INTEL_FAM6_ICELAKE_D:
-	case INTEL_FAM6_SAPPHIRERAPIDS_X:
-	case INTEL_FAM6_XEON_PHI_KNL:
-	case INTEL_FAM6_XEON_PHI_KNM:
-
-		if (rdmsrl_safe(MSR_PPIN_CTL, &val))
-			return;
-
-		if ((val & 3UL) == 1UL) {
-			/* PPIN locked in disabled mode */
-			return;
-		}
-
-		/* If PPIN is disabled, try to enable */
-		if (!(val & 2UL)) {
-			wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
-			rdmsrl_safe(MSR_PPIN_CTL, &val);
-		}
-
-		/* Is the enable bit set? */
-		if (val & 2UL)
-			set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
-	}
-}
-
 /*
  * Enable additional error logs from the integrated
  * memory controller on processors that support this.
  */
@@ -535,7 +494,6 @@ void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_cmci();
 	intel_init_lmce();
-	intel_ppin_init(c);
 	intel_imc_init(c);
 }
...