Commit 7745f03e authored by Len Brown, committed by Thomas Gleixner

x86/topology: Add CPUID.1F multi-die/package support

Some new systems have multiple software-visible die within each package.

Update Linux parsing of the Intel CPUID "Extended Topology Leaf" to handle
either CPUID.B or the new CPUID.1F.

Add cpuinfo_x86.cpu_die_id and cpuinfo_x86.x86_max_dies to store the result.

cpu_die_id will be non-zero only for multi-die/package systems.
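
For illustration only, here is a minimal user-space sketch of the same
enumeration. It is not part of this patch; it assumes GCC's <cpuid.h>
helpers, reports whichever CPU it happens to run on, and expects the SMT
and CORE levels that leaf 0xB/0x1F always enumerate:

	/*
	 * Hypothetical sketch: prefer CPUID.1F, fall back to CPUID.B, then
	 * derive core/die/package IDs from this CPU's x2APIC ID using the
	 * per-level shift widths, the same way the kernel change below does.
	 */
	#include <stdio.h>
	#include <cpuid.h>

	#define SMT_TYPE	1
	#define CORE_TYPE	2
	#define DIE_TYPE	5

	static int leaf_implemented(unsigned int leaf)
	{
		unsigned int a, b, c, d;

		__cpuid_count(leaf, 0, a, b, c, d);
		return b != 0 && ((c >> 8) & 0xff) == SMT_TYPE;
	}

	int main(void)
	{
		unsigned int max = __get_cpuid_max(0, NULL);
		unsigned int leaf, sub, a, b, c, d;
		unsigned int smt_shift = 0, core_shift = 0, die_shift = 0;
		unsigned int apic_id = 0;

		if (max >= 0x1f && leaf_implemented(0x1f))
			leaf = 0x1f;
		else if (max >= 0xb && leaf_implemented(0xb))
			leaf = 0xb;
		else
			return 1;	/* no extended topology leaf */

		for (sub = 0; ; sub++) {
			unsigned int type, shift;

			__cpuid_count(leaf, sub, a, b, c, d);
			type  = (c >> 8) & 0xff;	/* ECX[15:8]: level type */
			shift = a & 0x1f;		/* EAX[4:0]: bits below this level */
			if (sub == 0)
				apic_id = d;		/* EDX: this CPU's x2APIC ID */
			if (type == 0)			/* INVALID_TYPE ends the list */
				break;
			if (type == SMT_TYPE)
				smt_shift = shift;
			if (type == CORE_TYPE)
				core_shift = die_shift = shift;	/* no DIE level seen yet */
			if (type == DIE_TYPE)
				die_shift = shift;
		}

		/* Same mask arithmetic as detect_extended_topology() below. */
		printf("core %u die %u pkg %u\n",
		       (apic_id >> smt_shift)  & ((1u << (core_shift - smt_shift)) - 1),
		       (apic_id >> core_shift) & ((1u << (die_shift - core_shift)) - 1),
		       apic_id >> die_shift);
		return 0;
	}

The kernel change below does the same arithmetic with cpuid_count() and
apic->phys_pkg_id(), and divides the sibling counts to fill x86_max_cores
and x86_max_dies.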
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-doc@vger.kernel.org
Link: https://lkml.kernel.org/r/7b23d2d26d717b8e14ba137c94b70943f1ae4b5c.1557769318.git.len.brown@intel.com
parent a188339c
@@ -49,6 +49,10 @@ Package-related topology information in the kernel:
     The number of cores in a package. This information is retrieved via CPUID.
 
+  - cpuinfo_x86.x86_max_dies:
+
+    The number of dies in a package. This information is retrieved via CPUID.
+
   - cpuinfo_x86.phys_proc_id:
 
     The physical ID of the package. This information is retrieved via CPUID
...
@@ -106,6 +106,7 @@ struct cpuinfo_x86 {
 	unsigned long		loops_per_jiffy;
 	/* cpuid returned max cores value: */
 	u16			x86_max_cores;
+	u16			x86_max_dies;
 	u16			apicid;
 	u16			initial_apicid;
 	u16			x86_clflush_size;
@@ -117,6 +118,7 @@ struct cpuinfo_x86 {
 	u16			logical_proc_id;
 	/* Core id: */
 	u16			cpu_core_id;
+	u16			cpu_die_id;
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
...
@@ -15,33 +15,63 @@
 /* leaf 0xb SMT level */
 #define SMT_LEVEL	0
 
-/* leaf 0xb sub-leaf types */
+/* extended topology sub-leaf types */
 #define INVALID_TYPE	0
 #define SMT_TYPE	1
 #define CORE_TYPE	2
+#define DIE_TYPE	5
 
 #define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
 #define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
 #define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)
 
-int detect_extended_topology_early(struct cpuinfo_x86 *c)
-{
 #ifdef CONFIG_SMP
+/*
+ * Check if given CPUID extended toplogy "leaf" is implemented
+ */
+static int check_extended_topology_leaf(int leaf)
+{
 	unsigned int eax, ebx, ecx, edx;
 
-	if (c->cpuid_level < 0xb)
-		return -1;
-
-	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+	cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
 
-	/*
-	 * check if the cpuid leaf 0xb is actually implemented.
-	 */
 	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
 		return -1;
 
+	return 0;
+}
+/*
+ * Return best CPUID Extended Toplogy Leaf supported
+ */
+static int detect_extended_topology_leaf(struct cpuinfo_x86 *c)
+{
+	if (c->cpuid_level >= 0x1f) {
+		if (check_extended_topology_leaf(0x1f) == 0)
+			return 0x1f;
+	}
+
+	if (c->cpuid_level >= 0xb) {
+		if (check_extended_topology_leaf(0xb) == 0)
+			return 0xb;
+	}
+
+	return -1;
+}
+#endif
+
+int detect_extended_topology_early(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned int eax, ebx, ecx, edx;
+	int leaf;
+
+	leaf = detect_extended_topology_leaf(c);
+	if (leaf < 0)
+		return -1;
+
 	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
 
+	cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
 	/*
 	 * initial apic id, which also represents 32-bit extended x2apic id.
 	 */
@@ -52,7 +82,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
 }
 
 /*
- * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * Check for extended topology enumeration cpuid leaf, and if it
  * exists, use it for populating initial_apicid and cpu topology
  * detection.
  */
@@ -60,22 +90,28 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
-	unsigned int ht_mask_width, core_plus_mask_width;
+	unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
+	unsigned int die_select_mask, die_level_siblings;
+	int leaf;
 
-	if (detect_extended_topology_early(c) < 0)
+	leaf = detect_extended_topology_leaf(c);
+	if (leaf < 0)
 		return -1;
 
 	/*
 	 * Populate HT related information from sub-leaf level 0.
 	 */
-	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+	cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+	c->initial_apicid = edx;
 	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
 	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+	die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
+	die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 
 	sub_index = 1;
 	do {
-		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
+		cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);
 
 		/*
 		 * Check for the Core type in the implemented sub leaves.
@@ -83,23 +119,34 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
 			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
 			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
-			break;
+			die_level_siblings = core_level_siblings;
+			die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+		}
+		if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) {
+			die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
+			die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 		}
 
 		sub_index++;
 	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
 
 	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
+	die_select_mask = (~(-1 << die_plus_mask_width)) >>
+				core_plus_mask_width;
 
-	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
-						 & core_select_mask;
-	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid,
+				ht_mask_width) & core_select_mask;
+	c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
+				core_plus_mask_width) & die_select_mask;
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
+				die_plus_mask_width);
 	/*
 	 * Reinit the apicid, now that we have extended initial_apicid.
 	 */
 	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+	c->x86_max_dies = (die_level_siblings / core_level_siblings);
 #endif
 	return 0;
 }
...
@@ -389,6 +389,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 		if (c->phys_proc_id == o->phys_proc_id &&
+		    c->cpu_die_id == o->cpu_die_id &&
 		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
 			if (c->cpu_core_id == o->cpu_core_id)
 				return topology_sane(c, o, "smt");
@@ -400,6 +401,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 		}
 
 	} else if (c->phys_proc_id == o->phys_proc_id &&
+		   c->cpu_die_id == o->cpu_die_id &&
 		   c->cpu_core_id == o->cpu_core_id) {
 		return topology_sane(c, o, "smt");
 	}
...