Commit a05a4e24 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 topology discovery improvements from Ingo Molnar:
 "These changes improve topology discovery on AMD CPUs.

  Right now this feeds information displayed in
  /sys/devices/system/cpu/cpuX/cache/indexY/* - but in the future we
  could use this to set up a better scheduling topology."

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, cacheinfo: Base cache sharing info on CPUID 0x8000001d on AMD
  x86, cacheinfo: Make use of CPUID 0x8000001d for cache information on AMD
  x86, cacheinfo: Determine number of cache leafs using CPUID 0x8000001d on AMD
  x86: Add cpu_has_topoext
parents e9a5a919 27d3a8a2
...@@ -311,6 +311,7 @@ extern const char * const x86_power_flags[32]; ...@@ -311,6 +311,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1 # define cpu_has_invlpg 1
......
...@@ -187,7 +187,7 @@ extern void print_cpu_info(struct cpuinfo_x86 *); ...@@ -187,7 +187,7 @@ extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *); void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves; extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void detect_extended_topology(struct cpuinfo_x86 *c); extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c); extern void detect_ht(struct cpuinfo_x86 *c);
......
...@@ -304,7 +304,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) ...@@ -304,7 +304,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
int cpu = smp_processor_id(); int cpu = smp_processor_id();
/* get information required for multi-node processors */ /* get information required for multi-node processors */
if (cpu_has(c, X86_FEATURE_TOPOEXT)) { if (cpu_has_topoext) {
u32 eax, ebx, ecx, edx; u32 eax, ebx, ecx, edx;
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
...@@ -657,12 +657,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) ...@@ -657,12 +657,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
detect_ht(c); detect_ht(c);
#endif #endif
if (c->extended_cpuid_level >= 0x80000006) { init_amd_cacheinfo(c);
if (cpuid_edx(0x80000006) & 0xf000)
num_cache_leaves = 4;
else
num_cache_leaves = 3;
}
if (c->x86 >= 0xf) if (c->x86 >= 0xf)
set_cpu_cap(c, X86_FEATURE_K8); set_cpu_cap(c, X86_FEATURE_K8);
......
...@@ -538,7 +538,11 @@ __cpuinit cpuid4_cache_lookup_regs(int index, ...@@ -538,7 +538,11 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
unsigned edx; unsigned edx;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
amd_cpuid4(index, &eax, &ebx, &ecx); if (cpu_has_topoext)
cpuid_count(0x8000001d, index, &eax.full,
&ebx.full, &ecx.full, &edx);
else
amd_cpuid4(index, &eax, &ebx, &ecx);
amd_init_l3_cache(this_leaf, index); amd_init_l3_cache(this_leaf, index);
} else { } else {
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
...@@ -557,21 +561,39 @@ __cpuinit cpuid4_cache_lookup_regs(int index, ...@@ -557,21 +561,39 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
return 0; return 0;
} }
/*
 * Count the cache leaves a CPU reports via CPUID.
 *
 * Queries CPUID leaf 4 (Intel-style) or 0x8000001d (AMD with the
 * topology-extensions feature) with an increasing sub-leaf index until
 * the reported cache type is CACHE_TYPE_NULL; the number of valid
 * sub-leaves seen before that is the leaf count.
 */
static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	union _cpuid4_leaf_eax cache_eax;
	unsigned int eax, ebx, ecx, edx;
	/* AMD exposes cache info at 0x8000001d; everyone else at leaf 4 */
	unsigned int op = (c->x86_vendor == X86_VENDOR_AMD) ? 0x8000001d : 4;
	int leaves = 0;

	for (;;) {
		cpuid_count(op, leaves, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
		if (cache_eax.split.type == CACHE_TYPE_NULL)
			break;
		leaves++;
	}

	return leaves;
}
/*
 * Initialize num_cache_leaves for an AMD CPU.
 *
 * With topology extensions (CPUID 0x8000001d) the leaf count is
 * enumerated directly; otherwise fall back to the legacy heuristic
 * based on the L3 size field of CPUID 0x80000006 (4 leaves when an
 * L3 is present, 3 when not).
 */
void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
	if (cpu_has_topoext) {
		num_cache_leaves = find_num_cache_leaves(c);
		return;
	}

	/* No topology extensions: need at least CPUID 0x80000006 */
	if (c->extended_cpuid_level < 0x80000006)
		return;

	/* Non-zero L3 size bits mean a 4th (L3) cache leaf exists */
	num_cache_leaves = (cpuid_edx(0x80000006) & 0xf000) ? 4 : 3;
}
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{ {
/* Cache sizes */ /* Cache sizes */
...@@ -588,7 +610,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) ...@@ -588,7 +610,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
if (is_initialized == 0) { if (is_initialized == 0) {
/* Init num_cache_leaves from boot CPU */ /* Init num_cache_leaves from boot CPU */
num_cache_leaves = find_num_cache_leaves(); num_cache_leaves = find_num_cache_leaves(c);
is_initialized++; is_initialized++;
} }
...@@ -728,37 +750,50 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); ...@@ -728,37 +750,50 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
/*
 * Build the shared_cpu_map for one cache leaf on AMD.
 *
 * With topology extensions, CPUID 0x8000001d reports how many threads
 * share each cache; CPUs whose APIC IDs fall into the same aligned
 * window of that size share the leaf. Without topology extensions only
 * the L3 (index 3) is handled, using the last-level-cache domain mask.
 *
 * Returns 1 when the map was set up here, 0 when the caller should use
 * the generic fallback.
 */
static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf;
	int other, sibling;

	if (cpu_has_topoext) {
		unsigned int apicid, num_sharing, first, last;

		if (!per_cpu(ici_cpuid4_info, cpu))
			return 0;

		/* num_threads_sharing is encoded as (count - 1) */
		this_leaf = CPUID4_INFO_IDX(cpu, index);
		num_sharing = this_leaf->base.eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % num_sharing);
		last = first + num_sharing - 1;

		for_each_online_cpu(other) {
			apicid = cpu_data(other).apicid;
			if (apicid < first || apicid > last)
				continue;
			if (!per_cpu(ici_cpuid4_info, other))
				continue;
			this_leaf = CPUID4_INFO_IDX(other, index);

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if (apicid < first || apicid > last)
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return 1;
	}

	if (index == 3) {
		/* Legacy path: L3 is shared by the whole LLC domain */
		for_each_cpu(other, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, other))
				continue;
			this_leaf = CPUID4_INFO_IDX(other, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return 1;
	}

	return 0;
}
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
......
...@@ -332,7 +332,7 @@ do { \ ...@@ -332,7 +332,7 @@ do { \
static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{ {
if (cpu_has(c, X86_FEATURE_TOPOEXT)) { if (cpu_has_topoext) {
int cpu1 = c->cpu_index, cpu2 = o->cpu_index; int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
if (c->phys_proc_id == o->phys_proc_id && if (c->phys_proc_id == o->phys_proc_id &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment