Commit f3fa8ebc authored by Rohit Seth's avatar Rohit Seth Committed by Linus Torvalds

[PATCH] x86_64: moving phys_proc_id and cpu_core_id to cpuinfo_x86

Most of the fields of cpuinfo are defined in cpuinfo_x86 structure.
This patch moves the phys_proc_id and cpu_core_id for each processor to
cpuinfo_x86 structure as well.
Signed-off-by: Rohit Seth <rohitseth@google.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e465058d
...@@ -115,7 +115,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) ...@@ -115,7 +115,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
per_cpu(bank_map, cpu) |= (1 << bank); per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (shared_bank[bank] && cpu_core_id[cpu]) if (shared_bank[bank] && c->cpu_core_id)
continue; continue;
#endif #endif
...@@ -323,10 +323,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, int bank) ...@@ -323,10 +323,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
struct threshold_bank *b = NULL; struct threshold_bank *b = NULL;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */ if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
char name[16]; char name[16];
unsigned lcpu = first_cpu(cpu_core_map[cpu]); unsigned lcpu = first_cpu(cpu_core_map[cpu]);
if (cpu_core_id[lcpu]) if (cpu_data[lcpu].cpu_core_id)
goto out; /* first core not up yet */ goto out; /* first core not up yet */
b = per_cpu(threshold_banks, lcpu)[bank]; b = per_cpu(threshold_banks, lcpu)[bank];
...@@ -434,7 +434,7 @@ static __cpuinit int threshold_create_symlinks(unsigned int cpu) ...@@ -434,7 +434,7 @@ static __cpuinit int threshold_create_symlinks(unsigned int cpu)
int bank, err = 0; int bank, err = 0;
unsigned int lcpu = 0; unsigned int lcpu = 0;
if (cpu_core_id[cpu]) if (cpu_data[cpu].cpu_core_id)
return 0; return 0;
for_each_cpu_mask(lcpu, cpu_core_map[cpu]) { for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
if (lcpu == cpu) if (lcpu == cpu)
...@@ -455,7 +455,7 @@ static __cpuinit void threshold_remove_symlinks(unsigned int cpu) ...@@ -455,7 +455,7 @@ static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
{ {
int bank; int bank;
unsigned int lcpu = 0; unsigned int lcpu = 0;
if (cpu_core_id[cpu]) if (cpu_data[cpu].cpu_core_id)
return; return;
for_each_cpu_mask(lcpu, cpu_core_map[cpu]) { for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
if (lcpu == cpu) if (lcpu == cpu)
......
...@@ -680,9 +680,6 @@ void __init gart_iommu_init(void) ...@@ -680,9 +680,6 @@ void __init gart_iommu_init(void)
dma_ops = &gart_dma_ops; dma_ops = &gart_dma_ops;
} }
/* Must execute after PCI subsystem */
fs_initcall(gart_iommu_init);
void gart_parse_options(char *p) void gart_parse_options(char *p)
{ {
int arg; int arg;
......
...@@ -785,9 +785,9 @@ static int nearby_node(int apicid) ...@@ -785,9 +785,9 @@ static int nearby_node(int apicid)
static void __init amd_detect_cmp(struct cpuinfo_x86 *c) static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
int cpu = smp_processor_id();
unsigned bits; unsigned bits;
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
int cpu = smp_processor_id();
int node = 0; int node = 0;
unsigned apicid = hard_smp_processor_id(); unsigned apicid = hard_smp_processor_id();
#endif #endif
...@@ -805,12 +805,12 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) ...@@ -805,12 +805,12 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
} }
/* Low order bits define the core id (index of core in socket) */ /* Low order bits define the core id (index of core in socket) */
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1); c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
/* Convert the APIC ID into the socket ID */ /* Convert the APIC ID into the socket ID */
phys_proc_id[cpu] = phys_pkg_id(bits); c->phys_proc_id = phys_pkg_id(bits);
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
node = phys_proc_id[cpu]; node = c->phys_proc_id;
if (apicid_to_node[apicid] != NUMA_NO_NODE) if (apicid_to_node[apicid] != NUMA_NO_NODE)
node = apicid_to_node[apicid]; node = apicid_to_node[apicid];
if (!node_online(node)) { if (!node_online(node)) {
...@@ -823,7 +823,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) ...@@ -823,7 +823,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
but in the same order as the HT nodeids. but in the same order as the HT nodeids.
If that doesn't result in a usable node fall back to the If that doesn't result in a usable node fall back to the
path for the previous case. */ path for the previous case. */
int ht_nodeid = apicid - (phys_proc_id[0] << bits); int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
if (ht_nodeid >= 0 && if (ht_nodeid >= 0 &&
apicid_to_node[ht_nodeid] != NUMA_NO_NODE) apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = apicid_to_node[ht_nodeid]; node = apicid_to_node[ht_nodeid];
...@@ -834,7 +834,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) ...@@ -834,7 +834,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
numa_set_node(cpu, node); numa_set_node(cpu, node);
printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n", printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]); cpu, apicid, c->x86_max_cores, node, c->cpu_core_id);
#endif #endif
#endif #endif
} }
...@@ -905,7 +905,6 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) ...@@ -905,7 +905,6 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx; u32 eax, ebx, ecx, edx;
int index_msb, core_bits; int index_msb, core_bits;
int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx); cpuid(1, &eax, &ebx, &ecx, &edx);
...@@ -926,10 +925,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) ...@@ -926,10 +925,10 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
} }
index_msb = get_count_order(smp_num_siblings); index_msb = get_count_order(smp_num_siblings);
phys_proc_id[cpu] = phys_pkg_id(index_msb); c->phys_proc_id = phys_pkg_id(index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n", printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]); c->phys_proc_id);
smp_num_siblings = smp_num_siblings / c->x86_max_cores; smp_num_siblings = smp_num_siblings / c->x86_max_cores;
...@@ -937,12 +936,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c) ...@@ -937,12 +936,12 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
core_bits = get_count_order(c->x86_max_cores); core_bits = get_count_order(c->x86_max_cores);
cpu_core_id[cpu] = phys_pkg_id(index_msb) & c->cpu_core_id = phys_pkg_id(index_msb) &
((1 << core_bits) - 1); ((1 << core_bits) - 1);
if (c->x86_max_cores > 1) if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n", printk(KERN_INFO "CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]); c->cpu_core_id);
} }
#endif #endif
} }
...@@ -1080,7 +1079,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) ...@@ -1080,7 +1079,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif #endif
} }
...@@ -1288,9 +1287,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1288,9 +1287,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (smp_num_siblings * c->x86_max_cores > 1) { if (smp_num_siblings * c->x86_max_cores > 1) {
int cpu = c - cpu_data; int cpu = c - cpu_data;
seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]); seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu])); seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]); seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
} }
#endif #endif
......
...@@ -63,10 +63,6 @@ ...@@ -63,10 +63,6 @@
/* Number of siblings per CPU package */ /* Number of siblings per CPU package */
int smp_num_siblings = 1; int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* core ID of each logical CPU */
u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* Last level cache ID of each logical CPU */ /* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
...@@ -472,8 +468,8 @@ static inline void set_cpu_sibling_map(int cpu) ...@@ -472,8 +468,8 @@ static inline void set_cpu_sibling_map(int cpu)
if (smp_num_siblings > 1) { if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) { for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i] && if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
cpu_core_id[cpu] == cpu_core_id[i]) { c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]); cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]); cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]); cpu_set(i, cpu_core_map[cpu]);
...@@ -500,7 +496,7 @@ static inline void set_cpu_sibling_map(int cpu) ...@@ -500,7 +496,7 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(i, c[cpu].llc_shared_map); cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map); cpu_set(cpu, c[i].llc_shared_map);
} }
if (phys_proc_id[cpu] == phys_proc_id[i]) { if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
cpu_set(i, cpu_core_map[cpu]); cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]); cpu_set(cpu, cpu_core_map[i]);
/* /*
...@@ -1201,8 +1197,8 @@ static void remove_siblinginfo(int cpu) ...@@ -1201,8 +1197,8 @@ static void remove_siblinginfo(int cpu)
cpu_clear(cpu, cpu_sibling_map[sibling]); cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]); cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]); cpus_clear(cpu_core_map[cpu]);
phys_proc_id[cpu] = BAD_APICID; c[cpu].phys_proc_id = 0;
cpu_core_id[cpu] = BAD_APICID; c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map); cpu_clear(cpu, cpu_sibling_setup_map);
} }
......
...@@ -69,7 +69,11 @@ struct cpuinfo_x86 { ...@@ -69,7 +69,11 @@ struct cpuinfo_x86 {
cpumask_t llc_shared_map; /* cpus sharing the last level cache */ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
#endif #endif
__u8 apicid; __u8 apicid;
#ifdef CONFIG_SMP
__u8 booted_cores; /* number of cores as seen by OS */ __u8 booted_cores; /* number of cores as seen by OS */
__u8 phys_proc_id; /* Physical Processor id. */
__u8 cpu_core_id; /* Core id. */
#endif
} ____cacheline_aligned; } ____cacheline_aligned;
#define X86_VENDOR_INTEL 0 #define X86_VENDOR_INTEL 0
......
...@@ -53,8 +53,6 @@ extern int smp_call_function_single(int cpuid, void (*func) (void *info), ...@@ -53,8 +53,6 @@ extern int smp_call_function_single(int cpuid, void (*func) (void *info),
extern cpumask_t cpu_sibling_map[NR_CPUS]; extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS];
extern u8 phys_proc_id[NR_CPUS];
extern u8 cpu_core_id[NR_CPUS];
extern u8 cpu_llc_id[NR_CPUS]; extern u8 cpu_llc_id[NR_CPUS];
#define SMP_TRAMPOLINE_BASE 0x6000 #define SMP_TRAMPOLINE_BASE 0x6000
......
...@@ -55,10 +55,8 @@ extern int __node_distance(int, int); ...@@ -55,10 +55,8 @@ extern int __node_distance(int, int);
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) \ #define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
(phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
#define topology_core_id(cpu) \
(cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
#define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment