Commit a6c422cc authored by Glauber de Oliveira Costa, committed by Ingo Molnar

x86: fill cpu to apicid and present map in mpparse

This is the way x86_64 does it, and it complements the already
present patch that does the bios cpu to apicid mapping here.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 73bf102b
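
In short, MP_processor_info() now assigns a logical CPU number itself: it takes the complement of cpu_present_map, picks the first free id, forces the boot processor to logical CPU 0 (so x86_bios_cpu_apicid stays ordered by logical CPU number), and marks the CPU present right away. The following is only an illustrative, self-contained userspace sketch of that allocation logic; cpumask_t, cpus_complement(), first_cpu() and cpu_set() are kernel internals and are mimicked here with a plain boolean array.

/* Sketch only: mimics the logical-cpu assignment now done in MP_processor_info(). */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static bool present[NR_CPUS];           /* stand-in for cpu_present_map */

/* cpus_complement() + first_cpu(): lowest logical id not yet present */
static int first_absent_cpu(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (!present[cpu])
			return cpu;
	return NR_CPUS;
}

static int mp_assign_cpu(bool bootprocessor)
{
	int cpu = first_absent_cpu();

	/* the BSP must be logical CPU 0 so the apicid tables stay
	 * ordered by logical cpu number */
	if (bootprocessor)
		cpu = 0;

	present[cpu] = true;             /* cpu_set(cpu, cpu_present_map) */
	return cpu;
}

int main(void)
{
	/* MP table lists the BSP first, then the APs */
	printf("BSP -> logical CPU %d\n", mp_assign_cpu(true));
	printf("AP  -> logical CPU %d\n", mp_assign_cpu(false));
	printf("AP  -> logical CPU %d\n", mp_assign_cpu(false));
	return 0;
}

With the map filled at MP-table parse time, do_boot_cpu() no longer needs to write x86_cpu_to_apicid itself, and smp_boot_cpus() can look up the logical id for a given apicid by scanning per_cpu(x86_cpu_to_apicid, cpu), as the diff below shows.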
@@ -105,7 +105,8 @@ static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinit
 static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
 {
-	int ver, apicid;
+	int ver, apicid, cpu;
+	cpumask_t tmp_map;
 	physid_mask_t phys_cpu;

 	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
@@ -198,6 +199,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
 	cpu_set(num_processors, cpu_possible_map);
 	num_processors++;
+	cpus_complement(tmp_map, cpu_present_map);
+	cpu = first_cpu(tmp_map);
+
+	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
+		/*
+		 * x86_bios_cpu_apicid is required to have processors listed
+		 * in same order as logical cpu numbers. Hence the first
+		 * entry is BSP, and so on.
+		 */
+		cpu = 0;

 	/*
 	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
@@ -220,12 +231,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
 	}
 	/* are we being called early in kernel startup? */
 	if (x86_cpu_to_apicid_early_ptr) {
+		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
 		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+
+		cpu_to_apicid[cpu] = m->mpc_apicid;
 		bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
 	} else {
-		int cpu = num_processors - 1;
+		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
 		per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
 	}
+
+	cpu_set(cpu, cpu_present_map);
 }

 static void __init MP_bus_info (struct mpc_config_bus *m)
...
...@@ -525,16 +525,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) ...@@ -525,16 +525,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
#endif /* WAKE_SECONDARY_VIA_INIT */ #endif /* WAKE_SECONDARY_VIA_INIT */
extern cpumask_t cpu_initialized; extern cpumask_t cpu_initialized;
static inline int alloc_cpu_id(void)
{
cpumask_t tmp_map;
int cpu;
cpus_complement(tmp_map, cpu_present_map);
cpu = first_cpu(tmp_map);
if (cpu >= NR_CPUS)
return -ENODEV;
return cpu;
}
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS]; static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
...@@ -605,7 +595,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) ...@@ -605,7 +595,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
irq_ctx_init(cpu); irq_ctx_init(cpu);
per_cpu(x86_cpu_to_apicid, cpu) = apicid;
/* /*
* This grunge runs the startup process for * This grunge runs the startup process for
* the targeted processor. * the targeted processor.
...@@ -666,10 +655,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) ...@@ -666,10 +655,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
cpu_clear(cpu, cpu_possible_map); cpu_clear(cpu, cpu_possible_map);
per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
cpucount--; cpucount--;
} else {
per_cpu(x86_cpu_to_apicid, cpu) = apicid;
cpu_set(cpu, cpu_present_map);
} }
/* mark "stuck" area as not stuck */ /* mark "stuck" area as not stuck */
...@@ -745,6 +732,7 @@ EXPORT_SYMBOL(xquad_portio); ...@@ -745,6 +732,7 @@ EXPORT_SYMBOL(xquad_portio);
static void __init disable_smp(void) static void __init disable_smp(void)
{ {
cpu_possible_map = cpumask_of_cpu(0); cpu_possible_map = cpumask_of_cpu(0);
cpu_present_map = cpumask_of_cpu(0);
smpboot_clear_io_apic_irqs(); smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0); phys_cpu_present_map = physid_mask_of_physid(0);
map_cpu_to_logical_apicid(); map_cpu_to_logical_apicid();
...@@ -825,7 +813,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -825,7 +813,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
boot_cpu_logical_apicid = logical_smp_processor_id(); boot_cpu_logical_apicid = logical_smp_processor_id();
per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
current_thread_info()->cpu = 0; current_thread_info()->cpu = 0;
...@@ -866,8 +853,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -866,8 +853,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
continue; continue;
if (max_cpus <= cpucount+1) if (max_cpus <= cpucount+1)
continue; continue;
/* Utterly temporary */
if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu)) for (cpu = 0; cpu < NR_CPUS; cpu++)
if (per_cpu(x86_cpu_to_apicid, cpu) == apicid)
break;
if (do_boot_cpu(apicid, cpu))
printk("CPU #%d not responding - cannot use it.\n", printk("CPU #%d not responding - cannot use it.\n",
apicid); apicid);
else else
......