Commit 91718e8d authored by Glauber Costa, committed by Ingo Molnar

x86: unify setup_trampoline

setup_trampoline() looks very similar between architectures, and this
patch unifies them. The i386 version allocates bootmem memory, while
the x86_64 version uses a fixed address.

In this patch, we initialize the global trampoline_base to the x86_64 fixed
address; the i386 bootmem allocation can later override it.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent da522b07
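
For illustration only, here is a minimal, self-contained userspace sketch of
the pattern the patch introduces: one global trampoline_base that defaults to
the fixed x86_64 address, with a 32-bit-only hook that may override it before
setup_trampoline() copies the real-mode blob. __va(), virt_to_phys(), the
trampoline blob and the bootmem allocator are stand-ins here, not the kernel's
implementations.

/*
 * Sketch of the unification pattern; compiles as ordinary C.
 * Everything marked "stand-in" is an assumption for illustration.
 */
#include <stdio.h>
#include <string.h>

#define SMP_TRAMPOLINE_BASE 0x6000

/* Stand-in low memory plus stand-ins for the kernel's phys<->virt helpers. */
static unsigned char fake_low_mem[1 << 20];
#define __va(pa)        (fake_low_mem + (pa))
#define virt_to_phys(v) ((unsigned long)((unsigned char *)(v) - fake_low_mem))

/* Stand-in for the real-mode trampoline blob linked into the kernel. */
static const unsigned char trampoline_data[] = { 0xfa, 0xfc }; /* cli; cld */
static const unsigned char * const trampoline_end =
        trampoline_data + sizeof(trampoline_data);

/* Ready for x86_64; i386 overrides it before the copy below runs. */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

unsigned long setup_trampoline(void)
{
        memcpy(trampoline_base, trampoline_data,
               trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
}

#ifdef SKETCH_X86_32 /* mirrors the patch's CONFIG_X86_32 block */
void smp_alloc_memory(void)
{
        /* The kernel uses alloc_bootmem_low_pages(PAGE_SIZE) here. */
        trampoline_base = __va(0x8000); /* any page below 0x9F000 */
}
#endif

int main(void)
{
        printf("trampoline at %#lx\n", setup_trampoline()); /* 0x6000 */
        return 0;
}

The point of the sketch is the single definition plus an arch-specific
override, which replaces the two static per-arch copies removed in the diff
below.
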
@@ -3,6 +3,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <linux/bootmem.h>
 #include <asm/nmi.h>
 #include <asm/irq.h>
@@ -38,6 +39,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
+/* ready for x86_64, no harm for x86, since it will be overwritten after alloc */
+unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
@@ -117,6 +121,35 @@ cpumask_t cpu_coregroup_map(int cpu)
 	return c->llc_shared_map;
 }
 
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+unsigned long __cpuinit setup_trampoline(void)
+{
+	memcpy(trampoline_base, trampoline_data,
+	       trampoline_end - trampoline_data);
+	return virt_to_phys(trampoline_base);
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * We are called very early to get the low memory for the
+ * SMP bootup trampoline page.
+ */
+void __init smp_alloc_memory(void)
+{
+	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
+	/*
+	 * Has to be in very low memory so we can execute
+	 * real-mode AP code.
+	 */
+	if (__pa(trampoline_base) >= 0x9F000)
+		BUG();
+}
+#endif
+
 #ifdef CONFIG_HOTPLUG_CPU
 void remove_siblinginfo(int cpu)
...
@@ -73,40 +73,11 @@ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 u8 apicid_2_node[MAX_APICID];
 
-static unsigned char *trampoline_base;
-
 static void map_cpu_to_logical_apicid(void);
 
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-static unsigned long __cpuinit setup_trampoline(void)
-{
-	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
-	return virt_to_phys(trampoline_base);
-}
-
-/*
- * We are called very early to get the low memory for the
- * SMP bootup trampoline page.
- */
-void __init smp_alloc_memory(void)
-{
-	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
-	/*
-	 * Has to be in very low memory so we can execute
-	 * real-mode AP code.
-	 */
-	if (__pa(trampoline_base) >= 0x9F000)
-		BUG();
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
...
@@ -85,20 +85,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
 #endif
 
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-static unsigned long __cpuinit setup_trampoline(void)
-{
-	void *tramp = __va(SMP_TRAMPOLINE_BASE);
-	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
-	return virt_to_phys(tramp);
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
...
@@ -14,6 +14,7 @@ extern unsigned int num_processors;
  */
 extern const unsigned char trampoline_data [];
 extern const unsigned char trampoline_end [];
+extern unsigned char *trampoline_base;
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
@@ -81,6 +82,9 @@ extern void __cpu_die(unsigned int cpu);
 extern unsigned disabled_cpus;
 extern void prefill_possible_map(void);
 
+#define SMP_TRAMPOLINE_BASE 0x6000
+extern unsigned long setup_trampoline(void);
+
 #endif
 
 #ifdef CONFIG_X86_32
...
@@ -40,8 +40,6 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 
 #ifdef CONFIG_SMP
 
-#define SMP_TRAMPOLINE_BASE 0x6000
-
 #define raw_smp_processor_id() read_pda(cpunumber)
 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
...