From 91718e8d13c23bfe0aa6fa6b730c5c33ee9771bf Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Mon, 3 Mar 2008 14:13:12 -0300
Subject: [PATCH] x86: unify setup_trampoline

setup_trampoline() looks very similar between architectures, and this
patch unifies them. The i386 version allocates bootmem memory, while
the x86_64 version uses a fixed address. In this patch, we initialize
the global trampoline_base to the x86_64 version, and i386 allocation
can later override it.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/smpboot.c    | 33 +++++++++++++++++++++++++++++++++
 arch/x86/kernel/smpboot_32.c | 29 -----------------------------
 arch/x86/kernel/smpboot_64.c | 14 --------------
 include/asm-x86/smp.h        |  4 ++++
 include/asm-x86/smp_64.h     |  2 --
 5 files changed, 37 insertions(+), 45 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 34c31178041..b13b9d55f9c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -38,6 +39,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
+/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
+unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -117,6 +121,35 @@ cpumask_t cpu_coregroup_map(int cpu)
 	return c->llc_shared_map;
 }
 
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+
+unsigned long __cpuinit setup_trampoline(void)
+{
+	memcpy(trampoline_base, trampoline_data,
+	       trampoline_end - trampoline_data);
+	return virt_to_phys(trampoline_base);
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * We are called very early to get the low memory for the
+ * SMP bootup trampoline page.
+ */
+void __init smp_alloc_memory(void)
+{
+	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
+	/*
+	 * Has to be in very low memory so we can execute
+	 * real-mode AP code.
+	 */
+	if (__pa(trampoline_base) >= 0x9F000)
+		BUG();
+}
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 void remove_siblinginfo(int cpu)
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index a21f25418b3..ee826594aa0 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -73,40 +73,11 @@ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 
 u8 apicid_2_node[MAX_APICID];
 
-static unsigned char *trampoline_base;
-
 static void map_cpu_to_logical_apicid(void);
 
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
-	return virt_to_phys(trampoline_base);
-}
-
-/*
- * We are called very early to get the low memory for the
- * SMP bootup trampoline page.
- */
-void __init smp_alloc_memory(void)
-{
-	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
-	/*
-	 * Has to be in very low memory so we can execute
-	 * real-mode AP code.
-	 */
-	if (__pa(trampoline_base) >= 0x9F000)
-		BUG();
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 2cc1b8b0601..9f4935e70e7 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -85,20 +85,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
 #endif
 
-
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-	void *tramp = __va(SMP_TRAMPOLINE_BASE);
-	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
-	return virt_to_phys(tramp);
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index b2a1697e470..513c8571a4a 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -14,6 +14,7 @@ extern unsigned int num_processors;
  */
 extern const unsigned char trampoline_data [];
 extern const unsigned char trampoline_end [];
+extern unsigned char *trampoline_base;
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
@@ -81,6 +82,9 @@ extern void __cpu_die(unsigned int cpu);
 extern unsigned disabled_cpus;
 
 extern void prefill_possible_map(void);
+
+#define SMP_TRAMPOLINE_BASE 0x6000
+extern unsigned long setup_trampoline(void);
 #endif
 
 #ifdef CONFIG_X86_32
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index d554d7d5732..394c7852433 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -40,8 +40,6 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 
 #ifdef CONFIG_SMP
 
-#define SMP_TRAMPOLINE_BASE 0x6000
-
 #define raw_smp_processor_id()	read_pda(cpunumber)
 
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
--
2.41.0
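
Note (not part of the patch above): a minimal sketch of how the now-global
setup_trampoline() is consumed during AP bringup. The caller shape loosely
mirrors do_boot_cpu() in smpboot_32.c/smpboot_64.c of this era, but the
function name boot_one_ap() and the helpers smpboot_setup_warm_reset_vector()
and wakeup_secondary_cpu() are used here as illustrative assumptions, not as
exact signatures from the tree.

/* Illustrative sketch only -- not introduced by this patch. */
static int __cpuinit boot_one_ap(int apicid, int cpu)
{
	/*
	 * Copy the real->protected mode trampoline into low memory
	 * (the fixed SMP_TRAMPOLINE_BASE on x86_64, or the bootmem
	 * page set up by smp_alloc_memory() on i386) and get its
	 * physical address.
	 */
	unsigned long start_eip = setup_trampoline();

	/* Point the warm-reset vector at the trampoline ... */
	smpboot_setup_warm_reset_vector(start_eip);

	/* ... and kick the AP with INIT/STARTUP IPIs aimed at it. */
	return wakeup_secondary_cpu(apicid, start_eip);
}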