From 7818a1e0294debee02d5135e17b89f28b8871887 Mon Sep 17 00:00:00 2001 From: Glauber de Oliveira Costa Date: Wed, 30 Jan 2008 13:31:31 +0100 Subject: [PATCH] x86: provide 64-bit with a load_sp0 function. Paravirt guests need to inform the underlying hypervisor whenever the sp0 tss field changes. i386 already has such a function, and we use it for x86_64 too. There's an unnecessary (for 64-bit) msr handling part in the original version, and it is placed around an ifdef. Making no more sense in processor_32.h, it is moved to the common header Signed-off-by: Glauber de Oliveira Costa Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/x86/kernel/process_64.c | 2 +- arch/x86/kernel/smpboot_64.c | 2 +- include/asm-x86/processor.h | 22 +++++++++++++++++++++- include/asm-x86/processor_32.h | 20 -------------------- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index af56104b73f..e3a3610ade1 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -639,7 +639,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* * Reload esp0, LDT and the page table pointer: */ - tss->x86_tss.sp0 = next->sp0; + load_sp0(tss, next); /* * Switch DS and ES. 
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index 2ea02a71b64..5bd42ce144d 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -614,7 +614,7 @@ do_rest: start_rip = setup_trampoline(); init_rsp = c_idle.idle->thread.sp; - per_cpu(init_tss, cpu).x86_tss.sp0 = init_rsp; + load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread); initial_code = start_secondary; clear_tsk_thread_flag(c_idle.idle, TIF_FORK); diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index cede9ad3dc6..b1ea5215636 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -193,8 +193,22 @@ static inline void native_set_iopl_mask(unsigned mask) #endif } +static inline void native_load_sp0(struct tss_struct *tss, + struct thread_struct *thread) +{ + tss->x86_tss.sp0 = thread->sp0; +#ifdef CONFIG_X86_32 + /* Only happens when SEP is enabled, no need to test "SEP"arately */ + if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { + tss->x86_tss.ss1 = thread->sysenter_cs; + wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); + } +#endif +} -#ifndef CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else #define __cpuid native_cpuid #define paravirt_enabled() 0 @@ -206,6 +220,12 @@ static inline void native_set_iopl_mask(unsigned mask) #define set_debugreg(value, register) \ native_set_debugreg(register, value) +static inline void load_sp0(struct tss_struct *tss, + struct thread_struct *thread) +{ + native_load_sp0(tss, thread); +} + #define set_iopl_mask native_set_iopl_mask #endif /* CONFIG_PARAVIRT */ diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h index 57b345bc3c7..53037d1a6ae 100644 --- a/include/asm-x86/processor_32.h +++ b/include/asm-x86/processor_32.h @@ -278,26 +278,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); #define KSTK_ESP(task) (task_pt_regs(task)->sp) -static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct 
*thread) -{ - tss->x86_tss.sp0 = thread->sp0; - /* This can only happen when SEP is enabled, no need to test "SEP"arately */ - if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { - tss->x86_tss.ss1 = thread->sysenter_cs; - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); - } -} - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else - -static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread) -{ - native_load_sp0(tss, thread); -} -#endif /* CONFIG_PARAVIRT */ - /* generic versions from gas */ #define GENERIC_NOP1 ".byte 0x90\n" #define GENERIC_NOP2 ".byte 0x89,0xf6\n" -- 2.46.0