Commit a141fd55 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Another round of CR3/PCID related fixes (I think this addresses all
  but one of the known problems with PCID support), an objtool fix plus
  a Clang fix that (finally) solves all Clang quirks to build a bootable
  x86 kernel as-is"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/asm: Fix inline asm call constraints for Clang
  objtool: Handle another GCC stack pointer adjustment bug
  x86/mm/32: Load a sane CR3 before cpu_init() on secondary CPUs
  x86/mm/32: Move setup_clear_cpu_cap(X86_FEATURE_PCID) earlier
  x86/mm/64: Stop using CR3.PCID == 0 in ASID-aware code
  x86/mm: Factor out CR3-building code
parents 16528a3f f5caf621
@@ -218,10 +218,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
 #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
                            output, input...) \
 { \
-        register void *__sp asm(_ASM_SP); \
         asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
                 "call %P[new2]", feature2) \
-                : output, "+r" (__sp) \
+                : output, ASM_CALL_CONSTRAINT \
                 : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
                   [new2] "i" (newfunc2), ## input); \
 }
...
@@ -132,4 +132,15 @@
 /* For C file, we already have NOKPROBE_SYMBOL macro */
 #endif
 
+#ifndef __ASSEMBLY__
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction.  Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function.  If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned long __asm_call_sp asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (__asm_call_sp)
+#endif
+
 #endif /* _ASM_X86_ASM_H */
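
The conversion applied throughout the rest of this series follows a single pattern: drop the function-local stack pointer register variable and list ASM_CALL_CONSTRAINT among the asm statement's outputs instead. A minimal before/after sketch (the helper names below are illustrative, not taken from the kernel):

     /* Before: each call site declared its own stack pointer register
      * variable and listed it as an output of the asm statement.
      * some_helper is a placeholder callee.
      */
     static inline void example_old(void)
     {
             register void *__sp asm(_ASM_SP);

             asm volatile("call some_helper" : "+r" (__sp));
     }

     /* After: the single file-scope register variable behind
      * ASM_CALL_CONSTRAINT expresses the same dependency on the
      * stack pointer.
      */
     static inline void example_new(void)
     {
             asm volatile("call some_helper" : ASM_CALL_CONSTRAINT);
     }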
@@ -286,6 +286,32 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
         return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
+/*
+ * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
+ * bits.  This serves two purposes.  It prevents a nasty situation in
+ * which PCID-unaware code saves CR3, loads some other value (with PCID
+ * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
+ * the saved ASID was nonzero.  It also means that any bugs involving
+ * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
+ * deterministically.
+ */
+static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
+{
+        if (static_cpu_has(X86_FEATURE_PCID)) {
+                VM_WARN_ON_ONCE(asid > 4094);
+                return __sme_pa(mm->pgd) | (asid + 1);
+        } else {
+                VM_WARN_ON_ONCE(asid != 0);
+                return __sme_pa(mm->pgd);
+        }
+}
+
+static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
+{
+        VM_WARN_ON_ONCE(asid > 4094);
+        return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
+}
+
 /*
  * This can be used from process context to figure out what the value of
@@ -296,10 +322,8 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-        unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
-
-        if (static_cpu_has(X86_FEATURE_PCID))
-                cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+                this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
         /* For now, be very restrictive about when this can be called. */
         VM_WARN_ON(in_nmi() || preemptible());
...
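
To make the encoding concrete, here is a small stand-alone sketch of the arithmetic build_cr3() and build_cr3_noflush() perform. The page-table address and ASID values are invented for illustration, and DEMO_CR3_NOFLUSH simply mirrors the kernel's CR3_NOFLUSH bit (bit 63):

     #include <stdint.h>
     #include <stdio.h>

     #define DEMO_CR3_NOFLUSH (1ULL << 63)

     /* Stand-in for build_cr3(): pgd_pa is the physical address of the
      * page-table root, asid is the kernel-side ASID (0..4094).  The
      * hardware PCID field in CR3 (low 12 bits) receives asid + 1, so
      * ASID-aware code never writes PCID 0.
      */
     static uint64_t demo_build_cr3(uint64_t pgd_pa, uint16_t asid)
     {
             return pgd_pa | (uint64_t)(asid + 1);
     }

     int main(void)
     {
             uint64_t pgd_pa = 0x1234000;    /* made up, page aligned */

             printf("asid 0          -> %#llx\n",
                    (unsigned long long)demo_build_cr3(pgd_pa, 0));
             printf("asid 5          -> %#llx\n",
                    (unsigned long long)demo_build_cr3(pgd_pa, 5));
             printf("asid 5, noflush -> %#llx\n",
                    (unsigned long long)(demo_build_cr3(pgd_pa, 5) | DEMO_CR3_NOFLUSH));
             return 0;
     }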
@@ -179,7 +179,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
         u64 input_address = input ? virt_to_phys(input) : 0;
         u64 output_address = output ? virt_to_phys(output) : 0;
         u64 hv_status;
-        register void *__sp asm(_ASM_SP);
 
 #ifdef CONFIG_X86_64
         if (!hv_hypercall_pg)
@@ -187,7 +186,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
         __asm__ __volatile__("mov %4, %%r8\n"
                              "call *%5"
-                             : "=a" (hv_status), "+r" (__sp),
+                             : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                                "+c" (control), "+d" (input_address)
                              : "r" (output_address), "m" (hv_hypercall_pg)
                              : "cc", "memory", "r8", "r9", "r10", "r11");
@@ -202,7 +201,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
         __asm__ __volatile__("call *%7"
                              : "=A" (hv_status),
-                               "+c" (input_address_lo), "+r" (__sp)
+                               "+c" (input_address_lo), ASM_CALL_CONSTRAINT
                              : "A" (control),
                                "b" (input_address_hi),
                                "D"(output_address_hi), "S"(output_address_lo),
@@ -224,12 +223,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
 static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
 {
         u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
-        register void *__sp asm(_ASM_SP);
 
 #ifdef CONFIG_X86_64
         {
                 __asm__ __volatile__("call *%4"
-                                     : "=a" (hv_status), "+r" (__sp),
+                                     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                                        "+c" (control), "+d" (input1)
                                      : "m" (hv_hypercall_pg)
                                      : "cc", "r8", "r9", "r10", "r11");
@@ -242,7 +240,7 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
                 __asm__ __volatile__ ("call *%5"
                                       : "=A"(hv_status),
                                         "+c"(input1_lo),
-                                        "+r"(__sp)
+                                        ASM_CALL_CONSTRAINT
                                       : "A" (control),
                                         "b" (input1_hi),
                                         "m" (hv_hypercall_pg)
...
@@ -459,8 +459,8 @@ int paravirt_disable_iospace(void);
  */
 #ifdef CONFIG_X86_32
 #define PVOP_VCALL_ARGS \
-        unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \
-        register void *__sp asm("esp")
+        unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
+
 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
@@ -480,8 +480,8 @@ int paravirt_disable_iospace(void);
 /* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS \
         unsigned long __edi = __edi, __esi = __esi, \
-                __edx = __edx, __ecx = __ecx, __eax = __eax; \
-        register void *__sp asm("rsp")
+                __edx = __edx, __ecx = __ecx, __eax = __eax;
+
 #define PVOP_CALL_ARGS PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
@@ -532,7 +532,7 @@ int paravirt_disable_iospace(void);
                 asm volatile(pre \
                              paravirt_alt(PARAVIRT_CALL) \
                              post \
-                             : call_clbr, "+r" (__sp) \
+                             : call_clbr, ASM_CALL_CONSTRAINT \
                              : paravirt_type(op), \
                                paravirt_clobber(clbr), \
                                ##__VA_ARGS__ \
@@ -542,7 +542,7 @@ int paravirt_disable_iospace(void);
                 asm volatile(pre \
                              paravirt_alt(PARAVIRT_CALL) \
                              post \
-                             : call_clbr, "+r" (__sp) \
+                             : call_clbr, ASM_CALL_CONSTRAINT \
                              : paravirt_type(op), \
                                paravirt_clobber(clbr), \
                                ##__VA_ARGS__ \
@@ -569,7 +569,7 @@ int paravirt_disable_iospace(void);
                 asm volatile(pre \
                              paravirt_alt(PARAVIRT_CALL) \
                              post \
-                             : call_clbr, "+r" (__sp) \
+                             : call_clbr, ASM_CALL_CONSTRAINT \
                              : paravirt_type(op), \
                                paravirt_clobber(clbr), \
                                ##__VA_ARGS__ \
...
@@ -101,18 +101,13 @@ static __always_inline bool should_resched(int preempt_offset)
 #ifdef CONFIG_PREEMPT
   extern asmlinkage void ___preempt_schedule(void);
 # define __preempt_schedule() \
-({ \
-        register void *__sp asm(_ASM_SP); \
-        asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \
-})
+        asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule(void);
   extern asmlinkage void ___preempt_schedule_notrace(void);
 # define __preempt_schedule_notrace() \
-({ \
-        register void *__sp asm(_ASM_SP); \
-        asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \
-})
+        asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule_notrace(void);
 #endif
...
@@ -677,8 +677,6 @@ static inline void sync_core(void)
          * Like all of Linux's memory ordering operations, this is a
          * compiler barrier as well.
          */
-        register void *__sp asm(_ASM_SP);
-
 #ifdef CONFIG_X86_32
         asm volatile (
                 "pushfl\n\t"
@@ -686,7 +684,7 @@ static inline void sync_core(void)
                 "pushl $1f\n\t"
                 "iret\n\t"
                 "1:"
-                : "+r" (__sp) : : "memory");
+                : ASM_CALL_CONSTRAINT : : "memory");
 #else
         unsigned int tmp;
@@ -703,7 +701,7 @@ static inline void sync_core(void)
                 "iretq\n\t"
                 UNWIND_HINT_RESTORE
                 "1:"
-                : "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
+                : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
 #endif
 }
...
@@ -103,7 +103,6 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
 ({ \
         long tmp; \
         struct rw_semaphore* ret; \
-        register void *__sp asm(_ASM_SP); \
         \
         asm volatile("# beginning down_write\n\t" \
                      LOCK_PREFIX " xadd %1,(%4)\n\t" \
@@ -114,7 +113,8 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
                      " call " slow_path "\n" \
                      "1:\n" \
                      "# ending down_write" \
-                     : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
+                     : "+m" (sem->count), "=d" (tmp), \
+                       "=a" (ret), ASM_CALL_CONSTRAINT \
                      : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
                      : "memory", "cc"); \
         ret; \
...
@@ -166,11 +166,11 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 ({ \
         int __ret_gu; \
         register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
-        register void *__sp asm(_ASM_SP); \
         __chk_user_ptr(ptr); \
         might_fault(); \
         asm volatile("call __get_user_%P4" \
-                     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
+                     : "=a" (__ret_gu), "=r" (__val_gu), \
+                       ASM_CALL_CONSTRAINT \
                      : "0" (ptr), "i" (sizeof(*(ptr)))); \
         (x) = (__force __typeof__(*(ptr))) __val_gu; \
         __builtin_expect(__ret_gu, 0); \
...
@@ -113,10 +113,9 @@ extern struct { char _entry[32]; } hypercall_page[];
         register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
         register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
         register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
-        register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \
-        register void *__sp asm(_ASM_SP);
+        register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
 
-#define __HYPERCALL_0PARAM      "=r" (__res), "+r" (__sp)
+#define __HYPERCALL_0PARAM      "=r" (__res), ASM_CALL_CONSTRAINT
 #define __HYPERCALL_1PARAM      __HYPERCALL_0PARAM, "+r" (__arg1)
 #define __HYPERCALL_2PARAM      __HYPERCALL_1PARAM, "+r" (__arg2)
 #define __HYPERCALL_3PARAM      __HYPERCALL_2PARAM, "+r" (__arg3)
...
@@ -21,14 +21,6 @@
 void __init check_bugs(void)
 {
-#ifdef CONFIG_X86_32
-        /*
-         * Regardless of whether PCID is enumerated, the SDM says
-         * that it can't be enabled in 32-bit mode.
-         */
-        setup_clear_cpu_cap(X86_FEATURE_PCID);
-#endif
-
         identify_boot_cpu();
 
         if (!IS_ENABLED(CONFIG_SMP)) {
...
@@ -904,6 +904,14 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
         setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
         fpu__init_system(c);
+
+#ifdef CONFIG_X86_32
+        /*
+         * Regardless of whether PCID is enumerated, the SDM says
+         * that it can't be enabled in 32-bit mode.
+         */
+        setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
 }
 
 void __init early_cpu_init(void)
...
@@ -232,12 +232,6 @@ static void notrace start_secondary(void *unused)
          */
         if (boot_cpu_has(X86_FEATURE_PCID))
                 __write_cr4(__read_cr4() | X86_CR4_PCIDE);
-        cpu_init();
-        x86_cpuinit.early_percpu_clock_init();
-        preempt_disable();
-        smp_callin();
-
-        enable_start_cpu0 = 0;
 
 #ifdef CONFIG_X86_32
         /* switch away from the initial page table */
@@ -245,6 +239,13 @@ static void notrace start_secondary(void *unused)
         __flush_tlb_all();
 #endif
 
+        cpu_init();
+        x86_cpuinit.early_percpu_clock_init();
+        preempt_disable();
+        smp_callin();
+
+        enable_start_cpu0 = 0;
+
         /* otherwise gcc will move up smp_processor_id before the cpu_init */
         barrier();
         /*
...
@@ -5298,7 +5298,6 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 {
-        register void *__sp asm(_ASM_SP);
         ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
 
         if (!(ctxt->d & ByteOp))
@@ -5306,7 +5305,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
         asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
             : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
-              [fastop]"+S"(fop), "+r"(__sp)
+              [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
             : "c"(ctxt->src2.val));
 
         ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
...
@@ -9045,7 +9045,6 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 {
         u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-        register void *__sp asm(_ASM_SP);
 
         if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
                         == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
@@ -9074,7 +9073,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_64
                         [sp]"=&r"(tmp),
 #endif
-                        "+r"(__sp)
+                        ASM_CALL_CONSTRAINT
                         :
                         [entry]"r"(entry),
                         [ss]"i"(__KERNEL_DS),
...
@@ -806,7 +806,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
         if (is_vmalloc_addr((void *)address) &&
             (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
              address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
-                register void *__sp asm("rsp");
                 unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
                 /*
                  * We're likely to be running with very little stack space
@@ -821,7 +820,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
                 asm volatile ("movq %[stack], %%rsp\n\t"
                               "call handle_stack_overflow\n\t"
                               "1: jmp 1b"
-                              : "+r" (__sp)
+                              : ASM_CALL_CONSTRAINT
                               : "D" ("kernel stack overflow (page fault)"),
                                 "S" (regs), "d" (address),
                                 [stack] "rm" (stack));
...
@@ -126,8 +126,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
          * isn't free.
          */
 #ifdef CONFIG_DEBUG_VM
-        if (WARN_ON_ONCE(__read_cr3() !=
-                         (__sme_pa(real_prev->pgd) | prev_asid))) {
+        if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
                 /*
                  * If we were to BUG here, we'd be very likely to kill
                  * the system so hard that we don't see the call trace.
@@ -172,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                  */
                 this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
                                next_tlb_gen);
-                write_cr3(__sme_pa(next->pgd) | prev_asid);
+                write_cr3(build_cr3(next, prev_asid));
                 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
                                 TLB_FLUSH_ALL);
         }
@@ -216,12 +215,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         if (need_flush) {
                 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-                write_cr3(__sme_pa(next->pgd) | new_asid);
+                write_cr3(build_cr3(next, new_asid));
                 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
                                 TLB_FLUSH_ALL);
         } else {
                 /* The new ASID is already up to date. */
-                write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH);
+                write_cr3(build_cr3_noflush(next, new_asid));
                 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
         }
@@ -265,7 +264,7 @@ void initialize_tlbstate_and_flush(void)
                !(cr4_read_shadow() & X86_CR4_PCIDE));
 
         /* Force ASID 0 and force a TLB flush. */
-        write_cr3(cr3 & ~CR3_PCID_MASK);
+        write_cr3(build_cr3(mm, 0));
 
         /* Reinitialize tlbstate. */
         this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
...
@@ -194,10 +194,10 @@ they mean, and suggestions for how to fix them.
    If it's a GCC-compiled .c file, the error may be because the function
    uses an inline asm() statement which has a "call" instruction.  An
    asm() statement with a call instruction must declare the use of the
-   stack pointer in its output operand.  For example, on x86_64:
+   stack pointer in its output operand.  On x86_64, this means adding
+   the ASM_CALL_CONSTRAINT as an output constraint:
 
-     register void *__sp asm("rsp");
-     asm volatile("call func" : "+r" (__sp));
+     asm volatile("call func" : ASM_CALL_CONSTRAINT);
 
    Otherwise the stack frame may not get created before the call.
...
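
The documentation example above is the minimal form; when the asm statement has other operands, the constraint is simply listed alongside them, as the conversions earlier in this series do. A hedged sketch with an invented helper that returns a value in %rax (some_helper is a placeholder, not a kernel symbol):

     static inline unsigned long call_helper(void)
     {
             unsigned long ret;

             /* ASM_CALL_CONSTRAINT sits next to the ordinary outputs,
              * keeping the call ordered after the frame setup.
              */
             asm volatile("call some_helper"
                          : "=a" (ret), ASM_CALL_CONSTRAINT
                          :
                          : "memory");
             return ret;
     }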
@@ -208,14 +208,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                 break;
 
         case 0x89:
-                if (rex == 0x48 && modrm == 0xe5) {
+                if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) {
 
-                        /* mov %rsp, %rbp */
+                        /* mov %rsp, reg */
                         *type = INSN_STACK;
                         op->src.type = OP_SRC_REG;
                         op->src.reg = CFI_SP;
                         op->dest.type = OP_DEST_REG;
-                        op->dest.reg = CFI_BP;
+                        op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
                         break;
                 }
...
@@ -1203,9 +1203,8 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
                 switch (op->src.type) {
 
                 case OP_SRC_REG:
-                        if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP) {
-
-                                if (cfa->base == CFI_SP &&
+                        if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
+                            cfa->base == CFI_SP &&
                             regs[CFI_BP].base == CFI_CFA &&
                             regs[CFI_BP].offset == -cfa->offset) {
@@ -1214,13 +1213,29 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
                                 state->bp_scratch = false;
                         }
 
-                        else if (state->drap) {
+                        else if (op->src.reg == CFI_SP &&
+                                 op->dest.reg == CFI_BP && state->drap) {
 
                                 /* drap: mov %rsp, %rbp */
                                 regs[CFI_BP].base = CFI_BP;
                                 regs[CFI_BP].offset = -state->stack_size;
                                 state->bp_scratch = false;
                         }
-                        }
 
+                        else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+
+                                /*
+                                 * mov %rsp, %reg
+                                 *
+                                 * This is needed for the rare case where GCC
+                                 * does:
+                                 *
+                                 *   mov %rsp, %rax
+                                 *   ...
+                                 *   mov %rax, %rsp
+                                 */
+                                state->vals[op->dest.reg].base = CFI_CFA;
+                                state->vals[op->dest.reg].offset = -state->stack_size;
+                        }
+
                         else if (op->dest.reg == cfa->base) {
...