Commit c48f46ac authored by Borislav Petkov

Merge 'x86/cpu' to pick up dependent bits

Pick up work happening in parallel to avoid nasty merge conflicts later.
Signed-off-by: Borislav Petkov <bp@suse.de>
parents 28b590f4 61aa9a0a
@@ -368,6 +368,7 @@
 #define X86_FEATURE_MD_CLEAR		(18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_SERIALIZE		(18*32+14) /* SERIALIZE instruction */
+#define X86_FEATURE_TSXLDTRK		(18*32+16) /* TSX Suspend Load Address Tracking */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_ARCH_LBR		(18*32+19) /* Intel ARCH LBR */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
...
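For context, the feature-word encoding above maps directly to CPUID: word 18 is CPUID.(EAX=7,ECX=0):EDX, so X86_FEATURE_TSXLDTRK at (18*32+16) is bit 16 of that register. A minimal user-space sketch of checking the same bit with GCC's <cpuid.h> helpers (illustrative only, not part of the patch):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.(EAX=7,ECX=0):EDX[16] is TSX suspend load address tracking */
	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) && (edx & (1u << 16)))
		printf("TSXLDTRK supported\n");
	else
		printf("TSXLDTRK not supported\n");
	return 0;
}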
@@ -602,9 +602,7 @@ static inline u64 xgetbv(u32 index)
 {
 	u32 eax, edx;

-	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
-		     : "=a" (eax), "=d" (edx)
-		     : "c" (index));
+	asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index));
 	return eax + ((u64)edx << 32);
 }

@@ -613,8 +611,7 @@ static inline void xsetbv(u32 index, u64 value)
 	u32 eax = value;
 	u32 edx = value >> 32;

-	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
-		     : : "a" (eax), "d" (edx), "c" (index));
+	asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
 }

 #endif /* _ASM_X86_FPU_INTERNAL_H */
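This change drops the hand-assembled .byte encodings in favor of the xgetbv/xsetbv mnemonics, which the kernel's minimum supported assembler can now handle. The same pattern works from user space for XGETBV (XSETBV is privileged); a small sketch reading XCR0, assuming the CPU supports XSAVE and the OS has set CR4.OSXSAVE:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t xgetbv(uint32_t index)
{
	uint32_t eax, edx;

	/* Same pattern as above: XCR[index] is returned in EDX:EAX */
	asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index));
	return eax + ((uint64_t)edx << 32);
}

int main(void)
{
	uint64_t xcr0 = xgetbv(0);	/* XCR0: enabled XSAVE state components */

	printf("XCR0 = %#llx (x87=%d SSE=%d AVX=%d)\n",
	       (unsigned long long)xcr0,
	       !!(xcr0 & 1), !!(xcr0 & 2), !!(xcr0 & 4));
	return 0;
}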
@@ -234,6 +234,12 @@ static inline void clwb(volatile void *__p)

 #define nop() asm volatile ("nop")

+static inline void serialize(void)
+{
+	/* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
+	asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
+}
+
 #endif /* __KERNEL__ */

 #endif /* _ASM_X86_SPECIAL_INSNS_H */
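serialize() emits the raw 0f 01 e8 opcode because the SERIALIZE mnemonic only appeared in binutils 2.35. SERIALIZE is allowed at any privilege level, so the same encoding can be exercised from user space when CPUID reports it; a hedged sketch (the feature bit is CPUID.(EAX=7,ECX=0):EDX[14], matching X86_FEATURE_SERIALIZE above):

#include <cpuid.h>
#include <stdio.h>

static inline void serialize(void)
{
	/* 0f 01 e8 == SERIALIZE; plain "serialize" with binutils >= 2.35 */
	asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) && (edx & (1u << 14))) {
		serialize();
		printf("executed SERIALIZE\n");
	} else {
		printf("SERIALIZE not supported (would #UD)\n");
	}
	return 0;
}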
@@ -5,6 +5,7 @@
 #include <linux/preempt.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
+#include <asm/special_insns.h>

 #ifdef CONFIG_X86_32
 static inline void iret_to_self(void)
@@ -46,22 +47,34 @@ static inline void iret_to_self(void)
  *
  *  b) Text was modified on a different CPU, may subsequently be
  *     executed on this CPU, and you want to make sure the new version
- *     gets executed.  This generally means you're calling this in a IPI.
+ *     gets executed.  This generally means you're calling this in an IPI.
  *
  * If you're calling this for a different reason, you're probably doing
  * it wrong.
+ *
+ * Like all of Linux's memory ordering operations, this is a
+ * compiler barrier as well.
  */
 static inline void sync_core(void)
 {
 	/*
-	 * There are quite a few ways to do this.  IRET-to-self is nice
-	 * because it works on every CPU, at any CPL (so it's compatible
-	 * with paravirtualization), and it never exits to a hypervisor.
-	 * The only down sides are that it's a bit slow (it seems to be
-	 * a bit more than 2x slower than the fastest options) and that
-	 * it unmasks NMIs.  The "push %cs" is needed because, in
-	 * paravirtual environments, __KERNEL_CS may not be a valid CS
-	 * value when we do IRET directly.
+	 * The SERIALIZE instruction is the most straightforward way to
+	 * do this, but it is not universally available.
+	 */
+	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
+		serialize();
+		return;
+	}
+
+	/*
+	 * For all other processors, there are quite a few ways to do this.
+	 * IRET-to-self is nice because it works on every CPU, at any CPL
+	 * (so it's compatible with paravirtualization), and it never exits
+	 * to a hypervisor.  The only downsides are that it's a bit slow
+	 * (it seems to be a bit more than 2x slower than the fastest
+	 * options) and that it unmasks NMIs.  The "push %cs" is needed,
+	 * because in paravirtual environments __KERNEL_CS may not be a
+	 * valid CS value when we do IRET directly.
 	 *
 	 * In case NMI unmasking or performance ever becomes a problem,
 	 * the next best option appears to be MOV-to-CR2 and an
@@ -71,9 +84,6 @@ static inline void sync_core(void)
 	 * CPUID is the conventional way, but it's nasty: it doesn't
 	 * exist on some 486-like CPUs, and it usually exits to a
 	 * hypervisor.
-	 *
-	 * Like all of Linux's memory ordering operations, this is a
-	 * compiler barrier as well.
 	 */
 	iret_to_self();
 }
...
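The new sync_core() shape is: use SERIALIZE when the CPU has it, otherwise fall back to an architecturally serializing side effect. User space cannot IRET-to-self, but the same dispatch can be sketched with CPUID as the portable fallback (hypothetical helper names, not kernel code):

#include <cpuid.h>

static int have_serialize;	/* detected once at startup */

static void my_sync_core(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (have_serialize) {
		/* Cheap path on new CPUs: 0f 01 e8 == SERIALIZE */
		asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
		return;
	}
	/* CPUID is architecturally serializing on every CPU that has it */
	__cpuid(0, eax, ebx, ecx, edx);
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	have_serialize = __get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
			 (edx & (1u << 14));
	my_sync_core();
	return 0;
}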
@@ -45,11 +45,12 @@
  * value that lies close to the top of the kernel memory. The limit for the GDT
  * and the IDT are set to zero.
  *
- * Given that SLDT and STR are not commonly used in programs that run on WineHQ
- * or DOSEMU2, they are not emulated.
- *
- * The instruction smsw is emulated to return the value that the register CR0
+ * The instruction SMSW is emulated to return the value that the register CR0
  * has at boot time as set in head_32.
+ * SLDT and STR are emulated to return the values that the kernel
+ * programmatically assigns:
+ * - SLDT returns (GDT_ENTRY_LDT * 8) if an LDT has been set, 0 if not.
+ * - STR returns (GDT_ENTRY_TSS * 8).
  *
  * Emulation is provided for both 32-bit and 64-bit processes.
  *
@@ -244,16 +245,34 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
 		*data_size += UMIP_GDT_IDT_LIMIT_SIZE;
 		memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE);

-	} else if (umip_inst == UMIP_INST_SMSW) {
-		unsigned long dummy_value = CR0_STATE;
+	} else if (umip_inst == UMIP_INST_SMSW || umip_inst == UMIP_INST_SLDT ||
+		   umip_inst == UMIP_INST_STR) {
+		unsigned long dummy_value;
+
+		if (umip_inst == UMIP_INST_SMSW) {
+			dummy_value = CR0_STATE;
+		} else if (umip_inst == UMIP_INST_STR) {
+			dummy_value = GDT_ENTRY_TSS * 8;
+		} else if (umip_inst == UMIP_INST_SLDT) {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+			down_read(&current->mm->context.ldt_usr_sem);
+			if (current->mm->context.ldt)
+				dummy_value = GDT_ENTRY_LDT * 8;
+			else
+				dummy_value = 0;
+			up_read(&current->mm->context.ldt_usr_sem);
+#else
+			dummy_value = 0;
+#endif
+		}

 		/*
-		 * Even though the CR0 register has 4 bytes, the number
+		 * For these 3 instructions, the number
 		 * of bytes to be copied in the result buffer is determined
 		 * by whether the operand is a register or a memory location.
 		 * If operand is a register, return as many bytes as the operand
 		 * size. If operand is memory, return only the two least
-		 * significant bytes of CR0.
+		 * significant bytes.
 		 */
 		if (X86_MODRM_MOD(insn->modrm.value) == 3)
 			*data_size = insn->opnd_bytes;
@@ -261,7 +280,6 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
 			*data_size = 2;

 		memcpy(data, &dummy_value, *data_size);
-
-	/* STR and SLDT are not emulated */
 	} else {
 		return -EINVAL;
 	}
@@ -383,10 +401,6 @@ bool fixup_umip_exception(struct pt_regs *regs)
 		umip_pr_warn(regs, "%s instruction cannot be used by applications.\n",
 				umip_insns[umip_inst]);

-		/* Do not emulate (spoof) SLDT or STR. */
-		if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT)
-			return false;
-
 		umip_pr_warn(regs, "For now, expensive software emulation returns the result.\n");

 		if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size,
...
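With this in place, a process that executes STR or SLDT under UMIP receives the spoofed values instead of a #GP-induced SIGSEGV. A hedged user-space sketch of what such a process would observe (exact values depend on the kernel's GDT layout; 0 from SLDT means no LDT was installed via modify_ldt()):

#include <stdio.h>

int main(void)
{
	unsigned short str_val, sldt_val;

	asm volatile("str %0" : "=r" (str_val));	/* emulated: GDT_ENTRY_TSS * 8 */
	asm volatile("sldt %0" : "=r" (sldt_val));	/* emulated: LDT selector or 0 */
	printf("STR  -> %#x\n", str_val);
	printf("SLDT -> %#x\n", sldt_val);
	return 0;
}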
@@ -371,7 +371,7 @@ void kvm_set_cpu_caps(void)
 		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
 		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
 		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
-		F(SERIALIZE)
+		F(SERIALIZE) | F(TSXLDTRK)
 	);

 	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
...
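Each F() entry ORs one feature bit into the CPUID.7:EDX value KVM advertises, so guests now see SERIALIZE (bit 14) and TSXLDTRK (bit 16). An illustrative stand-in for that pattern (F() here is a simplified mask macro, not KVM's actual one):

#include <stdint.h>
#include <stdio.h>

#define F(bit)	(1u << (bit))

int main(void)
{
	uint32_t cpuid_7_edx = F(14) /* SERIALIZE */ | F(16) /* TSXLDTRK */;

	printf("guest CPUID.7:EDX |= %#x\n", cpuid_7_edx);
	return 0;
}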