Commit 2a8a2b7c authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:

 - Correct the L1TF fallout on 32bit and the off-by-one in the 'too much
   RAM for protection' calculation (a worked sketch follows this list).

 - Add a helpful kernel message for the 'too much RAM' case

 - Unbreak the VDSO in case the compiler decides to use indirect
   jumps/calls and emits retpolines which cannot be resolved, because the
   kernel uses its own thunks, which do not work for the VDSO. Make it
   use the built-in thunks.

 - Re-export start_thread() which was unexported when the 32/64bit
   implementation was unified. start_thread() is required by modular
   binfmt handlers.

 - Trivial cleanups
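As a minimal userspace sketch of the 32bit overflow (illustration only,
not kernel code; the 46 physical address bits are an assumed example
value for a modern CPU running a 32bit kernel):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define BIT(n)     (1UL << (n))   /* unsigned long: 32 bits on i386 */
	#define BIT_ULL(n) (1ULL << (n))  /* unsigned long long: 64 bits everywhere */

	int main(void)
	{
		int x86_phys_bits = 46;

		/* Old code: shift count 33 is >= the width of a 32bit
		 * unsigned long, i.e. undefined behaviour:
		 * unsigned long bad = BIT(x86_phys_bits - 1 - PAGE_SHIFT) - 1;
		 */

		/* New code: 64bit arithmetic, well defined on any kernel */
		unsigned long long limit = BIT_ULL(x86_phys_bits - 1 - PAGE_SHIFT);
		printf("l1tf pfn limit: %#llx\n", limit); /* 0x200000000 */
		return 0;
	}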

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation/l1tf: Suggest what to do on systems with too much RAM
  x86/speculation/l1tf: Fix off-by-one error when warning that system has too much RAM
  x86/kvm/vmx: Remove duplicate l1d flush definitions
  x86/speculation/l1tf: Fix overflow in l1tf_pfn_limit() on 32bit
  x86/process: Re-export start_thread()
  x86/mce: Add notifier_block forward declaration
  x86/vdso: Fix vDSO build if a retpoline is emitted
parents de375035 6a012288
@@ -507,9 +507,13 @@ KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 endif
 
 RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
 RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
 RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
 export RETPOLINE_CFLAGS
+export RETPOLINE_VDSO_CFLAGS
 
 KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
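A note on the two flag sets above: -mindirect-branch=thunk-extern makes
the compiler call out-of-line thunks that the kernel image itself
provides, while thunk-inline expands the retpoline in place. The vDSO is
linked separately and runs in userspace, so it cannot resolve the
kernel's thunk symbols and must use the inline variant. A sketch (the
function is invented; the thunk name is the real GCC convention):

	void dispatch(void (*fn)(void))
	{
		fn();	/* an indirect call the compiler must protect */
		/*
		 * thunk-extern emits roughly:
		 *	call __x86_indirect_thunk_rax	; external symbol,
		 *					; provided by the kernel
		 * thunk-inline emits the full retpoline sequence in place,
		 * so the object needs no external symbol. That is the only
		 * form that works for the separately linked vDSO.
		 */
	}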
...
@@ -68,9 +68,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
 
-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -132,11 +132,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
 
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 $(obj)/vdso32.so.dbg: FORCE \
...
@@ -148,6 +148,7 @@ enum mce_notifier_prios {
 	MCE_PRIO_LOWEST		= 0,
 };
 
+struct notifier_block;
 extern void mce_register_decode_chain(struct notifier_block *nb);
 extern void mce_unregister_decode_chain(struct notifier_block *nb);
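The forward declaration works because the header only ever uses pointers
to struct notifier_block, and an incomplete type is enough for that; the
full definition in linux/notifier.h is not needed here. A minimal sketch
with invented names:

	struct opaque;					/* incomplete type */
	extern void register_thing(struct opaque *p);	/* fine: pointer only */
	/* struct opaque o; would not compile: size unknown at this point */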
...
@@ -181,9 +181,9 @@ extern const struct seq_operations cpuinfo_op;
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
-static inline unsigned long l1tf_pfn_limit(void)
+static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+	return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
...
@@ -702,6 +702,10 @@ static void __init l1tf_select_mitigation(void)
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
 	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+				half_pa);
+		pr_info("However, doing so will make a part of your RAM unusable.\n");
+		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
 		return;
 	}
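To make the suggested mem= value concrete, a userspace sketch assuming
x86_phys_bits = 36 (i.e. MAX_PA = 64 GiB) and 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define BIT_ULL(n) (1ULL << (n))

	int main(void)
	{
		int x86_phys_bits = 36;
		unsigned long long half_pa =
			BIT_ULL(x86_phys_bits - 1 - PAGE_SHIFT) << PAGE_SHIFT;

		/* 2^35 bytes = 32 GiB = MAX_PA/2 */
		printf("mem=%llu\n", half_pa);	/* mem=34359738368 */

		/* With the old "BIT(...) - 1" limit, half_pa came out one
		 * page (4 KiB) short of MAX_PA/2, so a machine populated to
		 * exactly MAX_PA/2 warned spuriously; that is the off-by-one
		 * fixed in this pull. */
		return 0;
	}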
...
@@ -384,6 +384,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	start_thread_common(regs, new_ip, new_sp,
 			    __USER_CS, __USER_DS, 0);
 }
+EXPORT_SYMBOL_GPL(start_thread);
 
 #ifdef CONFIG_COMPAT
 void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
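For context, a hypothetical modular binfmt handler (all names invented)
shows why the export is needed; without it, loading such a module fails
with "Unknown symbol start_thread":

	#include <linux/binfmts.h>
	#include <linux/ptrace.h>
	#include <asm/processor.h>

	static int examplefmt_load_binary(struct linux_binprm *bprm)
	{
		unsigned long entry = 0x400000;	/* made-up entry point */

		/* a real handler maps the image and sets up the stack first */
		start_thread(current_pt_regs(), entry, bprm->p);
		return 0;
	}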
...
...@@ -10131,9 +10131,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) ...@@ -10131,9 +10131,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
* information but as all relevant affected CPUs have 32KiB L1D cache size * information but as all relevant affected CPUs have 32KiB L1D cache size
* there is no point in doing so. * there is no point in doing so.
*/ */
#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
static void vmx_l1d_flush(struct kvm_vcpu *vcpu) static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{ {
int size = PAGE_SIZE << L1D_CACHE_ORDER; int size = PAGE_SIZE << L1D_CACHE_ORDER;
...
@@ -930,7 +930,7 @@ unsigned long max_swapfile_size(void)
 
 	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
 		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
-		unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
+		unsigned long long l1tf_limit = l1tf_pfn_limit();
 		/*
 		 * We encode swap offsets also with 3 bits below those for pfn
 		 * which makes the usable limit higher.
@@ -938,7 +938,7 @@ unsigned long max_swapfile_size(void)
 #if CONFIG_PGTABLE_LEVELS > 2
 		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
 #endif
-		pages = min_t(unsigned long, l1tf_limit, pages);
+		pages = min_t(unsigned long long, l1tf_limit, pages);
 	}
 	return pages;
 }
...
@@ -257,7 +257,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
 	/* If it's real memory always allow */
 	if (pfn_valid(pfn))
 		return true;
-	if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+	if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
 		return false;
 	return true;
 }
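A quick sanity sketch (userspace, with an assumed 2^23 pfn limit): the
old "last safe pfn" and the new "first unsafe pfn" encodings deny exactly
the same pfns, which is also why max_swapfile_size() above drops its
"+ 1":

	#include <assert.h>

	int main(void)
	{
		unsigned long long old_limit = (1ULL << 23) - 1; /* last safe pfn */
		unsigned long long new_limit = (1ULL << 23);	 /* first unsafe pfn */

		for (unsigned long long pfn = old_limit - 1; pfn <= new_limit + 1; pfn++)
			assert((pfn > old_limit) == (pfn >= new_limit));
		return 0;
	}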