Commit 6f27a640 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:

 - Prevent an out-of-bounds access in mtrr_write()

 - Break a circular dependency in the new hyperv IPI acceleration code

 - Address the build breakage related to inline functions by enforcing
   gnu_inline and explicitly bringing native_save_fl() out of line;
   this also adds a set of _ASM_ARG macros which provide 32/64-bit
   safety.

 - Initialize the shadow CR4 per cpu variable before using it.

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mtrr: Don't copy out-of-bounds data in mtrr_write
  x86/hyper-v: Fix the circular dependency in IPI enlightenment
  x86/paravirt: Make native_save_fl() extern inline
  x86/asm: Add _ASM_ARG* constants for argument registers to <asm/asm.h>
  compiler-gcc.h: Add __attribute__((gnu_inline)) to all inline declarations
  x86/mm/32: Initialize the CR4 shadow before __flush_tlb_all()
parents 6fb2489d 15279df6
@@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
 		nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
 	}
+	if (nr_bank < 0)
+		goto ipi_mask_ex_done;
 	if (!nr_bank)
 		ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
@@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	for_each_cpu(cur_cpu, mask) {
 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+		if (vcpu == VP_INVAL)
+			goto ipi_mask_done;
+
 		/*
 		 * This particular version of the IPI hypercall can
 		 * only target upto 64 CPUs.
......
@@ -265,7 +265,7 @@ void __init hyperv_init(void)
 {
 	u64 guest_id, required_msrs;
 	union hv_x64_msr_hypercall_contents hypercall_msr;
-	int cpuhp;
+	int cpuhp, i;
 
 	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
 		return;
@@ -293,6 +293,9 @@ void __init hyperv_init(void)
 	if (!hv_vp_index)
 		return;
 
+	for (i = 0; i < num_possible_cpus(); i++)
+		hv_vp_index[i] = VP_INVAL;
+
 	hv_vp_assist_page = kcalloc(num_possible_cpus(),
 				    sizeof(*hv_vp_assist_page), GFP_KERNEL);
 	if (!hv_vp_assist_page) {
......
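Taken together, the hunks above break the circular dependency by pre-filling hv_vp_index with the VP_INVAL sentinel before any CPU has been brought up, and by making every consumer bail out while an entry still holds the sentinel. A minimal userspace sketch of that pattern follows; INDEX_INVAL, ncpus and the lookup loop are illustrative stand-ins, not the Hyper-V code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define INDEX_INVAL UINT32_MAX	/* stand-in for VP_INVAL */

int main(void)
{
	unsigned int ncpus = 4;
	uint32_t *index = malloc(ncpus * sizeof(*index));

	if (!index)
		return 1;

	/* Initialize every slot to the sentinel before anyone can read it. */
	for (unsigned int i = 0; i < ncpus; i++)
		index[i] = INDEX_INVAL;

	/* Pretend only CPU 0 has been onlined and populated so far. */
	index[0] = 7;

	for (unsigned int i = 0; i < ncpus; i++) {
		if (index[i] == INDEX_INVAL)
			printf("cpu %u: not initialized, skipping\n", i);
		else
			printf("cpu %u: vp %u\n", i, index[i]);
	}
	free(index);
	return 0;
}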
@@ -46,6 +46,65 @@
 #define _ASM_SI		__ASM_REG(si)
 #define _ASM_DI		__ASM_REG(di)
 
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1	_ASM_AX
+#define _ASM_ARG2	_ASM_DX
+#define _ASM_ARG3	_ASM_CX
+
+#define _ASM_ARG1L	eax
+#define _ASM_ARG2L	edx
+#define _ASM_ARG3L	ecx
+
+#define _ASM_ARG1W	ax
+#define _ASM_ARG2W	dx
+#define _ASM_ARG3W	cx
+
+#define _ASM_ARG1B	al
+#define _ASM_ARG2B	dl
+#define _ASM_ARG3B	cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1	_ASM_DI
+#define _ASM_ARG2	_ASM_SI
+#define _ASM_ARG3	_ASM_DX
+#define _ASM_ARG4	_ASM_CX
+#define _ASM_ARG5	r8
+#define _ASM_ARG6	r9
+
+#define _ASM_ARG1Q	rdi
+#define _ASM_ARG2Q	rsi
+#define _ASM_ARG3Q	rdx
+#define _ASM_ARG4Q	rcx
+#define _ASM_ARG5Q	r8
+#define _ASM_ARG6Q	r9
+
+#define _ASM_ARG1L	edi
+#define _ASM_ARG2L	esi
+#define _ASM_ARG3L	edx
+#define _ASM_ARG4L	ecx
+#define _ASM_ARG5L	r8d
+#define _ASM_ARG6L	r9d
+
+#define _ASM_ARG1W	di
+#define _ASM_ARG2W	si
+#define _ASM_ARG3W	dx
+#define _ASM_ARG4W	cx
+#define _ASM_ARG5W	r8w
+#define _ASM_ARG6W	r9w
+
+#define _ASM_ARG1B	dil
+#define _ASM_ARG2B	sil
+#define _ASM_ARG3B	dl
+#define _ASM_ARG4B	cl
+#define _ASM_ARG5B	r8b
+#define _ASM_ARG6B	r9b
+
+#endif
+
 /*
  * Macros to generate condition code outputs from inline assembly,
  * The output operand must be type "bool".
......
@@ -13,7 +13,7 @@
  * Interrupt control:
  */
 
-static inline unsigned long native_save_fl(void)
+extern inline unsigned long native_save_fl(void)
 {
 	unsigned long flags;
......
@@ -9,6 +9,8 @@
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
 
+#define VP_INVAL	U32_MAX
+
 struct ms_hyperv_info {
 	u32 features;
 	u32 misc_features;
@@ -20,7 +22,6 @@ struct ms_hyperv_info {
 
 extern struct ms_hyperv_info ms_hyperv;
 
-
 /*
  * Generate the guest ID.
  */
@@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
 	 */
 	for_each_cpu(cpu, cpus) {
 		vcpu = hv_cpu_number_to_vp_number(cpu);
+		if (vcpu == VP_INVAL)
+			return -1;
 		vcpu_bank = vcpu / 64;
 		vcpu_offset = vcpu % 64;
 		__set_bit(vcpu_offset, (unsigned long *)
......
@@ -61,6 +61,7 @@ obj-y			+= alternative.o i8253.o hw_breakpoint.o
 obj-y			+= tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y			+= pci-iommu_table.o
 obj-y			+= resource.o
+obj-y			+= irqflags.o
 
 obj-y			+= process.o
 obj-y			+= fpu/
......
@@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 	memset(line, 0, LINE_SIZE);
 
-	length = strncpy_from_user(line, buf, LINE_SIZE - 1);
+	len = min_t(size_t, len, LINE_SIZE - 1);
+	length = strncpy_from_user(line, buf, len);
 	if (length < 0)
 		return length;
......
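The mtrr_write() change clamps the user-supplied length before handing it to strncpy_from_user(), so the copy can never run past the on-stack line[] buffer. A rough userspace sketch of the same clamp; min_size() and write_line() are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <string.h>

#define LINE_SIZE 80

/* stand-in for the kernel's min_t(size_t, ...) */
static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* crude analogue of mtrr_write(): buf and len are caller-controlled */
static long write_line(char *line, const char *buf, size_t len)
{
	memset(line, 0, LINE_SIZE);
	len = min_size(len, LINE_SIZE - 1);	/* the fix: cap at buffer size - 1 */
	memcpy(line, buf, min_size(len, strlen(buf)));	/* strncpy_from_user stand-in */
	return (long)strlen(line);
}

int main(void)
{
	char line[LINE_SIZE];
	/* the caller passes a huge length; without the clamp this would overflow */
	long n = write_line(line, "base=0x100000 size=0x10000 type=write-back", 4096);

	printf("copied %ld bytes: \"%s\"\n", n, line);
	return 0;
}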
/* SPDX-License-Identifier: GPL-2.0 */

#include <asm/asm.h>
#include <asm/export.h>
#include <linux/linkage.h>

/*
 * unsigned long native_save_fl(void)
 */
ENTRY(native_save_fl)
	pushf
	pop %_ASM_AX
	ret
ENDPROC(native_save_fl)
EXPORT_SYMBOL(native_save_fl)

/*
 * void native_restore_fl(unsigned long flags)
 * %eax/%rdi: flags
 */
ENTRY(native_restore_fl)
	push %_ASM_ARG1
	popf
	ret
ENDPROC(native_restore_fl)
EXPORT_SYMBOL(native_restore_fl)
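The push %_ASM_ARG1 in native_restore_fl() is where the new _ASM_ARG* constants earn their keep: the same stub assembles to push %eax on 32-bit and push %rdi on 64-bit, so a single out-of-line implementation serves both configurations without #ifdefs.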
@@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused)
 #ifdef CONFIG_X86_32
 	/* switch away from the initial page table */
 	load_cr3(swapper_pg_dir);
+	/*
+	 * Initialize the CR4 shadow before doing anything that could
+	 * try to read it.
+	 */
+	cr4_init_shadow();
 	__flush_tlb_all();
 #endif
 	load_current_idt();
......
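__flush_tlb_all() on a PGE-capable CPU performs the global flush by rewriting CR4, and CR4 writes go through a per-CPU shadow copy, so the shadow must be seeded from the real register before the first flush on the secondary CPU. A toy single-threaded sketch of that ordering; hw_cr4, the hard-coded values and the helpers are made up for illustration, not the kernel's tlbflush code.

#include <stdio.h>

#define X86_CR4_PGE 0x80UL			/* bit 7: page global enable */

static unsigned long hw_cr4 = 0x001006f0;	/* pretend hardware register */
static unsigned long cr4_shadow;		/* cached copy, 0 until seeded */

static void cr4_init_shadow(void)
{
	cr4_shadow = hw_cr4;		/* read the real register once */
}

static void cr4_set_bits(unsigned long set)
{
	cr4_shadow |= set;		/* writes go via the shadow ... */
	hw_cr4 = cr4_shadow;		/* ... and then hit the "register" */
}

static void cr4_clear_bits(unsigned long clear)
{
	cr4_shadow &= ~clear;
	hw_cr4 = cr4_shadow;
}

static void flush_tlb_all(void)
{
	/* toggling PGE is what forces a full TLB flush on real hardware */
	cr4_clear_bits(X86_CR4_PGE);
	cr4_set_bits(X86_CR4_PGE);
}

int main(void)
{
	cr4_init_shadow();	/* without this, the flush writes back a bogus 0-based CR4 */
	flush_tlb_all();
	printf("cr4 = %#lx\n", hw_cr4);
	return 0;
}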
@@ -65,6 +65,18 @@
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 #endif
 
+/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline	__attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
 /*
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old.
@@ -72,19 +84,22 @@
  * -Wunused-function. This turns out to avoid the need for complex #ifdef
  * directives. Suppress the warning in clang as well by using "unused"
  * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline		__attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__	__attribute__((always_inline,unused)) notrace
-#define __inline __inline	__attribute__((always_inline,unused)) notrace
+#define inline \
+	inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline		__attribute__((unused)) notrace
-#define __inline__ __inline__	__attribute__((unused)) notrace
-#define __inline __inline	__attribute__((unused)) notrace
+#define inline inline	__attribute__((unused)) notrace __gnu_inline
 #endif
 
+#define __inline__ inline
+#define __inline inline
 #define __always_inline	inline __attribute__((always_inline))
 #define noinline	__attribute__((noinline))
......
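The practical effect of folding __gnu_inline into the kernel's inline macro: functions declared extern inline keep gnu89 semantics, so the inline body never becomes an externally visible symbol, and either always_inline or an explicit out-of-line copy (arch/x86/kernel/irqflags.S above, for native_save_fl()) has to provide the callable definition. Below is a small self-contained sketch of the always_inline branch with the attributes spelled out by hand; notrace is omitted and read_flags() is an illustrative name, not kernel code.

#include <stdio.h>

/*
 * gnu_inline: gnu89 "extern inline" semantics -- this body is used only for
 * inlining, and no symbol named read_flags is emitted from this file.
 * always_inline: the body is inlined even at -O0, so no out-of-line copy is
 * needed either (without it, a non-inlined call would become an unresolved
 * reference, which is exactly why native_save_fl() gained irqflags.S).
 */
extern inline
__attribute__((always_inline, unused, gnu_inline))
unsigned long read_flags(void)
{
	unsigned long flags;

	asm volatile("pushf ; pop %0" : "=rm" (flags) : /* no input */ : "memory");
	return flags;
}

int main(void)
{
	printf("flags = %#lx\n", read_flags());
	return 0;
}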