Commit ccaaaf6f authored by Linus Torvalds

Merge tag 'mpx-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/daveh/x86-mpx

Pull x86 MPX removal from Dave Hansen:
 "MPX requires recompiling applications, which requires compiler
   support. Unfortunately, GCC 9.1 is expected to be released without
  support for MPX. This means that there was only a relatively small
  window where folks could have ever used MPX. It failed to gain wide
  adoption in the industry, and Linux was the only mainstream OS to ever
  support it widely.

  Support for the feature may also disappear on future processors.

  This set completes the process that we started during the 5.4 merge
  window when the MPX prctl()s were removed. XSAVE support is left in
  place, which allows MPX-using KVM guests to continue to function"
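
The compiler dependency mentioned above is the crux: MPX only checks pointer accesses that the toolchain explicitly instruments (GCC's -mmpx/-fcheck-pointer-bounds mode emitted BNDMK/BNDCL/BNDCU instructions operating on per-pointer bounds registers). As a rough sketch, with plain C standing in for those hardware instructions rather than the real MPX ABI or intrinsics, an instrumented load amounted to:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: models in C what the BND* instructions did. */
struct bnd { uintptr_t lower; uintptr_t upper; };	/* like BND0-BND3 */

static char checked_load(const char *p, struct bnd b)
{
	if ((uintptr_t)p < b.lower)	/* BNDCL: below lower bound */
		abort();		/* hardware raised #BR instead */
	if ((uintptr_t)p > b.upper)	/* BNDCU: above upper bound */
		abort();
	return *p;
}

int main(void)
{
	char buf[16] = "hello";
	struct bnd b = { (uintptr_t)buf, (uintptr_t)buf + sizeof(buf) - 1 };

	return checked_load(buf + 4, b);	/* in bounds, no fault */
}

Once GCC dropped that instrumentation, no new MPX-protected binaries could be built, which is what closed the adoption window described above.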

* tag 'mpx-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/daveh/x86-mpx:
  x86/mpx: remove MPX from arch/x86
  mm: remove arch_bprm_mm_init() hook
  x86/mpx: remove bounds exception code
  x86/mpx: remove build infrastructure
  x86/alternatives: add missing insn.h include
parents 35c222fd 45fc24e8
@@ -238,11 +238,6 @@ static inline void arch_unmap(struct mm_struct *mm,
 	mm->context.vdso_base = 0;
 }
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 #ifdef CONFIG_PPC_MEM_KEYS
 bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
 			       bool execute, bool foreign);
...
@@ -25,11 +25,6 @@ static inline void arch_unmap(struct mm_struct *mm,
 			      unsigned long start, unsigned long end)
 {
 }
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
...
@@ -89,11 +89,6 @@ static inline void arch_unmap(struct mm_struct *mm,
 {
 }
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
...
@@ -1888,34 +1888,6 @@ config X86_UMIP
 	  specific cases in protected and virtual-8086 modes. Emulated
 	  results are dummy.
 
-config X86_INTEL_MPX
-	prompt "Intel MPX (Memory Protection Extensions)"
-	def_bool n
-	# Note: only available in 64-bit mode due to VMA flags shortage
-	depends on CPU_SUP_INTEL && X86_64
-	select ARCH_USES_HIGH_VMA_FLAGS
-	---help---
-	  MPX provides hardware features that can be used in
-	  conjunction with compiler-instrumented code to check
-	  memory references. It is designed to detect buffer
-	  overflow or underflow bugs.
-
-	  This option enables running applications which are
-	  instrumented or otherwise use MPX.  It does not use MPX
-	  itself inside the kernel or to protect the kernel
-	  against bad memory references.
-
-	  Enabling this option will make the kernel larger:
-	  ~8k of kernel text and 36 bytes of data on a 64-bit
-	  defconfig.  It adds a long to the 'mm_struct' which
-	  will increase the kernel memory overhead of each
-	  process and adds some branches to paths used during
-	  exec() and munmap().
-
-	  For details, see Documentation/x86/intel_mpx.rst
-
-	  If unsure, say N.
-
 config X86_INTEL_MEMORY_PROTECTION_KEYS
 	prompt "Intel Memory Protection Keys"
 	def_bool y
...
@@ -6,12 +6,6 @@
 
 extern void check_bugs(void);
 
-#if defined(CONFIG_CPU_SUP_INTEL)
-void check_mpx_erratum(struct cpuinfo_x86 *c);
-#else
-static inline void check_mpx_erratum(struct cpuinfo_x86 *c) {}
-#endif
-
 #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
 #else
...
@@ -10,12 +10,6 @@
  * cpu_feature_enabled().
  */
 
-#ifdef CONFIG_X86_INTEL_MPX
-# define DISABLE_MPX	0
-#else
-# define DISABLE_MPX	(1<<(X86_FEATURE_MPX & 31))
-#endif
-
 #ifdef CONFIG_X86_SMAP
 # define DISABLE_SMAP	0
 #else
@@ -74,7 +68,7 @@
 #define DISABLED_MASK6	0
 #define DISABLED_MASK7	(DISABLE_PTI)
 #define DISABLED_MASK8	0
-#define DISABLED_MASK9	(DISABLE_MPX|DISABLE_SMAP)
+#define DISABLED_MASK9	(DISABLE_SMAP)
 #define DISABLED_MASK10	0
 #define DISABLED_MASK11	0
 #define DISABLED_MASK12	0
...
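
For context on the deleted DISABLE_MPX definition: the kernel tracks CPU features in 32-bit words, and X86_FEATURE_MPX lives in word 9 (the CPUID.7:EBX word), so DISABLED_MASK9 is where its compile-time disable bit belongs; the `& 31` selects the bit within that word. A minimal standalone sketch of the arithmetic, using an assumed feature encoding for illustration:

#include <stdio.h>

/* Assumption: the kernel encodes each feature as word*32 + bit. */
#define DEMO_FEATURE(word, bit)	((word) * 32 + (bit))
/* Assumed placement for the demo: MPX as bit 14 of feature word 9. */
#define DEMO_X86_FEATURE_MPX	DEMO_FEATURE(9, 14)

int main(void)
{
	unsigned int f = DEMO_X86_FEATURE_MPX;

	/* f / 32 picks DISABLED_MASK<n>; 1 << (f & 31) is DISABLE_MPX. */
	printf("word %u, disable bit 0x%08x\n", f / 32, 1u << (f & 31));
	return 0;
}

With CONFIG_X86_INTEL_MPX gone, word 9's mask no longer needs the MPX bit, hence the one-line DISABLED_MASK9 change above.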
@@ -50,10 +50,6 @@ typedef struct {
 	u16 pkey_allocation_map;
 	s16 execute_only_pkey;
 #endif
-#ifdef CONFIG_X86_INTEL_MPX
-	/* address of the bounds directory */
-	void __user	*bd_addr;
-#endif
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(mm)						\
...
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
-#include <asm/mpx.h>
 #include <asm/debugreg.h>
 
 extern atomic64_t last_mm_ctx_id;
@@ -200,34 +199,9 @@ static inline bool is_64bit_mm(struct mm_struct *mm)
 }
 #endif
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-		struct vm_area_struct *vma)
-{
-	mpx_mm_init(mm);
-}
-
 static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
 			      unsigned long end)
 {
-	/*
-	 * mpx_notify_unmap() goes and reads a rarely-hot
-	 * cacheline in the mm_struct.  That can be expensive
-	 * enough to be seen in profiles.
-	 *
-	 * The mpx_notify_unmap() call and its contents have been
-	 * observed to affect munmap() performance on hardware
-	 * where MPX is not present.
-	 *
-	 * The unlikely() optimizes for the fast case: no MPX
-	 * in the CPU, or no MPX use in the process.  Even if
-	 * we get this wrong (in the unlikely event that MPX
-	 * is widely enabled on some system) the overhead of
-	 * MPX itself (reading bounds tables) is expected to
-	 * overwhelm the overhead of getting this unlikely()
-	 * consistently wrong.
-	 */
-	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
-		mpx_notify_unmap(mm, start, end);
 }
 
 /*
...
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MPX_H
#define _ASM_X86_MPX_H

#include <linux/types.h>
#include <linux/mm_types.h>

#include <asm/ptrace.h>
#include <asm/insn.h>

/*
 * NULL is theoretically a valid place to put the bounds
 * directory, so point this at an invalid address.
 */
#define MPX_INVALID_BOUNDS_DIR	((void __user *)-1)
#define MPX_BNDCFG_ENABLE_FLAG	0x1
#define MPX_BD_ENTRY_VALID_FLAG	0x1

/*
 * The upper 28 bits [47:20] of the virtual address in 64-bit
 * are used to index into bounds directory (BD).
 *
 * The directory is 2G (2^31) in size, and with 8-byte entries
 * it has 2^28 entries.
 */
#define MPX_BD_SIZE_BYTES_64	(1UL<<31)
#define MPX_BD_ENTRY_BYTES_64	8
#define MPX_BD_NR_ENTRIES_64	(MPX_BD_SIZE_BYTES_64/MPX_BD_ENTRY_BYTES_64)

/*
 * The 32-bit directory is 4MB (2^22) in size, and with 4-byte
 * entries it has 2^20 entries.
 */
#define MPX_BD_SIZE_BYTES_32	(1UL<<22)
#define MPX_BD_ENTRY_BYTES_32	4
#define MPX_BD_NR_ENTRIES_32	(MPX_BD_SIZE_BYTES_32/MPX_BD_ENTRY_BYTES_32)

/*
 * A 64-bit table is 4MB total in size, and an entry is
 * 4 64-bit pointers in size.
 */
#define MPX_BT_SIZE_BYTES_64	(1UL<<22)
#define MPX_BT_ENTRY_BYTES_64	32
#define MPX_BT_NR_ENTRIES_64	(MPX_BT_SIZE_BYTES_64/MPX_BT_ENTRY_BYTES_64)

/*
 * A 32-bit table is 16kB total in size, and an entry is
 * 4 32-bit pointers in size.
 */
#define MPX_BT_SIZE_BYTES_32	(1UL<<14)
#define MPX_BT_ENTRY_BYTES_32	16
#define MPX_BT_NR_ENTRIES_32	(MPX_BT_SIZE_BYTES_32/MPX_BT_ENTRY_BYTES_32)

#define MPX_BNDSTA_TAIL		2
#define MPX_BNDCFG_TAIL		12
#define MPX_BNDSTA_ADDR_MASK	(~((1UL<<MPX_BNDSTA_TAIL)-1))
#define MPX_BNDCFG_ADDR_MASK	(~((1UL<<MPX_BNDCFG_TAIL)-1))
#define MPX_BNDSTA_ERROR_CODE	0x3

struct mpx_fault_info {
	void __user *addr;
	void __user *lower;
	void __user *upper;
};

#ifdef CONFIG_X86_INTEL_MPX

extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
extern int mpx_handle_bd_fault(void);

static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
{
	return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
}

static inline void mpx_mm_init(struct mm_struct *mm)
{
	/*
	 * NULL is theoretically a valid place to put the bounds
	 * directory, so point this at an invalid address.
	 */
	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
}

extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
			     unsigned long end);
extern unsigned long mpx_unmapped_area_check(unsigned long addr,
					     unsigned long len,
					     unsigned long flags);

#else

static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
{
	return -EINVAL;
}
static inline int mpx_handle_bd_fault(void)
{
	return -EINVAL;
}
static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
{
	return 0;
}
static inline void mpx_mm_init(struct mm_struct *mm)
{
}
static inline void mpx_notify_unmap(struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
}
static inline unsigned long mpx_unmapped_area_check(unsigned long addr,
						    unsigned long len,
						    unsigned long flags)
{
	return addr;
}

#endif /* CONFIG_X86_INTEL_MPX */

#endif /* _ASM_X86_MPX_H */
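
Those size macros fully determine the two-level lookup that this deleted header described: bits [47:20] of a 64-bit pointer index the bounds directory (2^28 entries of 8 bytes), and, by the table sizes above, bits [19:3] must index the bounds table (2^17 entries of 32 bytes). A standalone sketch of that index arithmetic (a hypothetical helper inferred from the constants, not code from the removed file):

#include <stdint.h>
#include <stdio.h>

#define BD_ENTRY_BYTES	8UL	/* MPX_BD_ENTRY_BYTES_64 */
#define BT_ENTRY_BYTES	32UL	/* MPX_BT_ENTRY_BYTES_64 */

/* Hypothetical illustration of the 64-bit address split. */
static void mpx_indices(uint64_t vaddr)
{
	uint64_t bd_index = (vaddr >> 20) & ((1UL << 28) - 1);	/* bits 47:20 */
	uint64_t bt_index = (vaddr >> 3) & ((1UL << 17) - 1);	/* bits 19:3 */

	printf("BD offset: %#llx, BT offset: %#llx\n",
	       (unsigned long long)(bd_index * BD_ENTRY_BYTES),
	       (unsigned long long)(bt_index * BT_ENTRY_BYTES));
}

int main(void)
{
	mpx_indices(0x00007f1234567890UL);
	return 0;
}

The 2GB directory plus up to 4MB per table is also why the kernel had to manage these structures lazily, allocating bounds tables only on directory faults.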
@@ -947,24 +947,6 @@ extern int set_tsc_mode(unsigned int val);
 
 DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 
-/* Register/unregister a process' MPX related resource */
-#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
-#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()
-
-#ifdef CONFIG_X86_INTEL_MPX
-extern int mpx_enable_management(void);
-extern int mpx_disable_management(void);
-#else
-static inline int mpx_enable_management(void)
-{
-	return -EINVAL;
-}
-static inline int mpx_disable_management(void)
-{
-	return -EINVAL;
-}
-#endif /* CONFIG_X86_INTEL_MPX */
-
 #ifdef CONFIG_CPU_SUP_AMD
 extern u16 amd_get_nb_id(int cpu);
 extern u32 amd_get_nodes_per_socket(void);
...
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mpx

#if !defined(_TRACE_MPX_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MPX_H

#include <linux/tracepoint.h>

#ifdef CONFIG_X86_INTEL_MPX

TRACE_EVENT(mpx_bounds_register_exception,

	TP_PROTO(void __user *addr_referenced,
		 const struct mpx_bndreg *bndreg),
	TP_ARGS(addr_referenced, bndreg),

	TP_STRUCT__entry(
		__field(void __user *, addr_referenced)
		__field(u64, lower_bound)
		__field(u64, upper_bound)
	),

	TP_fast_assign(
		__entry->addr_referenced = addr_referenced;
		__entry->lower_bound = bndreg->lower_bound;
		__entry->upper_bound = bndreg->upper_bound;
	),
	/*
	 * Note that we are printing out the '~' of the upper
	 * bounds register here.  It is actually stored in its
	 * one's complement form so that its 'init' state
	 * corresponds to all 0's.  But, that looks like
	 * gibberish when printed out, so print out the 1's
	 * complement instead of the actual value here.  Note
	 * though that you still need to specify filters for the
	 * actual value, not the displayed one.
	 */
	TP_printk("address referenced: 0x%p bounds: lower: 0x%llx ~upper: 0x%llx",
		  __entry->addr_referenced,
		  __entry->lower_bound,
		  ~__entry->upper_bound
	)
);

TRACE_EVENT(bounds_exception_mpx,

	TP_PROTO(const struct mpx_bndcsr *bndcsr),
	TP_ARGS(bndcsr),

	TP_STRUCT__entry(
		__field(u64, bndcfgu)
		__field(u64, bndstatus)
	),

	TP_fast_assign(
		/* need to get rid of the 'const' on bndcsr */
		__entry->bndcfgu = (u64)bndcsr->bndcfgu;
		__entry->bndstatus = (u64)bndcsr->bndstatus;
	),

	TP_printk("bndcfgu:0x%llx bndstatus:0x%llx",
		  __entry->bndcfgu,
		  __entry->bndstatus)
);

DECLARE_EVENT_CLASS(mpx_range_trace,

	TP_PROTO(unsigned long start,
		 unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(unsigned long, start)
		__field(unsigned long, end)
	),

	TP_fast_assign(
		__entry->start = start;
		__entry->end = end;
	),

	TP_printk("[0x%p:0x%p]",
		  (void *)__entry->start,
		  (void *)__entry->end
	)
);

DEFINE_EVENT(mpx_range_trace, mpx_unmap_zap,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end)
);

DEFINE_EVENT(mpx_range_trace, mpx_unmap_search,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end)
);

TRACE_EVENT(mpx_new_bounds_table,

	TP_PROTO(unsigned long table_vaddr),
	TP_ARGS(table_vaddr),

	TP_STRUCT__entry(
		__field(unsigned long, table_vaddr)
	),

	TP_fast_assign(
		__entry->table_vaddr = table_vaddr;
	),

	TP_printk("table vaddr:%p", (void *)__entry->table_vaddr)
);

#else

/*
 * This gets used outside of MPX-specific code, so we need a stub.
 */
static inline
void trace_bounds_exception_mpx(const struct mpx_bndcsr *bndcsr)
{
}

#endif /* CONFIG_X86_INTEL_MPX */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH asm/trace/
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mpx
#endif /* _TRACE_MPX_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
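
The comment in mpx_bounds_register_exception deserves a concrete example: hardware stores the upper bound in one's-complement form so that the XSAVE 'init state' of all zeroes decodes to the widest possible bound. A tiny standalone demonstration of that decoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t stored_upper = 0;	/* BND register in its init state */

	/* Decoded bound: ~0, i.e. the entire address space is in bounds. */
	printf("effective upper bound: %#llx\n",
	       (unsigned long long)~stored_upper);
	return 0;
}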
@@ -23,6 +23,7 @@
 #include <asm/nmi.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/insn.h>
 #include <asm/io.h>
 #include <asm/fixmap.h>
...
@@ -164,22 +164,6 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
-static int __init x86_mpx_setup(char *s)
-{
-	/* require an exact match without trailing characters */
-	if (strlen(s))
-		return 0;
-
-	/* do not emit a message if the feature is not present */
-	if (!boot_cpu_has(X86_FEATURE_MPX))
-		return 1;
-
-	setup_clear_cpu_cap(X86_FEATURE_MPX);
-	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
-
-	return 1;
-}
-__setup("nompx", x86_mpx_setup);
-
 #ifdef CONFIG_X86_64
 static int __init x86_nopcid_setup(char *s)
 {
@@ -306,8 +290,6 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 static __init int setup_disable_smep(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_SMEP);
-	/* Check for things that depend on SMEP being enabled: */
-	check_mpx_erratum(&boot_cpu_data);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
...
@@ -31,41 +31,6 @@
 #include <asm/apic.h>
 #endif
 
-/*
- * Just in case our CPU detection goes bad, or you have a weird system,
- * allow a way to override the automatic disabling of MPX.
- */
-static int forcempx;
-
-static int __init forcempx_setup(char *__unused)
-{
-	forcempx = 1;
-	return 1;
-}
-__setup("intel-skd-046-workaround=disable", forcempx_setup);
-
-void check_mpx_erratum(struct cpuinfo_x86 *c)
-{
-	if (forcempx)
-		return;
-	/*
-	 * Turn off the MPX feature on CPUs where SMEP is not
-	 * available or disabled.
-	 *
-	 * Works around Intel Erratum SKD046: "Branch Instructions
-	 * May Initialize MPX Bound Registers Incorrectly".
-	 *
-	 * This might falsely disable MPX on systems without
-	 * SMEP, like Atom processors without SMEP.  But there
-	 * is no such hardware known at the moment.
-	 */
-	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
-		setup_clear_cpu_cap(X86_FEATURE_MPX);
-		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
-	}
-}
-
 /*
  * Processors which have self-snooping capability can handle conflicting
  * memory type across CPUs by snooping its own cache. However, there exists
@@ -330,7 +295,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
 	}
 
-	check_mpx_erratum(c);
 	check_memory_type_self_snoop_errata(c);
 
 	/*
...
@@ -893,8 +893,6 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = _brk_end;
 
-	mpx_mm_init(&init_mm);
-
 	code_resource.start = __pa_symbol(_text);
 	code_resource.end = __pa_symbol(_etext)-1;
 	rodata_resource.start = __pa_symbol(__start_rodata);
...
@@ -22,7 +22,6 @@
 #include <asm/elf.h>
 #include <asm/ia32.h>
 #include <asm/syscalls.h>
-#include <asm/mpx.h>
 
 /*
  * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
@@ -137,10 +136,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_unmapped_area_info info;
 	unsigned long begin, end;
 
-	addr = mpx_unmapped_area_check(addr, len, flags);
-	if (IS_ERR_VALUE(addr))
-		return addr;
-
 	if (flags & MAP_FIXED)
 		return addr;
@@ -180,10 +175,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
 
-	addr = mpx_unmapped_area_check(addr, len, flags);
-	if (IS_ERR_VALUE(addr))
-		return addr;
-
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
 		return -ENOMEM;
...
@@ -52,8 +52,6 @@
 #include <asm/mach_traps.h>
 #include <asm/alternative.h>
 #include <asm/fpu/xstate.h>
-#include <asm/trace/mpx.h>
-#include <asm/mpx.h>
 #include <asm/vm86.h>
 #include <asm/umip.h>
 #include <asm/insn.h>
@@ -436,8 +434,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
 
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 {
-	const struct mpx_bndcsr *bndcsr;
-
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
 			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
@@ -447,76 +443,6 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 	if (!user_mode(regs))
 		die("bounds", regs, error_code);
 
-	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
-		/* The exception is not from Intel MPX */
-		goto exit_trap;
-	}
-
-	/*
-	 * We need to look at BNDSTATUS to resolve this exception.
-	 * A NULL here might mean that it is in its 'init state',
-	 * which is all zeros which indicates MPX was not
-	 * responsible for the exception.
-	 */
-	bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
-	if (!bndcsr)
-		goto exit_trap;
-
-	trace_bounds_exception_mpx(bndcsr);
-	/*
-	 * The error code field of the BNDSTATUS register communicates status
-	 * information of a bound range exception #BR or operation involving
-	 * bound directory.
-	 */
-	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
-	case 2:	/* Bound directory has invalid entry. */
-		if (mpx_handle_bd_fault())
-			goto exit_trap;
-		break; /* Success, it was handled */
-	case 1: /* Bound violation. */
-	{
-		struct task_struct *tsk = current;
-		struct mpx_fault_info mpx;
-
-		if (mpx_fault_info(&mpx, regs)) {
-			/*
-			 * We failed to decode the MPX instruction.  Act as if
-			 * the exception was not caused by MPX.
-			 */
-			goto exit_trap;
-		}
-		/*
-		 * Success, we decoded the instruction and retrieved
-		 * an 'mpx' containing the address being accessed
-		 * which caused the exception.  This information
-		 * allows an application to possibly handle the
-		 * #BR exception itself.
-		 */
-		if (!do_trap_no_signal(tsk, X86_TRAP_BR, "bounds", regs,
-				       error_code))
-			break;
-
-		show_signal(tsk, SIGSEGV, "trap ", "bounds", regs, error_code);
-		force_sig_bnderr(mpx.addr, mpx.lower, mpx.upper);
-		break;
-	}
-	case 0: /* No exception caused by Intel MPX operations. */
-		goto exit_trap;
-	default:
-		die("bounds", regs, error_code);
-	}
-
-	return;
-
-exit_trap:
-	/*
-	 * This path out is for all the cases where we could not
-	 * handle the exception in some way (like allocating a
-	 * table or telling userspace about it).  We will also end
-	 * up here if the kernel has MPX turned off at compile
-	 * time.
-	 */
 	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL);
 }
...
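
The removed do_bounds() dispatch keys off the low two bits of BNDSTATUS (MPX_BNDSTA_ERROR_CODE): 0 means the #BR was not MPX's, 1 is a bound violation delivered to userspace as SIGSEGV with bounds information, and 2 means an invalid bounds-directory entry for which the kernel must allocate a bounds table; the remaining bits (MPX_BNDSTA_ADDR_MASK) carry the associated address. A self-contained sketch of that field split, mirroring the switch above:

#include <stdint.h>
#include <stdio.h>

#define MPX_BNDSTA_ERROR_CODE	0x3UL
#define MPX_BNDSTA_ADDR_MASK	(~0x3UL)	/* low 2 bits are status */

/* Illustrative decode of a BNDSTATUS value, not kernel code. */
static void decode_bndstatus(uint64_t bndstatus)
{
	switch (bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 0:
		puts("no MPX-caused exception");
		break;
	case 1:
		puts("bound violation: SIGSEGV with lower/upper bounds info");
		break;
	case 2:
		printf("invalid BD entry at %#llx: allocate a bounds table\n",
		       (unsigned long long)(bndstatus & MPX_BNDSTA_ADDR_MASK));
		break;
	}
}

int main(void)
{
	decode_bndstatus(0x7f00aa000002UL);
	return 0;
}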
@@ -45,7 +45,6 @@ obj-$(CONFIG_AMD_NUMA)		+= amdtopology.o
 obj-$(CONFIG_ACPI_NUMA)		+= srat.o
 obj-$(CONFIG_NUMA_EMU)		+= numa_emulation.o
 
-obj-$(CONFIG_X86_INTEL_MPX)			+= mpx.o
 obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)	+= pkeys.o
 obj-$(CONFIG_RANDOMIZE_MEMORY)			+= kaslr.o
 obj-$(CONFIG_PAGE_TABLE_ISOLATION)		+= pti.o
...
@@ -19,7 +19,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 #include <asm/elf.h>
-#include <asm/mpx.h>
 
 #if 0	/* This is just for testing */
 struct page *
@@ -151,10 +150,6 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
 
-	addr = mpx_unmapped_area_check(addr, len, flags);
-	if (IS_ERR_VALUE(addr))
-		return addr;
-
 	if (len > TASK_SIZE)
 		return -ENOMEM;
...
@@ -163,8 +163,6 @@ unsigned long get_mmap_base(int is_legacy)
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	if (vma->vm_flags & VM_MPX)
-		return "[mpx]";
 	return NULL;
 }
...
@@ -272,7 +272,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 		goto err;
 
 	mm->stack_vm = mm->total_vm = 1;
-	arch_bprm_mm_init(mm, vma);
 	up_write(&mm->mmap_sem);
 	bprm->p = vma->vm_end - sizeof(void *);
 	return 0;
...
@@ -22,11 +22,6 @@ static inline void arch_unmap(struct mm_struct *mm,
 {
 }
 
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
-				     struct vm_area_struct *vma)
-{
-}
-
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
...