Commit 44b04912 authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/mpx: Do not allow MPX if we have mappings above 47-bit

MPX (without the MAWA extension) cannot handle addresses above 47 bits, so we
need to make sure that MPX cannot be enabled if a VMA above that boundary
already exists, and forbid creating such VMAs once MPX is enabled.

The patch implements mpx_unmapped_area_check(), which is called from all
variants of get_unmapped_area() to check whether the requested address range
is usable with MPX.

On enabling MPX, we check whether any VMA already exists above the 47-bit
boundary and forbid enabling MPX if one does.

As long as DEFAULT_MAP_WINDOW is equal to TASK_SIZE_MAX, the change is a
no-op. That will change once userspace is allowed to have mappings above
47 bits.
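
To make the intended semantics concrete, here is a minimal, self-contained
sketch of the rules described above. It mirrors mpx_unmapped_area_check()
from the diff below; MAP_WINDOW_47BIT and mpx_is_managed are illustrative
stand-ins for the kernel's DEFAULT_MAP_WINDOW and
kernel_managing_mpx_tables(current->mm):

    #include <errno.h>
    #include <stdbool.h>
    #include <sys/mman.h>   /* MAP_FIXED */

    /* Stand-in for DEFAULT_MAP_WINDOW: (1UL << 47) - PAGE_SIZE on x86-64. */
    #define MAP_WINDOW_47BIT        ((1UL << 47) - 4096UL)

    /* Stand-in for kernel_managing_mpx_tables(current->mm). */
    static bool mpx_is_managed;

    static unsigned long sketch_mpx_area_check(unsigned long addr,
                                               unsigned long len,
                                               unsigned long flags)
    {
            if (!mpx_is_managed)
                    return addr;    /* MPX off: no restriction */
            if (addr + len <= MAP_WINDOW_47BIT)
                    return addr;    /* request fits below the 47-bit boundary */
            if (flags & MAP_FIXED)
                    return -ENOMEM; /* fixed mapping above the boundary: fail */
            if (len > MAP_WINDOW_47BIT)
                    return -ENOMEM; /* cannot fit below the boundary at all */
            return 0;               /* drop the hint; search below the boundary */
    }

In the kernel, the return value feeds straight into the get_unmapped_area()
implementations: an error value aborts the allocation, while 0 discards the
hint so the search stays within DEFAULT_MAP_WINDOW.
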
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170716225954.74185-6-kirill.shutemov@linux.intel.com
[ Readability edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e8f01a8d
@@ -73,6 +73,9 @@ static inline void mpx_mm_init(struct mm_struct *mm)
}
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long start, unsigned long end);
unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
unsigned long flags);
#else
static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
@@ -94,6 +97,12 @@ static inline void mpx_notify_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
static inline unsigned long mpx_unmapped_area_check(unsigned long addr,
unsigned long len, unsigned long flags)
{
return addr;
}
#endif /* CONFIG_X86_INTEL_MPX */
#endif /* _ASM_X86_MPX_H */
@@ -809,6 +809,7 @@ static inline void spin_lock_prefetch(const void *x)
#define IA32_PAGE_OFFSET PAGE_OFFSET
#define TASK_SIZE PAGE_OFFSET
#define TASK_SIZE_MAX TASK_SIZE
#define DEFAULT_MAP_WINDOW TASK_SIZE
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
@@ -850,6 +851,8 @@ static inline void spin_lock_prefetch(const void *x)
*/
#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
#define DEFAULT_MAP_WINDOW TASK_SIZE_MAX
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
...
@@ -21,6 +21,7 @@
#include <asm/compat.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/mpx.h>
/*
* Align a virtual address to avoid aliasing in the I$ on AMD F15h.
@@ -132,6 +133,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_unmapped_area_info info;
unsigned long begin, end;
addr = mpx_unmapped_area_check(addr, len, flags);
if (IS_ERR_VALUE(addr))
return addr;
if (flags & MAP_FIXED)
return addr;
@@ -171,6 +176,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
addr = mpx_unmapped_area_check(addr, len, flags);
if (IS_ERR_VALUE(addr))
return addr;
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
...
@@ -18,6 +18,7 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>
#if 0 /* This is just for testing */
struct page *
@@ -135,6 +136,11 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (len & ~huge_page_mask(h))
return -EINVAL;
addr = mpx_unmapped_area_check(addr, len, flags);
if (IS_ERR_VALUE(addr))
return addr;
if (len > TASK_SIZE)
return -ENOMEM;
...
@@ -355,10 +355,19 @@ int mpx_enable_management(void)
*/
bd_base = mpx_get_bounds_dir();
down_write(&mm->mmap_sem);
/* MPX doesn't support addresses above 47 bits yet. */
if (find_vma(mm, DEFAULT_MAP_WINDOW)) {
pr_warn_once("%s (%d): MPX cannot handle addresses "
"above 47-bits. Disabling.",
current->comm, current->pid);
ret = -ENXIO;
goto out;
}
mm->context.bd_addr = bd_base;
if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
ret = -ENXIO;
out:
up_write(&mm->mmap_sem);
return ret;
}
@@ -1030,3 +1039,25 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
if (ret)
force_sig(SIGSEGV, current);
}
/* MPX cannot handle addresses above 47 bits yet. */
unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
unsigned long flags)
{
if (!kernel_managing_mpx_tables(current->mm))
return addr;
if (addr + len <= DEFAULT_MAP_WINDOW)
return addr;
if (flags & MAP_FIXED)
return -ENOMEM;
/*
* Requested len is larger than the whole area we're allowed to map in.
* Resetting hinting address wouldn't do much good -- fail early.
*/
if (len > DEFAULT_MAP_WINDOW)
return -ENOMEM;
/* Look for unmap area within DEFAULT_MAP_WINDOW */
return 0;
}