Commit f9d4861f authored by Will Deacon, committed by Russell King

ARM: 7294/1: vectors: use gate_vma for vectors user mapping

The current user mapping for the vectors page is inserted as a `horrible
hack vma' into each task via arch_setup_additional_pages. This causes
problems with the MM subsystem and vm_normal_page, as described here:

https://lkml.org/lkml/2012/1/14/55

Following the suggestion from Hugh in the above thread, this patch uses
the gate_vma for the vectors user mapping, thereby consolidating the
horrible hack VMAs into one.

Acked-and-Tested-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 195864cf
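
For orientation before the diff: an architecture that defines __HAVE_ARCH_GATE_AREA
(as the asm/page.h hunk below does) must supply the gate-area hooks itself; otherwise
the generic MM code falls back to no-op stubs. The interface this patch implements
looked roughly like the following in include/linux/mm.h of that era (paraphrased for
reference only; not part of this patch):

#ifdef __HAVE_ARCH_GATE_AREA
/* The architecture provides these three hooks. */
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
extern int in_gate_area_no_mm(unsigned long addr);
#else
/* Default stubs: no gate area on this architecture. */
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
static inline int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}
#endif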
@@ -130,8 +130,4 @@ struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
-extern int vectors_user_mapping(void);
-#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
-#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-
 #endif

@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
+#include <asm-generic/mm_hooks.h>
 
 void __check_kvm_seq(struct mm_struct *mm);

@@ -133,32 +134,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
-/*
- * We are inserting a "fake" vma for the user-accessible vector page so
- * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
- * But we also want to remove it before the generic code gets to see it
- * during process exit or the unmapping of it would cause total havoc.
- * (the macro is used as remove_vma() is static to mm/mmap.c)
- */
-#define arch_exit_mmap(mm) \
-do { \
-	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
-	if (high_vma) { \
-		BUG_ON(high_vma->vm_next);  /* it should be last */ \
-		if (high_vma->vm_prev) \
-			high_vma->vm_prev->vm_next = NULL; \
-		else \
-			mm->mmap = NULL; \
-		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
-		mm->mmap_cache = NULL; \
-		mm->map_count--; \
-		remove_vma(high_vma); \
-	} \
-} while (0)
-
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
-{
-}
-
 #endif
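
With the fake per-process VMA gone, ARM no longer needs its own
arch_exit_mmap()/arch_dup_mmap(); the <asm-generic/mm_hooks.h> include added in the
hunk above supplies the defaults. For reference, the contemporary version of that
header was essentially just a pair of no-op hooks (reproduced from memory; not part
of this patch):

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}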
@@ -151,6 +151,8 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+#define __HAVE_ARCH_GATE_AREA 1
+
 #ifdef CONFIG_ARM_LPAE
 #include <asm/pgtable-3level-types.h>
 #else

@@ -526,22 +526,40 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 #ifdef CONFIG_MMU
 /*
  * The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Let's declare a mapping
- * for it so it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers and the signal restart code. Insert it into the
+ * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
  */
+static struct vm_area_struct gate_vma;
 
-int vectors_user_mapping(void)
+static int __init gate_vma_init(void)
 {
-	struct mm_struct *mm = current->mm;
-	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
-				       VM_READ | VM_EXEC |
-				       VM_MAYREAD | VM_MAYEXEC |
-				       VM_ALWAYSDUMP | VM_RESERVED,
-				       NULL);
+	gate_vma.vm_start	= 0xffff0000;
+	gate_vma.vm_end		= 0xffff0000 + PAGE_SIZE;
+	gate_vma.vm_page_prot	= PAGE_READONLY_EXEC;
+	gate_vma.vm_flags	= VM_READ | VM_EXEC |
+				  VM_MAYREAD | VM_MAYEXEC |
+				  VM_ALWAYSDUMP;
+	return 0;
+}
+arch_initcall(gate_vma_init);
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+	return &gate_vma;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
+}
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+	return in_gate_area(NULL, addr);
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+	return (vma == &gate_vma) ? "[vectors]" : NULL;
 }
 #endif
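
A note on why the single static gate_vma is safe: it is never linked into any
task's VMA list or rbtree, so exit_mmap() and fork() never see it, yet ptrace and
/proc/<pid>/mem still reach the vectors page because the generic access path
consults the gate-area hooks when an address has no ordinary mapping. A simplified
sketch of that fallback follows (a paraphrase using a hypothetical helper name;
the real logic lives inline in mm/memory.c):

#include <linux/mm.h>

/*
 * lookup_vma_for_access() is a hypothetical name for illustration only.
 * It mirrors how the generic code resolves an address to a VMA, trying
 * the per-mm mappings first and the shared gate VMA second.
 */
static struct vm_area_struct *lookup_vma_for_access(struct mm_struct *mm,
						    unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (vma && vma->vm_start <= addr)
		return vma;			/* ordinary per-mm mapping */
	if (in_gate_area(mm, addr))
		return get_gate_vma(mm);	/* the shared gate VMA */
	return NULL;				/* no mapping: fault */
}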