Commit a6c19dfe authored by Andy Lutomirski, committed by Linus Torvalds

arm64,ia64,ppc,s390,sh,tile,um,x86,mm: remove default gate area

The core mm code will provide a default gate area based on
FIXADDR_USER_START and FIXADDR_USER_END if
!defined(__HAVE_ARCH_GATE_AREA) && defined(AT_SYSINFO_EHDR).

This default is only useful for ia64.  arm64, ppc, s390, sh, tile, 64-bit
UML, and x86_32 have their own code just to disable it.  arm, 32-bit UML,
and x86_64 have gate areas, but they have their own implementations.

This gets rid of the default and moves the code into ia64.

This should save some code on architectures without a gate area: it's now
possible to inline the gate_area functions in the default case.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Acked-by: Nathan Lynch <nathan_lynch@mentor.com>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> [in principle]
Acked-by: Richard Weinberger <richard@nod.at> [for um]
Acked-by: Will Deacon <will.deacon@arm.com> [for arm64]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Nathan Lynch <Nathan_Lynch@mentor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e0d9bf4c
...@@ -28,9 +28,6 @@ ...@@ -28,9 +28,6 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1)) #define PAGE_MASK (~(PAGE_SIZE-1))
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
/* /*
* The idmap and swapper page tables need some space reserved in the kernel * The idmap and swapper page tables need some space reserved in the kernel
* image. Both require pgd, pud (4 levels only) and pmd tables to (section) * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
......
...@@ -194,25 +194,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, ...@@ -194,25 +194,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
return PTR_ERR(ret); return PTR_ERR(ret);
} }
/*
* We define AT_SYSINFO_EHDR, so we need these function stubs to keep
* Linux happy.
*/
int in_gate_area_no_mm(unsigned long addr)
{
return 0;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
/* /*
* Update the vDSO data page to keep in sync with kernel timekeeping. * Update the vDSO data page to keep in sync with kernel timekeeping.
*/ */
......
...@@ -231,4 +231,6 @@ get_order (unsigned long size) ...@@ -231,4 +231,6 @@ get_order (unsigned long size)
#define PERCPU_ADDR (-PERCPU_PAGE_SIZE) #define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE) #define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
#define __HAVE_ARCH_GATE_AREA 1
#endif /* _ASM_IA64_PAGE_H */ #endif /* _ASM_IA64_PAGE_H */
...@@ -278,6 +278,37 @@ setup_gate (void) ...@@ -278,6 +278,37 @@ setup_gate (void)
ia64_patch_gate(); ia64_patch_gate();
} }
static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
gate_vma.vm_mm = NULL;
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
gate_vma.vm_page_prot = __P101;
return 0;
}
__initcall(gate_vma_init);
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return &gate_vma;
}
int in_gate_area_no_mm(unsigned long addr)
{
if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
return 1;
return 0;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return in_gate_area_no_mm(addr);
}
void ia64_mmu_init(void *my_cpu_data) void ia64_mmu_init(void *my_cpu_data)
{ {
unsigned long pta, impl_va_bits; unsigned long pta, impl_va_bits;
......
...@@ -48,9 +48,6 @@ extern unsigned int HPAGE_SHIFT; ...@@ -48,9 +48,6 @@ extern unsigned int HPAGE_SHIFT;
#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1) #define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
#endif #endif
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
/* /*
* Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
* assign PAGE_MASK to a larger type it gets extended the way we want * assign PAGE_MASK to a larger type it gets extended the way we want
......
...@@ -840,19 +840,3 @@ static int __init vdso_init(void) ...@@ -840,19 +840,3 @@ static int __init vdso_init(void)
return 0; return 0;
} }
arch_initcall(vdso_init); arch_initcall(vdso_init);
int in_gate_area_no_mm(unsigned long addr)
{
return 0;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
...@@ -162,6 +162,4 @@ static inline int devmem_is_allowed(unsigned long pfn) ...@@ -162,6 +162,4 @@ static inline int devmem_is_allowed(unsigned long pfn)
#include <asm-generic/memory_model.h> #include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
#define __HAVE_ARCH_GATE_AREA 1
#endif /* _S390_PAGE_H */ #endif /* _S390_PAGE_H */
...@@ -316,18 +316,3 @@ static int __init vdso_init(void) ...@@ -316,18 +316,3 @@ static int __init vdso_init(void)
return 0; return 0;
} }
early_initcall(vdso_init); early_initcall(vdso_init);
int in_gate_area_no_mm(unsigned long addr)
{
return 0;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
...@@ -186,11 +186,6 @@ typedef struct page *pgtable_t; ...@@ -186,11 +186,6 @@ typedef struct page *pgtable_t;
#include <asm-generic/memory_model.h> #include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
/* vDSO support */
#ifdef CONFIG_VSYSCALL
#define __HAVE_ARCH_GATE_AREA
#endif
/* /*
* Some drivers need to perform DMA into kmalloc'ed buffers * Some drivers need to perform DMA into kmalloc'ed buffers
* and so we have to increase the kmalloc minalign for this. * and so we have to increase the kmalloc minalign for this.
......
...@@ -92,18 +92,3 @@ const char *arch_vma_name(struct vm_area_struct *vma) ...@@ -92,18 +92,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return NULL; return NULL;
} }
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
int in_gate_area(struct mm_struct *mm, unsigned long address)
{
return 0;
}
int in_gate_area_no_mm(unsigned long address)
{
return 0;
}
...@@ -38,12 +38,6 @@ ...@@ -38,12 +38,6 @@
#define PAGE_MASK (~(PAGE_SIZE - 1)) #define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HPAGE_MASK (~(HPAGE_SIZE - 1))
/*
* We do define AT_SYSINFO_EHDR to support vDSO,
* but don't use the gate mechanism.
*/
#define __HAVE_ARCH_GATE_AREA 1
/* /*
* If the Kconfig doesn't specify, set a maximum zone order that * If the Kconfig doesn't specify, set a maximum zone order that
* is enough so that we can create huge pages from small pages given * is enough so that we can create huge pages from small pages given
......
...@@ -121,21 +121,6 @@ const char *arch_vma_name(struct vm_area_struct *vma) ...@@ -121,21 +121,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return NULL; return NULL;
} }
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
int in_gate_area(struct mm_struct *mm, unsigned long address)
{
return 0;
}
int in_gate_area_no_mm(unsigned long address)
{
return 0;
}
int setup_vdso_pages(void) int setup_vdso_pages(void)
{ {
struct page **pagelist; struct page **pagelist;
......
...@@ -119,4 +119,9 @@ extern unsigned long uml_physmem; ...@@ -119,4 +119,9 @@ extern unsigned long uml_physmem;
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
#define __HAVE_ARCH_GATE_AREA 1
#endif
#endif /* __UM_PAGE_H */ #endif /* __UM_PAGE_H */
...@@ -70,7 +70,6 @@ extern bool __virt_addr_valid(unsigned long kaddr); ...@@ -70,7 +70,6 @@ extern bool __virt_addr_valid(unsigned long kaddr);
#include <asm-generic/memory_model.h> #include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
#define __HAVE_ARCH_GATE_AREA 1
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -39,4 +39,6 @@ void copy_page(void *to, void *from); ...@@ -39,4 +39,6 @@ void copy_page(void *to, void *from);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#define __HAVE_ARCH_GATE_AREA 1
#endif /* _ASM_X86_PAGE_64_H */ #endif /* _ASM_X86_PAGE_64_H */
...@@ -216,6 +216,5 @@ extern long elf_aux_hwcap; ...@@ -216,6 +216,5 @@ extern long elf_aux_hwcap;
#define ELF_HWCAP (elf_aux_hwcap) #define ELF_HWCAP (elf_aux_hwcap)
#define SET_PERSONALITY(ex) do ; while(0) #define SET_PERSONALITY(ex) do ; while(0)
#define __HAVE_ARCH_GATE_AREA 1
#endif #endif
...@@ -9,18 +9,3 @@ const char *arch_vma_name(struct vm_area_struct *vma) ...@@ -9,18 +9,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return NULL; return NULL;
} }
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
int in_gate_area_no_mm(unsigned long addr)
{
return 0;
}
...@@ -115,23 +115,6 @@ static __init int ia32_binfmt_init(void) ...@@ -115,23 +115,6 @@ static __init int ia32_binfmt_init(void)
return 0; return 0;
} }
__initcall(ia32_binfmt_init); __initcall(ia32_binfmt_init);
#endif #endif /* CONFIG_SYSCTL */
#else /* CONFIG_X86_32 */
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
int in_gate_area_no_mm(unsigned long addr)
{
return 0;
}
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
...@@ -2014,13 +2014,20 @@ static inline bool kernel_page_present(struct page *page) { return true; } ...@@ -2014,13 +2014,20 @@ static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */ #endif /* CONFIG_HIBERNATION */
#endif #endif
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
#ifdef __HAVE_ARCH_GATE_AREA extern int in_gate_area_no_mm(unsigned long addr);
int in_gate_area_no_mm(unsigned long addr); extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else #else
int in_gate_area_no_mm(unsigned long addr); static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);}) {
return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
#endif /* __HAVE_ARCH_GATE_AREA */ #endif /* __HAVE_ARCH_GATE_AREA */
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
......
...@@ -3430,44 +3430,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) ...@@ -3430,44 +3430,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
} }
#endif /* __PAGETABLE_PMD_FOLDED */ #endif /* __PAGETABLE_PMD_FOLDED */
#if !defined(__HAVE_ARCH_GATE_AREA)
#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
gate_vma.vm_mm = NULL;
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
gate_vma.vm_page_prot = __P101;
return 0;
}
__initcall(gate_vma_init);
#endif
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef AT_SYSINFO_EHDR
return &gate_vma;
#else
return NULL;
#endif
}
int in_gate_area_no_mm(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
return 1;
#endif
return 0;
}
#endif /* __HAVE_ARCH_GATE_AREA */
static int __follow_pte(struct mm_struct *mm, unsigned long address, static int __follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp) pte_t **ptepp, spinlock_t **ptlp)
{ {
......
...@@ -1981,11 +1981,6 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) ...@@ -1981,11 +1981,6 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
return -ENOMEM; return -ENOMEM;
} }
int in_gate_area_no_mm(unsigned long addr)
{
return 0;
}
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
BUG(); BUG();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment