Commit 2947ba05 authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/mm/gup: Switch GUP to the generic get_user_page_fast() implementation

This patch provides all the callbacks required by the generic
get_user_pages_fast() code, switches x86 over to it, and removes
the platform-specific implementation.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aneesh Kumar K . V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dann Frazier <dann.frazier@canonical.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170316213906.89528-1-kirill.shutemov@linux.intel.com
[ Minor readability edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 73e10a61
...@@ -1666,7 +1666,7 @@ config ARCH_SELECT_MEMORY_MODEL ...@@ -1666,7 +1666,7 @@ config ARCH_SELECT_MEMORY_MODEL
config HAVE_ARCH_PFN_VALID config HAVE_ARCH_PFN_VALID
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
config HAVE_GENERIC_RCU_GUP config HAVE_GENERIC_GUP
def_bool y def_bool y
depends on ARM_LPAE depends on ARM_LPAE
......
...@@ -205,7 +205,7 @@ config GENERIC_CALIBRATE_DELAY ...@@ -205,7 +205,7 @@ config GENERIC_CALIBRATE_DELAY
config ZONE_DMA config ZONE_DMA
def_bool y def_bool y
config HAVE_GENERIC_RCU_GUP config HAVE_GENERIC_GUP
def_bool y def_bool y
config ARCH_DMA_ADDR_T_64BIT config ARCH_DMA_ADDR_T_64BIT
......
...@@ -135,7 +135,7 @@ config PPC ...@@ -135,7 +135,7 @@ config PPC
select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS select HAVE_GCC_PLUGINS
select HAVE_GENERIC_RCU_GUP select HAVE_GENERIC_GUP
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE select HAVE_IDE
select HAVE_IOREMAP_PROT select HAVE_IOREMAP_PROT
......
...@@ -2788,6 +2788,9 @@ config X86_DMA_REMAP ...@@ -2788,6 +2788,9 @@ config X86_DMA_REMAP
bool bool
depends on STA2X11 depends on STA2X11
config HAVE_GENERIC_GUP
def_bool y
source "net/Kconfig" source "net/Kconfig"
source "drivers/Kconfig" source "drivers/Kconfig"
......
...@@ -220,18 +220,6 @@ static inline int vma_pkey(struct vm_area_struct *vma) ...@@ -220,18 +220,6 @@ static inline int vma_pkey(struct vm_area_struct *vma)
} }
#endif #endif
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
u32 pkru = read_pkru();
if (!__pkru_allows_read(pkru, pkey))
return false;
if (write && !__pkru_allows_write(pkru, pkey))
return false;
return true;
}
/* /*
* We only want to enforce protection keys on the current process * We only want to enforce protection keys on the current process
* because we effectively have no access to PKRU for other * because we effectively have no access to PKRU for other
......
...@@ -215,4 +215,51 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp) ...@@ -215,4 +215,51 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) #define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
#define gup_get_pte gup_get_pte
/*
* WARNING: only to be used in the get_user_pages_fast() implementation.
*
* With get_user_pages_fast(), we walk down the pagetables without taking
* any locks. For this we would like to load the pointers atomically,
* but that is not possible (without expensive cmpxchg8b) on PAE. What
* we do have is the guarantee that a PTE will only either go from not
* present to present, or present to not present or both -- it will not
* switch to a completely different present page without a TLB flush in
* between; something that we are blocking by holding interrupts off.
*
* Setting ptes from not present to present goes:
*
* ptep->pte_high = h;
* smp_wmb();
* ptep->pte_low = l;
*
* And present to not present goes:
*
* ptep->pte_low = 0;
* smp_wmb();
* ptep->pte_high = 0;
*
* We must ensure here that the load of pte_low sees 'l' iff pte_high
* sees 'h'. We load pte_high *after* loading pte_low, which ensures we
* don't see an older value of pte_high. *Then* we recheck pte_low,
* which ensures that we haven't picked up a changed pte high. We might
* have gotten rubbish values from pte_low and pte_high, but we are
* guaranteed that pte_low will not have the present bit set *unless*
* it is 'l'. Because get_user_pages_fast() only operates on present ptes
* we're safe.
*/
/*
 * Atomically-enough read of a PAE pte for the lockless fast-GUP walk.
 * See the ordering discussion in the comment block above: load the low
 * half first, then the high half, then re-check the low half, so a torn
 * read can never produce a present pte paired with a stale high word.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
pte_t pte;
do {
pte.pte_low = ptep->pte_low;
smp_rmb(); /* read pte_low before pte_high (matches writer's smp_wmb) */
pte.pte_high = ptep->pte_high;
smp_rmb(); /* read pte_high before re-checking pte_low below */
} while (unlikely(pte.pte_low != ptep->pte_low));
return pte;
}
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */ #endif /* _ASM_X86_PGTABLE_3LEVEL_H */
...@@ -233,6 +233,11 @@ static inline int pud_devmap(pud_t pud) ...@@ -233,6 +233,11 @@ static inline int pud_devmap(pud_t pud)
return 0; return 0;
} }
#endif #endif
/* No device-dax mappings exist at the PGD level on x86; always false. */
static inline int pgd_devmap(pgd_t pgd)
{
return 0;
}
#endif #endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
...@@ -1136,6 +1141,54 @@ static inline u16 pte_flags_pkey(unsigned long pte_flags) ...@@ -1136,6 +1141,54 @@ static inline u16 pte_flags_pkey(unsigned long pte_flags)
#endif #endif
} }
/*
 * Check whether the current task's PKRU register permits read (and,
 * when @write is set, also write) access to pages tagged with @pkey.
 * Reads the live PKRU value, so this is only meaningful for 'current'.
 */
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
u32 pkru = read_pkru();
if (!__pkru_allows_read(pkru, pkey))
return false;
if (write && !__pkru_allows_write(pkru, pkey))
return false;
return true;
}
/*
* 'pteval' can come from a PTE, PMD or PUD. We only check
* _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
* same value on all 3 types.
*/
/*
 * Common access check for pte/pmd/pud values (see the comment above:
 * _PAGE_PRESENT, _PAGE_USER and _PAGE_RW have the same encoding at all
 * three levels).  The entry must be present and user-accessible, must
 * be writable when @write is set, and must also pass the protection-key
 * check for the current task.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
if (write)
need_pte_bits |= _PAGE_RW;
if ((pteval & need_pte_bits) != need_pte_bits)
return false; /* bool function: use false, not 0 */
return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}
#define pte_access_permitted pte_access_permitted
/* PTE-level wrapper: delegate to the shared flag + pkey check. */
static inline bool pte_access_permitted(pte_t pte, bool write)
{
return __pte_access_permitted(pte_val(pte), write);
}
#define pmd_access_permitted pmd_access_permitted
/* PMD-level wrapper: the relevant flag bits match the PTE layout. */
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
return __pte_access_permitted(pmd_val(pmd), write);
}
#define pud_access_permitted pud_access_permitted
/* PUD-level wrapper: the relevant flag bits match the PTE layout. */
static inline bool pud_access_permitted(pud_t pud, bool write)
{
return __pte_access_permitted(pud_val(pud), write);
}
#include <asm-generic/pgtable.h> #include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -207,6 +207,20 @@ extern void cleanup_highmap(void); ...@@ -207,6 +207,20 @@ extern void cleanup_highmap(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif /* !__ASSEMBLY__ */ #define gup_fast_permitted gup_fast_permitted
/*
 * Sanity-check a get_user_pages_fast() request on x86-64: the requested
 * range [start, start + nr_pages << PAGE_SHIFT) must not wrap around
 * and must lie entirely below the user virtual address limit set by
 * __VIRTUAL_MASK_SHIFT, so the lockless walk never touches kernel
 * addresses.
 */
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
int write)
{
unsigned long len, end;
len = (unsigned long)nr_pages << PAGE_SHIFT;
end = start + len;
if (end < start) /* unsigned wraparound: range overflowed */
return false;
if (end >> __VIRTUAL_MASK_SHIFT) /* any bit above the user VA limit */
return false;
return true;
}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */ #endif /* _ASM_X86_PGTABLE_64_H */
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
KCOV_INSTRUMENT_tlb.o := n KCOV_INSTRUMENT_tlb.o := n
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o pat.o pgtable.o physaddr.o setup_nx.o tlb.o
# Make sure __phys_addr has no stackprotector # Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector) nostackp := $(call cc-option, -fno-stack-protector)
......
This diff is collapsed.
...@@ -137,7 +137,7 @@ config HAVE_MEMBLOCK_NODE_MAP ...@@ -137,7 +137,7 @@ config HAVE_MEMBLOCK_NODE_MAP
config HAVE_MEMBLOCK_PHYS_MAP config HAVE_MEMBLOCK_PHYS_MAP
bool bool
config HAVE_GENERIC_RCU_GUP config HAVE_GENERIC_GUP
bool bool
config ARCH_DISCARD_MEMBLOCK config ARCH_DISCARD_MEMBLOCK
......
...@@ -1155,7 +1155,7 @@ struct page *get_dump_page(unsigned long addr) ...@@ -1155,7 +1155,7 @@ struct page *get_dump_page(unsigned long addr)
#endif /* CONFIG_ELF_CORE */ #endif /* CONFIG_ELF_CORE */
/* /*
* Generic RCU Fast GUP * Generic Fast GUP
* *
* get_user_pages_fast attempts to pin user pages by walking the page * get_user_pages_fast attempts to pin user pages by walking the page
* tables directly and avoids taking locks. Thus the walker needs to be * tables directly and avoids taking locks. Thus the walker needs to be
...@@ -1176,8 +1176,8 @@ struct page *get_dump_page(unsigned long addr) ...@@ -1176,8 +1176,8 @@ struct page *get_dump_page(unsigned long addr)
* Before activating this code, please be aware that the following assumptions * Before activating this code, please be aware that the following assumptions
* are currently made: * are currently made:
* *
* *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
* pages containing page tables. * free pages containing page tables or TLB flushing requires IPI broadcast.
* *
* *) ptes can be read atomically by the architecture. * *) ptes can be read atomically by the architecture.
* *
...@@ -1187,7 +1187,7 @@ struct page *get_dump_page(unsigned long addr) ...@@ -1187,7 +1187,7 @@ struct page *get_dump_page(unsigned long addr)
* *
* This code is based heavily on the PowerPC implementation by Nick Piggin. * This code is based heavily on the PowerPC implementation by Nick Piggin.
*/ */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP #ifdef CONFIG_HAVE_GENERIC_GUP
#ifndef gup_get_pte #ifndef gup_get_pte
/* /*
...@@ -1677,4 +1677,4 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, ...@@ -1677,4 +1677,4 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
return ret; return ret;
} }
#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */ #endif /* CONFIG_HAVE_GENERIC_GUP */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment