Commit 5e5f6dc1 authored by Steve Capper, committed by Linus Torvalds

arm64: mm: enable HAVE_RCU_TABLE_FREE logic

In order to implement fast_get_user_pages we need to ensure that the page
table walker is protected from page table pages being freed from under it.

This patch enables HAVE_RCU_TABLE_FREE: any page table pages belonging to
address spaces with multiple users will be freed via call_rcu_sched(), so
disabling interrupts will block the free and protect the fast gup page
walker.
Signed-off-by: Steve Capper <steve.capper@linaro.org>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b8cd51af
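
The property the commit message relies on is that call_rcu_sched() callbacks only run once every CPU has passed through a quiescent state, and a CPU running with interrupts disabled cannot reach one. A fast gup walker can therefore pin the page tables simply by disabling interrupts around its lockless walk. Below is a minimal sketch of that usage pattern, not the actual mm/gup.c implementation; gup_walk_pgd_range() is a hypothetical stand-in for the real per-level walkers.

/*
 * Sketch only: assumes <linux/irqflags.h>, <linux/mm.h> and a
 * hypothetical gup_walk_pgd_range() helper that walks the levels
 * without taking mmap_sem.
 */
static int fast_gup_sketch(unsigned long start, unsigned long end,
			   struct page **pages)
{
	unsigned long flags;
	int nr;

	/*
	 * With IRQs off, this CPU cannot pass through a quiescent
	 * state, so any table page queued for freeing with
	 * call_rcu_sched() stays allocated until local_irq_restore().
	 */
	local_irq_save(flags);
	nr = gup_walk_pgd_range(current->mm, start, end, pages);
	local_irq_restore(flags);

	return nr;
}
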
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -57,6 +57,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -23,6 +23,20 @@
 
 #include <asm-generic/tlb.h>
 
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * There's three ways the TLB shootdown code is used:
  * 1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
@@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 2
@@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
 
@@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pudp));
+	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
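
For context, the __tlb_remove_table() hook added above is invoked by the generic CONFIG_HAVE_RCU_TABLE_FREE machinery in mm/memory.c: tlb_remove_table() batches table pages, and the batch is handed to call_rcu_sched(), whose callback walks the batch and calls the arch hook on each entry. The following is a simplified sketch of that flow, omitting batch allocation and the IPI-based fallback paths of the real code.

/* Simplified from the generic RCU table-free code; shown only to
 * illustrate where the arm64 hook plugs in. */
static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	/* On arm64 this ends up in free_page_and_swap_cache() via the
	 * __tlb_remove_table() hook defined in asm/tlb.h above. */
	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		/* Deferred until every CPU has left any IRQ-disabled
		 * region it was in, which is what protects fast gup. */
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}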