Commit cec6515a authored by Aneesh Kumar K.V, committed by Linus Torvalds

powerpc/book3s64/mm: update flush_tlb_range to flush page walk cache

flush_tlb_range is special in that we don't specify the page size used for
the translation. Hence, when flushing the TLB, we flush the translation
cache for all possible page sizes. The kernel also uses the same interface
when moving page tables around. Such a move requires us to flush the page
walk cache.

Instead of adding another interface to force a page walk cache flush,
update flush_tlb_range to flush the page walk cache if the range flushed is
more than the PMD range. A page table move will always involve an
invalidate range of more than PMD_SIZE.

Running a microbenchmark with mprotect and parallel memory accesses didn't
show any observable performance impact.
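
In essence, the new policy in __radix__flush_tlb_range() below reduces to
the following sketch (a simplified restatement, not the literal kernel
code; RIC_FLUSH_ALL is the radix invalidation class that drops both TLB
entries and the page walk cache):

	if (flush_pid) {
		/* Full-PID flush: RIC_FLUSH_ALL already drops the PWC. */
	} else if ((end - start) >= PMD_SIZE) {
		/*
		 * A sub-PID flush covering PMD_SIZE or more may stem from
		 * a page table move (mremap), so flush the PWC as well.
		 */
		flush_pwc = true;
	}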

Link: https://lkml.kernel.org/r/20210616045735.374532-3-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3bbda69c
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -64,6 +64,8 @@ extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
 					   unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
 					 unsigned long end, int psize);
+void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
+				      unsigned long end, int psize);
 extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
 				       unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -32,7 +32,13 @@ void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long st
 	struct hstate *hstate = hstate_file(vma->vm_file);
 
 	psize = hstate_get_psize(hstate);
-	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+	/*
+	 * Flush PWC even if we get PUD_SIZE hugetlb invalidate to keep this simpler.
+	 */
+	if (end - start >= PUD_SIZE)
+		radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);
+	else
+		radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
 }
 
 /*
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1111,14 +1111,13 @@ static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_
 
 static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 					    unsigned long start, unsigned long end)
-
 {
 	unsigned long pid;
 	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
 	unsigned long page_size = 1UL << page_shift;
 	unsigned long nr_pages = (end - start) >> page_shift;
 	bool fullmm = (end == TLB_FLUSH_ALL);
-	bool flush_pid;
+	bool flush_pid, flush_pwc = false;
 	enum tlb_flush_type type;
 
 	pid = mm->context.id;
@@ -1137,8 +1136,16 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 		flush_pid = nr_pages > tlb_single_page_flush_ceiling;
 	else
 		flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
+	/*
+	 * full pid flush already does the PWC flush. if it is not full pid
+	 * flush check the range is more than PMD and force a pwc flush
+	 * mremap() depends on this behaviour.
+	 */
+	if (!flush_pid && (end - start) >= PMD_SIZE)
+		flush_pwc = true;
 
 	if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
+		unsigned long type = H_RPTI_TYPE_TLB;
 		unsigned long tgt = H_RPTI_TARGET_CMMU;
 		unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
 
@@ -1146,19 +1153,20 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 			pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M);
 		if (atomic_read(&mm->context.copros) > 0)
 			tgt |= H_RPTI_TARGET_NMMU;
-		pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes,
-				       start, end);
+		if (flush_pwc)
+			type |= H_RPTI_TYPE_PWC;
+		pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
 	} else if (flush_pid) {
+		/*
+		 * We are now flushing a range larger than PMD size force a RIC_FLUSH_ALL
+		 */
 		if (type == FLUSH_TYPE_LOCAL) {
-			_tlbiel_pid(pid, RIC_FLUSH_TLB);
+			_tlbiel_pid(pid, RIC_FLUSH_ALL);
 		} else {
 			if (cputlb_use_tlbie()) {
-				if (mm_needs_flush_escalation(mm))
-					_tlbie_pid(pid, RIC_FLUSH_ALL);
-				else
-					_tlbie_pid(pid, RIC_FLUSH_TLB);
+				_tlbie_pid(pid, RIC_FLUSH_ALL);
 			} else {
-				_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
+				_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
 			}
 		}
 	} else {
@@ -1174,6 +1182,9 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 
 		if (type == FLUSH_TYPE_LOCAL) {
 			asm volatile("ptesync": : :"memory");
+			if (flush_pwc)
+				/* For PWC, only one flush is needed */
+				__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
 			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
 			if (hflush)
 				__tlbiel_va_range(hstart, hend, pid,
@@ -1181,6 +1192,8 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 			ppc_after_tlbiel_barrier();
 		} else if (cputlb_use_tlbie()) {
 			asm volatile("ptesync": : :"memory");
+			if (flush_pwc)
+				__tlbie_pid(pid, RIC_FLUSH_PWC);
 			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
 			if (hflush)
 				__tlbie_va_range(hstart, hend, pid,
@@ -1188,10 +1201,10 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 			asm volatile("eieio; tlbsync; ptesync": : :"memory");
 		} else {
 			_tlbiel_va_range_multicast(mm,
-					start, end, pid, page_size, mmu_virtual_psize, false);
+					start, end, pid, page_size, mmu_virtual_psize, flush_pwc);
 			if (hflush)
 				_tlbiel_va_range_multicast(mm,
-					hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
+					hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, flush_pwc);
 		}
 	}
 out:
@@ -1265,9 +1278,6 @@ void radix__flush_all_lpid_guest(unsigned int lpid)
 	_tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
 }
 
-static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
-				unsigned long end, int psize);
-
 void radix__tlb_flush(struct mmu_gather *tlb)
 {
 	int psize = 0;
@@ -1374,8 +1384,8 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
 	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
 }
 
-static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
-				unsigned long end, int psize)
+void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
+				      unsigned long end, int psize)
 {
 	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
 }
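
For context, the mremap() dependence mentioned in the comment above comes
from the generic page-table-move code; the following is a paraphrased
sketch of mm/mremap.c:move_normal_pmd() from this era (illustration only,
not part of this patch):

	/*
	 * The moved PMD maps exactly PMD_SIZE bytes, so this flush always
	 * satisfies the (end - start) >= PMD_SIZE test added above and
	 * now flushes the page walk cache on radix as well.
	 */
	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);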