Commit 6a2561f9 authored by Hugh Dickins, committed by Andrew Morton

parisc: add pte_unmap() to balance get_ptep()

To keep balance in future, remember to pte_unmap() after a successful
get_ptep().  And act as if flush_cache_pages() really needs a map there,
to read the pfn before "unmapping", to be sure page table is not removed.

Link: https://lkml.kernel.org/r/653369-95ef-acd2-d6ea-e95f5a997493@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: John David Anglin <dave.anglin@bell.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 17b25a38
@@ -426,10 +426,15 @@ void flush_dcache_page(struct page *page)
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
 		if (parisc_requires_coherency()) {
+			bool needs_flush = false;
 			pte_t *ptep;
 
 			ptep = get_ptep(mpnt->vm_mm, addr);
-			if (ptep && pte_needs_flush(*ptep))
+			if (ptep) {
+				needs_flush = pte_needs_flush(*ptep);
+				pte_unmap(ptep);
+			}
+			if (needs_flush)
 				flush_user_cache_page(mpnt, addr);
 		} else {
 			/*
@@ -561,14 +566,20 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 static void flush_cache_page_if_present(struct vm_area_struct *vma,
 	unsigned long vmaddr, unsigned long pfn)
 {
-	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
+	bool needs_flush = false;
+	pte_t *ptep;
 
 	/*
 	 * The pte check is racy and sometimes the flush will trigger
 	 * a non-access TLB miss. Hopefully, the page has already been
 	 * flushed.
 	 */
-	if (ptep && pte_needs_flush(*ptep))
+	ptep = get_ptep(vma->vm_mm, vmaddr);
+	if (ptep) {
+		needs_flush = pte_needs_flush(*ptep);
+		pte_unmap(ptep);
+	}
+	if (needs_flush)
 		flush_cache_page(vma, vmaddr, pfn);
 }
@@ -635,17 +646,22 @@ static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, u
 	pte_t *ptep;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		bool needs_flush = false;
 		/*
 		 * The vma can contain pages that aren't present. Although
 		 * the pte search is expensive, we need the pte to find the
 		 * page pfn and to check whether the page should be flushed.
 		 */
 		ptep = get_ptep(vma->vm_mm, addr);
-		if (ptep && pte_needs_flush(*ptep)) {
+		if (ptep) {
+			needs_flush = pte_needs_flush(*ptep);
+			pfn = pte_pfn(*ptep);
+			pte_unmap(ptep);
+		}
+		if (needs_flush) {
 			if (parisc_requires_coherency()) {
 				flush_user_cache_page(vma, addr);
 			} else {
-				pfn = pte_pfn(*ptep);
 				if (WARN_ON(!pfn_valid(pfn)))
 					return;
 				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment