Commit e70bbca6 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

parisc: implement the new page table range API

Add set_ptes(), update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().  Change the PG_arch_1 (aka PG_dcache_dirty) flag
from being per-page to per-folio.

Link: https://lkml.kernel.org/r/20230802151406.3735276-21-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Helge Deller <deller@gmx.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 063e409d
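For readers unfamiliar with the range API, the contract set_ptes() has to satisfy is: install nr consecutive PTEs starting at ptep, with the PFN advancing by one page per entry, while doing the per-architecture bookkeeping (on parisc, __update_cache() once and purge_tlb_entries() per entry). The stand-alone userspace program below is only an illustrative sketch of that loop shape, not kernel code; the names model_set_pte()/model_set_ptes(), the 64-bit PTE layout and the PFN_PTE_SHIFT value are assumptions made for the example.

/*
 * Userspace model (not kernel code) of the set_ptes() contract implemented
 * by the patch below: write nr consecutive PTEs whose PFNs increase by one
 * per page.  The PTE layout and PFN_PTE_SHIFT here are simplified
 * assumptions for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_PTE_SHIFT	PAGE_SHIFT	/* assumption: pfn stored at bit 12 */

typedef uint64_t pte_t;

/* Model of a single-PTE store plus the per-entry TLB purge. */
static void model_set_pte(pte_t *ptep, pte_t pte, unsigned long addr)
{
	*ptep = pte;
	printf("purge_tlb_entries at 0x%lx -> pte 0x%llx\n",
	       addr, (unsigned long long)pte);
}

/* Mirrors the loop structure of the parisc set_ptes() in the diff. */
static void model_set_ptes(unsigned long addr, pte_t *ptep, pte_t pte,
			   unsigned int nr)
{
	for (;;) {
		model_set_pte(ptep, pte, addr);
		if (--nr == 0)
			break;
		ptep++;
		pte += 1UL << PFN_PTE_SHIFT;	/* advance to the next pfn */
		addr += PAGE_SIZE;
	}
}

int main(void)
{
	pte_t ptes[4] = { 0 };

	/* Install a 4-page range starting at pfn 0x100, address 0x10000. */
	model_set_ptes(0x10000, ptes, ((pte_t)0x100 << PFN_PTE_SHIFT) | 1, 4);
	return 0;
}

Running the model prints one simulated TLB purge per page, which mirrors how the parisc set_ptes() in the second hunk group below advances pte_val(pte) by 1 << PFN_PTE_SHIFT on each iteration.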
@@ -43,8 +43,13 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-void flush_dcache_page(struct page *page);
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
@@ -53,10 +58,9 @@ void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
 		xa_unlock_irqrestore(&mapping->i_pages, flags)
 
-#define flush_icache_page(vma,page)	do {		\
-	flush_kernel_dcache_page_addr(page_address(page)); \
-	flush_kernel_icache_page(page_address(page)); 	\
-} while (0)
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+		unsigned int nr);
+#define flush_icache_page(vma, page)	flush_icache_pages(vma, page, 1)
 
 #define flush_icache_range(s,e)		do { 		\
 	flush_kernel_dcache_range_asm(s,e); 		\
@@ -73,15 +73,6 @@ extern void __update_cache(pte_t pte);
 		mb();				\
 	} while(0)
 
-#define set_pte_at(mm, addr, pteptr, pteval)	\
-	do {					\
-		if (pte_present(pteval) &&	\
-		    pte_user(pteval))		\
-			__update_cache(pteval);	\
-		*(pteptr) = (pteval);		\
-		purge_tlb_entries(mm, addr);	\
-	} while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #define pte_ERROR(e) \
@@ -285,7 +276,7 @@ extern unsigned long *empty_zero_page;
 #define pte_none(x)     (pte_val(x) == 0)
 #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
 #define pte_user(x)	(pte_val(x) & _PAGE_USER)
-#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))
+#define pte_clear(mm, addr, xp)  set_pte(xp, __pte(0))
 
 #define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
 #define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -391,11 +382,29 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 
 extern void paging_init (void);
 
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
+{
+	if (pte_present(pte) && pte_user(pte))
+		__update_cache(pte);
+	for (;;) {
+		*ptep = pte;
+		purge_tlb_entries(mm, addr);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte_val(pte) += 1 << PFN_PTE_SHIFT;
+		addr += PAGE_SIZE;
+	}
+}
+#define set_ptes set_ptes
+
 /* Used for deferring calls to flush_dcache_page() */
 
 #define PG_dcache_dirty         PG_arch_1
 
-#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep)
+#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) __update_cache(*ptep)
+#define update_mmu_cache(vma, addr, ptep) __update_cache(*ptep)
 
 /*
  * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
@@ -450,7 +459,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	if (!pte_young(pte)) {
 		return 0;
 	}
-	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+	set_pte(ptep, pte_mkold(pte));
 	return 1;
 }
 
@@ -460,14 +469,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	pte_t old_pte;
 
 	old_pte = *ptep;
-	set_pte_at(mm, addr, ptep, __pte(0));
+	set_pte(ptep, __pte(0));
 
 	return old_pte;
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
+	set_pte(ptep, pte_wrprotect(*ptep));
 }
 
 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
@@ -94,11 +94,11 @@ static inline void flush_data_cache(void)
 /* Kernel virtual address of pfn.  */
 #define pfn_va(pfn)	__va(PFN_PHYS(pfn))
 
-void
-__update_cache(pte_t pte)
+void __update_cache(pte_t pte)
 {
 	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	struct folio *folio;
+	unsigned int nr;
 
 	/* We don't have pte special.  As a result, we can be called with
 	   an invalid pfn and we don't need to flush the kernel dcache page.
@@ -106,13 +106,17 @@ __update_cache(pte_t pte)
 	if (!pfn_valid(pfn))
 		return;
 
-	page = pfn_to_page(pfn);
-	if (page_mapping_file(page) &&
-	    test_bit(PG_dcache_dirty, &page->flags)) {
-		flush_kernel_dcache_page_addr(pfn_va(pfn));
-		clear_bit(PG_dcache_dirty, &page->flags);
+	folio = page_folio(pfn_to_page(pfn));
+	pfn = folio_pfn(folio);
+	nr = folio_nr_pages(folio);
+	if (folio_flush_mapping(folio) &&
+	    test_bit(PG_dcache_dirty, &folio->flags)) {
+		while (nr--)
+			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
+		clear_bit(PG_dcache_dirty, &folio->flags);
 	} else if (parisc_requires_coherency())
-		flush_kernel_dcache_page_addr(pfn_va(pfn));
+		while (nr--)
+			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
 }
 
 void
@@ -366,6 +370,20 @@ static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmad
 	preempt_enable();
 }
 
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+		unsigned int nr)
+{
+	void *kaddr = page_address(page);
+
+	for (;;) {
+		flush_kernel_dcache_page_addr(kaddr);
+		flush_kernel_icache_page(kaddr);
+		if (--nr == 0)
+			break;
+		kaddr += PAGE_SIZE;
+	}
+}
+
 static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
 {
 	pte_t *ptep = NULL;
@@ -394,27 +412,30 @@ static inline bool pte_needs_flush(pte_t pte)
 		== (_PAGE_PRESENT | _PAGE_ACCESSED);
 }
 
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping_file(page);
-	struct vm_area_struct *mpnt;
-	unsigned long offset;
+	struct address_space *mapping = folio_flush_mapping(folio);
+	struct vm_area_struct *vma;
 	unsigned long addr, old_addr = 0;
+	void *kaddr;
 	unsigned long count = 0;
-	unsigned long flags;
+	unsigned long i, nr, flags;
 	pgoff_t pgoff;
 
 	if (mapping && !mapping_mapped(mapping)) {
-		set_bit(PG_dcache_dirty, &page->flags);
+		set_bit(PG_dcache_dirty, &folio->flags);
 		return;
 	}
 
-	flush_kernel_dcache_page_addr(page_address(page));
+	nr = folio_nr_pages(folio);
+	kaddr = folio_address(folio);
+	for (i = 0; i < nr; i++)
+		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);
 
 	if (!mapping)
 		return;
 
-	pgoff = page->index;
+	pgoff = folio->index;
 
 	/*
 	 * We have carefully arranged in arch_get_unmapped_area() that
@@ -424,20 +445,33 @@ void flush_dcache_page(struct page *page)
 	 * on machines that support equivalent aliasing
 	 */
 	flush_dcache_mmap_lock_irqsave(mapping, flags);
-	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
-		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		addr = mpnt->vm_start + offset;
-		if (parisc_requires_coherency()) {
-			bool needs_flush = false;
-			pte_t *ptep;
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
+		unsigned long offset = pgoff - vma->vm_pgoff;
+		unsigned long pfn = folio_pfn(folio);
+
+		addr = vma->vm_start;
+		nr = folio_nr_pages(folio);
+		if (offset > -nr) {
+			pfn -= offset;
+			nr += offset;
+		} else {
+			addr += offset * PAGE_SIZE;
+		}
+		if (addr + nr * PAGE_SIZE > vma->vm_end)
+			nr = (vma->vm_end - addr) / PAGE_SIZE;
 
-			ptep = get_ptep(mpnt->vm_mm, addr);
-			if (ptep) {
-				needs_flush = pte_needs_flush(*ptep);
+		if (parisc_requires_coherency()) {
+			for (i = 0; i < nr; i++) {
+				pte_t *ptep = get_ptep(vma->vm_mm,
+							addr + i * PAGE_SIZE);
+				if (!ptep)
+					continue;
+				if (pte_needs_flush(*ptep))
+					flush_user_cache_page(vma,
+							addr + i * PAGE_SIZE);
+				/* Optimise accesses to the same table? */
 				pte_unmap(ptep);
 			}
-			if (needs_flush)
-				flush_user_cache_page(mpnt, addr);
 		} else {
 			/*
 			 * The TLB is the engine of coherence on parisc:
@@ -450,27 +484,32 @@ void flush_dcache_page(struct page *page)
 			 * in (until the user or kernel specifically
 			 * accesses it, of course)
 			 */
-			flush_tlb_page(mpnt, addr);
+			for (i = 0; i < nr; i++)
+				flush_tlb_page(vma, addr + i * PAGE_SIZE);
 			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
 					!= (addr & (SHM_COLOUR - 1))) {
-				__flush_cache_page(mpnt, addr, page_to_phys(page));
+				for (i = 0; i < nr; i++)
+					__flush_cache_page(vma,
+						addr + i * PAGE_SIZE,
+						(pfn + i) * PAGE_SIZE);
 				/*
 				 * Software is allowed to have any number
 				 * of private mappings to a page.
 				 */
-				if (!(mpnt->vm_flags & VM_SHARED))
+				if (!(vma->vm_flags & VM_SHARED))
 					continue;
 				if (old_addr)
 					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
-						old_addr, addr, mpnt->vm_file);
-				old_addr = addr;
+						old_addr, addr, vma->vm_file);
+				if (nr == folio_nr_pages(folio))
+					old_addr = addr;
 			}
 		}
 		WARN_ON(++count == 4096);
 	}
 	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
 
 /* Defined in arch/parisc/kernel/pacache.S */
 EXPORT_SYMBOL(flush_kernel_dcache_range_asm);