Commit 63497b71 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

alpha: implement the new page table range API

Add PFN_PTE_SHIFT, update_mmu_cache_range() and flush_icache_pages().

Link: https://lkml.kernel.org/r/20230802151406.3735276-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent bcc6cc83
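
The key enabler here is PFN_PTE_SHIFT: on alpha the PFN sits in the upper bits of the PTE (pte_pfn() shifts right by 32), so exporting that shift lets the architecture drop its private set_pte_at() and fall back to the generic set_ptes() helper, which steps the PFN forward by one page for each PTE it writes. As context only, here is a rough sketch of that generic helper from include/linux/pgtable.h as introduced earlier in this series (simplified; the upstream version also carries page-table-check hooks, so exact details may differ):

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	arch_enter_lazy_mmu_mode();
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		/* Advance the PFN encoded in the PTE to the next page. */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
	arch_leave_lazy_mmu_mode();
}

#define set_pte_at(mm, addr, ptep, pte)	set_ptes(mm, addr, ptep, pte, 1)

With this in place, set_pte_at() callers on alpha go through set_ptes() with nr == 1, which is why the alpha-specific set_pte_at() definition can simply be deleted in the diff below.
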
@@ -57,6 +57,16 @@ extern void flush_icache_user_page(struct vm_area_struct *vma,
 #define flush_icache_page(vma, page) \
 	flush_icache_user_page((vma), (page), 0, 0)
 
+/*
+ * Both implementations of flush_icache_user_page flush the entire
+ * address space, so one call, no matter how many pages.
+ */
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+		struct page *page, unsigned int nr)
+{
+	flush_icache_user_page(vma, page, 0, 0);
+}
+
 #include <asm-generic/cacheflush.h>
 
 #endif /* _ALPHA_CACHEFLUSH_H */
@@ -26,7 +26,6 @@ struct vm_area_struct;
  * hook is made available.
  */
 #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 /* PMD_SHIFT determines the size of the area a second-level page table can map */
 #define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
@@ -189,7 +188,8 @@ extern unsigned long __zero_page(void);
  * and a page entry and page directory to the page they refer to.
  */
 #define page_to_pa(page)	(page_to_pfn(page) << PAGE_SHIFT)
-#define pte_pfn(pte)		(pte_val(pte) >> 32)
+#define PFN_PTE_SHIFT		32
+#define pte_pfn(pte)		(pte_val(pte) >> PFN_PTE_SHIFT)
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, pgprot)						\
@@ -303,6 +303,12 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
 {
 }
 
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
+{
+}
+
 /*
  * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
  * are !pte_none() && !pte_present().