Commit 73ea68ad authored by Christophe Leroy, committed by Michael Ellerman

powerpc/book3s: Inline first level of update_mmu_cache()

update_mmu_cache() is a no-op when hash page tables are not used.
On PPC32 that means when MMU_FTR_HPTE_TABLE is not defined.
On PPC64 that means when RADIX is enabled.

Rename the core part of update_mmu_cache() to __update_mmu_cache()
and perform the initial checks in an inlined caller.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/bea5ad0de7f83eff256116816d46c84fa0a444de.1662370698.git.christophe.leroy@csgroup.eu
parent 691cdf01
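To illustrate the pattern this commit applies, independent of the kernel sources: the expensive work stays out of line, while a small inlined wrapper performs the cheap "is this needed at all?" check, so callers on configurations without the feature never pay for a function call. The following is a minimal standalone C sketch of that idea; have_hash_table, __cache_preload and cache_preload are illustrative names, not kernel symbols.

/*
 * Minimal standalone sketch of the refactoring pattern (not kernel code):
 * keep the heavy work out of line, inline only the cheap feature check
 * into every caller.
 */
#include <stdbool.h>
#include <stdio.h>

static bool have_hash_table;	/* stands in for mmu_has_feature()/radix_enabled() */

/* Out-of-line core, analogous to __update_mmu_cache() */
static void __cache_preload(unsigned long address)
{
	printf("preloading translation for address %#lx\n", address);
}

/* Inlined first level, analogous to the new update_mmu_cache() wrapper */
static inline void cache_preload(unsigned long address)
{
	if (!have_hash_table)
		return;			/* callers skip the out-of-line call entirely */
	__cache_preload(address);
}

int main(void)
{
	cache_preload(0x1000);		/* no hash table: nothing happens */
	have_hash_table = true;
	cache_preload(0x2000);		/* hash table present: core runs */
	return 0;
}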
@@ -25,7 +25,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				      unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
-#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
 /*
  * This gets called at the end of handling a page fault, when
  * the kernel has put a new PTE into the page table for the process.
@@ -35,10 +36,14 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  * corresponding HPTE into the hash table ahead of time, instead of
  * waiting for the inevitable extra hash-table miss exception.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
-#else
-static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
-#endif
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+	if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
+		return;
+	if (radix_enabled())
+		return;
+	__update_mmu_cache(vma, address, ptep);
+}
 
 #endif /* __ASSEMBLY__ */
 #endif
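One design point worth noting in the wrapper above: because IS_ENABLED(CONFIG_PPC32) is a compile-time constant, a 64-bit build drops the MMU_FTR_HPTE_TABLE test entirely, leaving only the radix_enabled() check. A rough standalone analogue of that folding follows; PLATFORM_32BIT, hash_feature_present(), radix_in_use() and __update_cache() are made-up stand-ins, not kernel symbols.

/*
 * Standalone analogue (not kernel code) of how an IS_ENABLED()-style
 * compile-time constant lets the compiler discard a dead guard.
 */
#include <stdbool.h>
#include <stdio.h>

#define PLATFORM_32BIT 0	/* like IS_ENABLED(CONFIG_PPC32) on a 64-bit build */

static bool hash_feature_present(void) { return false; }
static bool radix_in_use(void) { return false; }

static void __update_cache(unsigned long address)
{
	printf("updating cache for %#lx\n", address);
}

static inline void update_cache(unsigned long address)
{
	/* With PLATFORM_32BIT == 0 this whole test folds away at compile time. */
	if (PLATFORM_32BIT && !hash_feature_present())
		return;
	if (radix_in_use())
		return;
	__update_cache(address);
}

int main(void)
{
	update_cache(0x3000);
	return 0;
}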
@@ -314,11 +314,9 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea)
  *
  * This must always be called with the pte lock held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *ptep)
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+			pte_t *ptep)
 {
-	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
-		return;
 	/*
 	 * We don't need to worry about _PAGE_PRESENT here because we are
 	 * called with either mm->page_table_lock held or ptl lock held
@@ -1781,7 +1781,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
  *
  * This must always be called with the pte lock held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-		      pte_t *ptep)
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+			pte_t *ptep)
 {
 	/*
@@ -1791,9 +1791,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	unsigned long trap;
 	bool is_exec;
 
-	if (radix_enabled())
-		return;
-
 	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
 	if (!pte_young(*ptep) || address >= TASK_SIZE)
 		return;