Commit 69be3fb1 authored by Jisheng Zhang, committed by Palmer Dabbelt

riscv: enable MMU_GATHER_RCU_TABLE_FREE for SMP && MMU

In order to implement fast gup we need to ensure that the page
table walker is protected from page table pages being freed from
under it.
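
For context, the fast-gup walker gets this protection simply by disabling
interrupts around its lockless walk. A minimal sketch of the pattern
(condensed from mm/gup.c; the walk itself is elided):

	unsigned long flags;

	/*
	 * With interrupts disabled, this CPU either blocks the
	 * TLB-shootdown IPI or holds off the RCU grace period, so the
	 * page-table pages it dereferences cannot be freed under it.
	 */
	local_irq_save(flags);
	/* ... lockless pgd/p4d/pud/pmd/pte walk, no mmap_lock taken ... */
	local_irq_restore(flags);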

The riscv situation is more complicated than on other architectures:
some riscv platforms may use an IPI to perform TLB shootdown (for
example, platforms which support AIA; riscv_ipi_for_rfence is usually
true there), while other riscv platforms rely on the SBI to perform
TLB shootdown (riscv_ipi_for_rfence is usually false there). To keep
software pagetable walkers safe in both cases we switch to RCU-based
table free (MMU_GATHER_RCU_TABLE_FREE). See the comment below 'ifdef
CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h for
more details.
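
For reference, the generic RCU-based free path works roughly as below
(heavily condensed from mm/mmu_gather.c; batching details elided), which
is why any walker that holds off an RCU grace period, e.g. by disabling
interrupts, also holds off the free:

	static void tlb_remove_table_rcu(struct rcu_head *head)
	{
		/*
		 * Runs only after a grace period has elapsed; hands each
		 * batched page to the arch's __tlb_remove_table() hook.
		 */
	}

	void tlb_remove_table(struct mmu_gather *tlb, void *table)
	{
		/*
		 * Queue 'table' on the gather's table batch; when the
		 * batch is flushed, the free is deferred past a grace
		 * period rather than done immediately.
		 */
		call_rcu(&batch->rcu, tlb_remove_table_rcu);
	}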

This patch enables MMU_GATHER_RCU_TABLE_FREE and then uses:

* tlb_remove_page_ptdesc() on platforms which use an IPI to perform
TLB shootdown;

* tlb_remove_ptdesc() on platforms which use the SBI to perform TLB
shootdown.

In both cases disabling interrupts blocks the free and thereby
protects the fast gup page walker; the two helpers are sketched below.
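
The difference between the two helpers (condensed from
include/asm-generic/tlb.h) is that tlb_remove_page_ptdesc() frees
through the ordinary mmu_gather page batch, which is only drained once
the IPI-driven TLB flush has completed, while tlb_remove_ptdesc() goes
through tlb_remove_table() and hence the RCU-deferred path:

	static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
	{
		tlb_remove_table(tlb, pt);		/* RCU-deferred free */
	}

	static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb,
						  struct ptdesc *pt)
	{
		tlb_remove_page(tlb, ptdesc_page(pt));	/* page-batch free */
	}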
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20231219175046.2496-4-jszhang@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 40d1bb92
arch/riscv/Kconfig
@@ -147,6 +147,7 @@ config RISCV
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
 	select LOCK_MM_AND_FIND_VMA
+	select MMU_GATHER_RCU_TABLE_FREE if SMP && MMU
 	select MODULES_USE_ELF_RELA if MODULES
 	select MODULE_SECTIONS if MODULES
 	select OF
arch/riscv/include/asm/pgalloc.h
@@ -102,7 +102,10 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 		struct ptdesc *ptdesc = virt_to_ptdesc(pud);
 
 		pagetable_pud_dtor(ptdesc);
-		tlb_remove_page_ptdesc(tlb, ptdesc);
+		if (riscv_use_ipi_for_rfence())
+			tlb_remove_page_ptdesc(tlb, ptdesc);
+		else
+			tlb_remove_ptdesc(tlb, ptdesc);
 	}
 }
@@ -136,8 +139,12 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 				  unsigned long addr)
 {
-	if (pgtable_l5_enabled)
-		tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
+	if (pgtable_l5_enabled) {
+		if (riscv_use_ipi_for_rfence())
+			tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
+		else
+			tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+	}
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
@@ -169,7 +176,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
 
 	pagetable_pmd_dtor(ptdesc);
-	tlb_remove_page_ptdesc(tlb, ptdesc);
+	if (riscv_use_ipi_for_rfence())
+		tlb_remove_page_ptdesc(tlb, ptdesc);
+	else
+		tlb_remove_ptdesc(tlb, ptdesc);
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
@@ -180,7 +190,10 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	struct ptdesc *ptdesc = page_ptdesc(pte);
 
 	pagetable_pte_dtor(ptdesc);
-	tlb_remove_page_ptdesc(tlb, ptdesc);
+	if (riscv_use_ipi_for_rfence())
+		tlb_remove_page_ptdesc(tlb, ptdesc);
+	else
+		tlb_remove_ptdesc(tlb, ptdesc);
 }
 #endif /* CONFIG_MMU */
arch/riscv/include/asm/tlb.h
@@ -10,6 +10,24 @@ struct mmu_gather;
 
 static void tlb_flush(struct mmu_gather *tlb);
 
+#ifdef CONFIG_MMU
+#include <linux/swap.h>
+
+/*
+ * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
+ * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
+ * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
+ * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
+ * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in
+ * include/asm-generic/tlb.h for more details.
+ */
+static inline void __tlb_remove_table(void *table)
+{
+	free_page_and_swap_cache(table);
+}
+
+#endif /* CONFIG_MMU */
+
 #define tlb_flush tlb_flush
 #include <asm-generic/tlb.h>
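
Putting the pieces together, on an SBI-rfence platform a PTE page freed
via __pte_free_tlb() now takes roughly this path (intermediate batching
steps omitted):

	__pte_free_tlb()
	  -> tlb_remove_ptdesc()
	     -> tlb_remove_table()              /* generic mmu_gather code */
	        -> call_rcu()                   /* wait out a grace period */
	           -> __tlb_remove_table()      /* new riscv hook above */
	              -> free_page_and_swap_cache()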