Commit c5e9b2c2 authored by Alexandre Ghiti, committed by Palmer Dabbelt

riscv: Improve tlb_flush()

For now, tlb_flush() simply calls flush_tlb_mm(), which results in a
flush of the whole TLB. So let's use the mmu_gather fields to provide a
more fine-grained flush of the TLB.
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> # On RZ/Five SMARC
Link: https://lore.kernel.org/r/20231030133027.19542-2-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 0bb80ecc
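
For context, the decision made in the new tlb_flush() below leans entirely on state the generic mmu_gather code already tracks while unmapping. An abridged sketch of the relevant fields (simplified from include/asm-generic/tlb.h; exact layout and bitfield packing omitted):

struct mmu_gather {
	struct mm_struct *mm;
	unsigned long	start;			/* range gathered for flushing */
	unsigned long	end;
	unsigned int	fullmm : 1;		/* entire address space is going away */
	unsigned int	need_flush_all : 1;
	unsigned int	cleared_ptes : 1;	/* which page-table levels were */
	unsigned int	cleared_pmds : 1;	/* cleared; these feed */
	unsigned int	cleared_puds : 1;	/* tlb_get_unmap_size() */
	unsigned int	cleared_p4ds : 1;
	/* ... */
};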
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	flush_tlb_mm(tlb->mm);
+#ifdef CONFIG_MMU
+	if (tlb->fullmm || tlb->need_flush_all)
+		flush_tlb_mm(tlb->mm);
+	else
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+				   tlb_get_unmap_size(tlb));
+#endif
 }
 
 #endif /* _ASM_RISCV_TLB_H */
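
The page size handed to flush_tlb_mm_range() comes from the generic gather state, not from this patch. Roughly, the helpers in include/asm-generic/tlb.h (sketched here for reference, not part of the diff) pick the flush stride from the smallest page-table level the gather cleared, so no entry is stepped over:

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}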
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end, unsigned int page_size);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
@@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 /* Flush a range of kernel pages */
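
On !CONFIG_SMP || !CONFIG_MMU configurations the new helper simply falls back to flush_tlb_all(), mirroring the existing flush_tlb_mm() fallback, so generic callers need no configuration-specific handling.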
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 	__flush_tlb_range(mm, 0, -1, PAGE_SIZE);
 }
 
+void flush_tlb_mm_range(struct mm_struct *mm,
+			unsigned long start, unsigned long end,
+			unsigned int page_size)
+{
+	__flush_tlb_range(mm, start, end - start, page_size);
+}
+
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
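
Note that __flush_tlb_range() takes a (start, size, stride) triple rather than an end address, hence the end - start above; flush_tlb_mm() passing (0, -1) means the whole address space. As a hypothetical illustration of the payoff (values chosen for the example, not taken from the patch):

/*
 * Hypothetical example: munmap() of a single 2MiB PMD-mapped region.
 * The gather leaves tlb->start/tlb->end spanning those 2MiB with
 * tlb->cleared_pmds set, so the new tlb_flush() reduces to
 *
 *	flush_tlb_mm_range(mm, start, start + PMD_SIZE, PMD_SIZE);
 *
 * i.e. one flush at PMD stride, where the old code flushed every
 * TLB entry belonging to the mm.
 */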