Commit 289d6b0e authored by GuanXuetao

unicore32: rewrite arch-specific tlb.h to use asm-generic version

Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
parent 1cf46c42
@@ -12,87 +12,17 @@
 #ifndef __UNICORE_TLB_H__
 #define __UNICORE_TLB_H__
 
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
-
-/*
- * TLB handling.  This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-	struct mm_struct	*mm;
-	unsigned int		fullmm;
-	unsigned long		range_start;
-	unsigned long		range_end;
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
-	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
-
-	return tlb;
-}
-
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	if (tlb->fullmm)
-		flush_tlb_mm(tlb->mm);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
-{
-	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
-}
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush.  When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm) {
-		flush_cache_range(vma, vma->vm_start, vma->vm_end);
-		tlb->range_start = TASK_SIZE;
-		tlb->range_end = 0;
-	}
-}
-
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm && tlb->range_end > 0)
-		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
-}
-
-#define tlb_remove_page(tlb, page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, x, addr)	do { } while (0)
-
-#define tlb_migrate_finish(mm)		do { } while (0)
+#define tlb_start_vma(tlb, vma)				do { } while (0)
+#define tlb_end_vma(tlb, vma)				do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)
+#define tlb_flush(tlb)					flush_tlb_mm((tlb)->mm)
+
+#define __pte_free_tlb(tlb, pte, addr)				\
+	do {							\
+		pgtable_page_dtor(pte);				\
+		tlb_remove_page((tlb), (pte));			\
+	} while (0)
+
+#include <asm-generic/tlb.h>
 
 #endif
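
Note: the generic <asm-generic/tlb.h> supplies the mmu_gather batching that the deleted code open-coded. It queues the pages being unmapped and, at flush time, calls back into the few arch hooks this header still defines (tlb_flush, __pte_free_tlb, and the now-empty vma/entry macros). Below is a minimal user-space sketch of that batching pattern, under stated assumptions: names like arch_tlb_flush and BATCH_MAX are illustrative only, not the kernel's real API, and the real batch is sized from a page rather than a fixed constant.

/* Simplified user-space model of generic mmu_gather batching.
 * Illustrative only; not the kernel's actual implementation. */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX 8	/* hypothetical; the kernel sizes batches from a page */

struct mm_struct { int id; };

struct mmu_gather {
	struct mm_struct *mm;
	unsigned int	  fullmm;		/* tearing down the whole mm? */
	unsigned int	  nr;			/* pages queued so far */
	void		 *pages[BATCH_MAX];
};

/* Arch hook: unicore32 defines tlb_flush(tlb) as flush_tlb_mm((tlb)->mm). */
static void arch_tlb_flush(struct mmu_gather *tlb)
{
	printf("flush TLB for mm %d (fullmm=%u)\n", tlb->mm->id, tlb->fullmm);
}

/* Flush the TLB first, then free every queued page. */
static void tlb_flush_mmu(struct mmu_gather *tlb)
{
	arch_tlb_flush(tlb);
	for (unsigned int i = 0; i < tlb->nr; i++)
		free(tlb->pages[i]);
	tlb->nr = 0;
}

/* Queue a page for freeing; flush when the batch fills up. */
static void tlb_remove_page(struct mmu_gather *tlb, void *page)
{
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr == BATCH_MAX)
		tlb_flush_mmu(tlb);
}

int main(void)
{
	struct mm_struct mm = { .id = 1 };
	struct mmu_gather tlb = { .mm = &mm, .fullmm = 1 };

	for (int i = 0; i < 10; i++)
		tlb_remove_page(&tlb, malloc(64));
	tlb_flush_mmu(&tlb);	/* final flush, as tlb_finish_mmu() would do */
	return 0;
}

Since unicore32 only ever needs a whole-mm flush, defining tlb_flush() as flush_tlb_mm() and stubbing out the range-tracking hooks lets the generic code handle all of the batching above, which is why the arch header shrinks from 87 lines to 17.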