Commit 8aa6a07c authored by David Mosberger

ia64: Incorporate no-flush-needed optimization from Andrew's asm-generic/tlb.h.

parent 73d429f9
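
For readers skimming the diff: the change makes the TLB flush path a no-op unless some PTE was really unmapped, by tracking a need_flush bit on the gather structure that is set by every unmap/free operation and tested (and cleared) before flushing. Below is a minimal user-space sketch of that pattern; the names (struct gather, flush_mmu, remove_entry) are simplified stand-ins for illustration, not the kernel's actual mmu_gather API.

/* sketch.c -- illustrative only; simplified stand-ins, not kernel code */
#include <stdio.h>

struct gather {
	unsigned char need_flush;	/* really unmapped some PTEs? */
	unsigned long start_addr;
	unsigned long end_addr;
};

static void flush_mmu(struct gather *g)
{
	if (!g->need_flush)	/* nothing was unmapped: skip the purge entirely */
		return;
	g->need_flush = 0;
	printf("flushing TLB range %#lx-%#lx\n", g->start_addr, g->end_addr);
}

static void remove_entry(struct gather *g, unsigned long addr)
{
	g->need_flush = 1;	/* a real unmap happened; flush later */
	if (g->start_addr == ~0UL)
		g->start_addr = addr;
	g->end_addr = addr + 4096;
}

int main(void)
{
	struct gather g = { 0, ~0UL, 0 };

	flush_mmu(&g);		/* no-op: need_flush never set */
	remove_entry(&g, 0x2000);
	flush_mmu(&g);		/* performs the (simulated) flush */
	return 0;
}
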
@@ -54,7 +54,8 @@
 typedef struct {
 	struct mm_struct	*mm;
 	unsigned int		nr;		/* == ~0U => fast mode */
-	unsigned int		fullmm;		/* non-zero means full mm flush */
+	unsigned char		fullmm;		/* non-zero means full mm flush */
+	unsigned char		need_flush;	/* really unmapped some PTEs? */
 	unsigned long		freed;		/* number of pages freed */
 	unsigned long		start_addr;
 	unsigned long		end_addr;
@@ -73,6 +74,10 @@ ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
 {
 	unsigned int nr;
 
+	if (!tlb->need_flush)
+		return;
+	tlb->need_flush = 0;
+
 	if (tlb->fullmm) {
 		/*
 		 * Tearing down the entire address space.  This happens both as a result
@@ -167,18 +172,6 @@ tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
 	check_pgt_cache();
 }
 
-/*
- * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
- * PTE, not just those pointing to (normal) physical memory.
- */
-static inline void
-__tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
-{
-	if (tlb->start_addr == ~0UL)
-		tlb->start_addr = address;
-	tlb->end_addr = address + PAGE_SIZE;
-}
-
 /*
  * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
  * must be delayed until after the TLB has been flushed (see comments at the beginning of
@@ -187,6 +180,8 @@ __tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
 static inline void
 tlb_remove_page (mmu_gather_t *tlb, struct page *page)
 {
+	tlb->need_flush = 1;
+
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
 		return;
@@ -196,11 +191,37 @@ tlb_remove_page (mmu_gather_t *tlb, struct page *page)
 		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
 }
 
+/*
+ * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
+ * PTE, not just those pointing to (normal) physical memory.
+ */
+static inline void
+__tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
+{
+	if (tlb->start_addr == ~0UL)
+		tlb->start_addr = address;
+	tlb->end_addr = address + PAGE_SIZE;
+}
+
 #define tlb_start_vma(tlb, vma)			do { } while (0)
 #define tlb_end_vma(tlb, vma)			do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, addr)	__tlb_remove_tlb_entry(tlb, ptep, addr)
-#define pte_free_tlb(tlb, ptep)			__pte_free_tlb(tlb, ptep)
-#define pmd_free_tlb(tlb, ptep)			__pmd_free_tlb(tlb, ptep)
+
+#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
+do {							\
+	tlb->need_flush = 1;				\
+	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
+} while (0)
+
+#define pte_free_tlb(tlb, ptep)				\
+do {							\
+	tlb->need_flush = 1;				\
+	__pte_free_tlb(tlb, ptep);			\
+} while (0)
+
+#define pmd_free_tlb(tlb, ptep)				\
+do {							\
+	tlb->need_flush = 1;				\
+	__pmd_free_tlb(tlb, ptep);			\
+} while (0)
 
 #endif /* _ASM_IA64_TLB_H */
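
Design note: need_flush is set in the tlb_remove_tlb_entry / pte_free_tlb / pmd_free_tlb wrapper macros rather than inside the __-prefixed inline helpers. As the commit message says, the structure is taken from Andrew's asm-generic/tlb.h, so the ia64 header keeps the same split: the generic-facing macros record that a flush is needed, while the arch-level helpers stay purely mechanical, presumably keeping the two files easy to compare.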