Commit f21760b1 authored by Shaohua Li, committed by Linus Torvalds

thp: add tlb_remove_pmd_tlb_entry

We have tlb_remove_tlb_entry() to indicate that a pte tlb entry should be
flushed, but no corresponding API for a pmd entry.  This hasn't been a
problem so far because THP is currently x86-only and tlb_flush() on x86
flushes the entire TLB.  But it is confusing and easy to miss if THP is
ported to another architecture.
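
For illustration only (not part of this patch): an architecture whose
tlb_flush() does not wipe the whole TLB could provide its own
__tlb_remove_pmd_tlb_entry(), analogous to its __tlb_remove_tlb_entry(),
e.g. recording the range covered by the huge pmd for the later flush.
The start/end fields of mmu_gather used below are hypothetical, purely
to sketch the idea:

	#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
		do {							\
			/* hypothetical: widen the range to flush later */ \
			if ((address) < (tlb)->start)			\
				(tlb)->start = (address);		\
			if ((address) + HPAGE_PMD_SIZE > (tlb)->end)	\
				(tlb)->end = (address) + HPAGE_PMD_SIZE; \
		} while (0)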

Also convert tlb->need_flush = 1 to a VM_BUG_ON(!tlb->need_flush) in
__tlb_remove_page(), as suggested by Andrea Arcangeli.  The
__tlb_remove_page() function is supposed to be called after a
tlb_remove_xxx_tlb_entry(), so this catches any misuse.
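
In other words, the expected ordering (illustrative, mirroring the
zap_huge_pmd() change below) is that a tlb_remove_*_tlb_entry() call sets
need_flush before the page is handed to the gather:

	pmd_clear(pmd);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);	/* sets tlb->need_flush */
	...
	tlb_remove_page(tlb, page);	/* __tlb_remove_page() now does
					   VM_BUG_ON(!tlb->need_flush) */
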
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e5591307
@@ -139,6 +139,20 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
+/**
+ * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
+ * This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pmd_tlb_entry
+#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
+	do {							\
+		tlb->need_flush = 1;				\
+		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
+	} while (0)
+
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		tlb->need_flush = 1;				\
@@ -18,7 +18,7 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 					  unsigned int flags);
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
-			pmd_t *pmd);
+			pmd_t *pmd, unsigned long addr);
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, unsigned long end,
 			unsigned char *vec);
@@ -1026,7 +1026,7 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 }
 
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
-		 pmd_t *pmd)
+		 pmd_t *pmd, unsigned long addr)
 {
 	int ret = 0;
 
@@ -1042,6 +1042,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		pgtable = get_pmd_huge_pte(tlb->mm);
 		page = pmd_page(*pmd);
 		pmd_clear(pmd);
+		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 		page_remove_rmap(page);
 		VM_BUG_ON(page_mapcount(page) < 0);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
@@ -293,7 +293,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	struct mmu_gather_batch *batch;
 
-	tlb->need_flush = 1;
+	VM_BUG_ON(!tlb->need_flush);
 
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
@@ -1231,7 +1231,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 			if (next-addr != HPAGE_PMD_SIZE) {
 				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
 				split_huge_page_pmd(vma->vm_mm, pmd);
-			} else if (zap_huge_pmd(tlb, vma, pmd))
+			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				continue;
 			/* fall through */
 		}