Commit b528e4b6 authored by Aneesh Kumar K.V, committed by Linus Torvalds

mm/hugetlb: add tlb_remove_hugetlb_entry for handling hugetlb pages

This adds tlb_remove_hugetlb_entry, similar to tlb_remove_pmd_tlb_entry.

Link: http://lkml.kernel.org/r/20161026084839.27299-4-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b5bc66b7
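
Before the diff itself, a minimal standalone sketch (plain userspace C, not kernel code) of what the asm-generic definitions below boil down to: tlb_remove_huge_tlb_entry() widens the mmu_gather flush range by huge_page_size(h) rather than PAGE_SIZE, so the eventual TLB invalidation covers the whole huge page. The struct hstate / struct mmu_gather stand-ins, HPAGE_SIZE, and the main() driver are illustrative assumptions, not part of the patch.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed 2 MB huge pages */

/* simplified stand-in for struct hstate */
struct hstate {
	unsigned long size;
};

static unsigned long huge_page_size(const struct hstate *h)
{
	return h->size;
}

/* simplified stand-in for struct mmu_gather: just the pending flush range */
struct mmu_gather {
	unsigned long start;
	unsigned long end;
};

/* grow the pending-flush range to cover [address, address + size) */
static void __tlb_adjust_range(struct mmu_gather *tlb,
			       unsigned long address, unsigned long size)
{
	if (address < tlb->start)
		tlb->start = address;
	if (address + size > tlb->end)
		tlb->end = address + size;
}

/* stand-in for the arch hook; a no-op here */
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)

/* plain PTE: only PAGE_SIZE worth of address space needs invalidating */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/* hugetlb PTE: the tracked range must cover the whole huge page */
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

int main(void)
{
	struct hstate h = { .size = HPAGE_SIZE };
	struct mmu_gather tlb = { .start = ~0UL, .end = 0 };

	tlb_remove_huge_tlb_entry(&h, &tlb, NULL, 0x200000UL);
	printf("flush range: [%#lx, %#lx)\n", tlb.start, tlb.end);
	/* with the plain macro the range would only grow by PAGE_SIZE */
	return 0;
}
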
@@ -186,6 +186,8 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
 	tlb_add_flush(tlb, addr);
 }
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	tlb_remove_tlb_entry(tlb, ptep, address)
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush. When we're doing a munmap,

@@ -283,6 +283,9 @@ do {							\
 	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
 } while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	tlb_remove_tlb_entry(tlb, ptep, address)
 #define pte_free_tlb(tlb, ptep, address)		\
 do {							\
 	tlb->need_flush = 1;				\

@@ -162,5 +162,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
 #define tlb_migrate_finish(mm)	do { } while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	tlb_remove_tlb_entry(tlb, ptep, address)
 #endif /* _S390_TLB_H */

@@ -65,6 +65,9 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
 		tlb->end = address + PAGE_SIZE;
 }
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	tlb_remove_tlb_entry(tlb, ptep, address)
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush. When we're doing a munmap,

@@ -141,6 +141,9 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 	__tlb_remove_tlb_entry(tlb, ptep, address);	\
 } while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	tlb_remove_tlb_entry(tlb, ptep, address)
 #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
 #define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

@@ -220,6 +220,12 @@ static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *pa
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 } while (0)
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	do {							\
+		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
+		__tlb_remove_tlb_entry(tlb, ptep, address);	\
+	} while (0)
 /**
  * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
  * This is a nop so far, because only x86 needs it.

@@ -3336,7 +3336,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	}
 	pte = huge_ptep_get_and_clear(mm, address, ptep);
-	tlb_remove_tlb_entry(tlb, ptep, address);
+	tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
 	if (huge_pte_dirty(pte))
 		set_page_dirty(page);