Commit 5d3a551c authored by Will Deacon's avatar Will Deacon Committed by Linus Torvalds

mm: hugetlb: add arch hook for clearing page flags before entering pool

The core page allocator ensures that page flags are zeroed when freeing
pages via free_pages_check.  A number of architectures (ARM, PPC, MIPS)
rely on this property to treat new pages as dirty with respect to the data
cache and perform the appropriate flushing before mapping the pages into
userspace.

This can lead to cache synchronisation problems when using hugepages,
since the allocator keeps its own pool of pages above the usual page
allocator and does not reset the page flags when freeing a page into the
pool.

This patch adds a new architecture hook, arch_clear_hugepage_flags, so
that architectures which rely on the page flags being in a particular
state for fresh allocations can adjust the flags accordingly when a page
is freed into the pool.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 01dc52eb
...@@ -77,4 +77,8 @@ static inline void arch_release_hugepage(struct page *page) ...@@ -77,4 +77,8 @@ static inline void arch_release_hugepage(struct page *page)
{ {
} }
/*
 * Hook called when a hugepage is freed back into the hugetlb pool.
 * This architecture keeps no page-flag state that must be reset before
 * the page is reused, so the hook is an empty stub.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#endif /* _ASM_IA64_HUGETLB_H */ #endif /* _ASM_IA64_HUGETLB_H */
...@@ -112,4 +112,8 @@ static inline void arch_release_hugepage(struct page *page) ...@@ -112,4 +112,8 @@ static inline void arch_release_hugepage(struct page *page)
{ {
} }
/*
 * Hook called when a hugepage is freed back into the hugetlb pool.
 * Empty stub: nothing in page->flags needs clearing here before reuse.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#endif /* __ASM_HUGETLB_H */ #endif /* __ASM_HUGETLB_H */
...@@ -151,6 +151,10 @@ static inline void arch_release_hugepage(struct page *page) ...@@ -151,6 +151,10 @@ static inline void arch_release_hugepage(struct page *page)
{ {
} }
/*
 * Hook called when a hugepage is freed back into the hugetlb pool
 * (CONFIG_HUGETLB_PAGE build).  Empty stub: no page-flag state is
 * reset before the page is reused.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#else /* ! CONFIG_HUGETLB_PAGE */ #else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma, static inline void flush_hugetlb_page(struct vm_area_struct *vma,
unsigned long vmaddr) unsigned long vmaddr)
......
...@@ -33,6 +33,7 @@ static inline int prepare_hugepage_range(struct file *file, ...@@ -33,6 +33,7 @@ static inline int prepare_hugepage_range(struct file *file,
} }
#define hugetlb_prefault_arch_hook(mm) do { } while (0) #define hugetlb_prefault_arch_hook(mm) do { } while (0)
/* No-op implementation of the new pool-entry hook: this architecture has
 * no page-flag state to clear when a hugepage re-enters the pool. */
#define arch_clear_hugepage_flags(page) do { } while (0)
int arch_prepare_hugepage(struct page *page); int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page); void arch_release_hugepage(struct page *page);
......
#ifndef _ASM_SH_HUGETLB_H #ifndef _ASM_SH_HUGETLB_H
#define _ASM_SH_HUGETLB_H #define _ASM_SH_HUGETLB_H
#include <asm/cacheflush.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -89,4 +90,9 @@ static inline void arch_release_hugepage(struct page *page) ...@@ -89,4 +90,9 @@ static inline void arch_release_hugepage(struct page *page)
{ {
} }
/*
 * Hook called when a hugepage is freed back into the hugetlb pool.
 * Mark the D-cache as not clean so the next mapping of this page
 * performs the cache flushing normally guaranteed for freshly
 * allocated pages (see commit message: the pool bypasses the core
 * allocator's flag reset in free_pages_check).
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
clear_bit(PG_dcache_clean, &page->flags);
}
#endif /* _ASM_SH_HUGETLB_H */ #endif /* _ASM_SH_HUGETLB_H */
...@@ -82,4 +82,8 @@ static inline void arch_release_hugepage(struct page *page) ...@@ -82,4 +82,8 @@ static inline void arch_release_hugepage(struct page *page)
{ {
} }
/*
 * Hook called when a hugepage is freed back into the hugetlb pool.
 * Empty stub: no page-flag state is reset on this architecture.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#endif /* _ASM_SPARC64_HUGETLB_H */ #endif /* _ASM_SPARC64_HUGETLB_H */
...@@ -106,6 +106,10 @@ static inline void arch_release_hugepage(struct page *page) ...@@ -106,6 +106,10 @@ static inline void arch_release_hugepage(struct page *page)
{ {
} }
/*
 * Hook called when a hugepage is freed back into the hugetlb pool.
 * Empty stub: nothing in page->flags needs clearing before reuse here.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#ifdef CONFIG_HUGETLB_SUPER_PAGES #ifdef CONFIG_HUGETLB_SUPER_PAGES
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
struct page *page, int writable) struct page *page, int writable)
......
...@@ -90,4 +90,8 @@ static inline void arch_release_hugepage(struct page *page) ...@@ -90,4 +90,8 @@ static inline void arch_release_hugepage(struct page *page)
{ {
} }
/*
 * Hook called when a hugepage is freed back into the hugetlb pool.
 * Empty stub: x86 does not rely on page flags for D-cache maintenance,
 * so no state is cleared before the page is reused.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#endif /* _ASM_X86_HUGETLB_H */ #endif /* _ASM_X86_HUGETLB_H */
...@@ -637,6 +637,7 @@ static void free_huge_page(struct page *page) ...@@ -637,6 +637,7 @@ static void free_huge_page(struct page *page)
h->surplus_huge_pages--; h->surplus_huge_pages--;
h->surplus_huge_pages_node[nid]--; h->surplus_huge_pages_node[nid]--;
} else { } else {
arch_clear_hugepage_flags(page);
enqueue_huge_page(h, page); enqueue_huge_page(h, page);
} }
spin_unlock(&hugetlb_lock); spin_unlock(&hugetlb_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment