Commit 6bc56a4d authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: add vma_alloc_zeroed_movable_folio()

Replace alloc_zeroed_user_highpage_movable().  The main difference is
returning a folio containing a single page instead of returning the page,
but take the opportunity to rename the function to match other allocation
functions a little better and rewrite the documentation to place more
emphasis on the zeroing rather than the highmem aspect.

Link: https://lkml.kernel.org/r/20230116191813.2145215-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c5792d93
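
The call-site conversion this commit performs is mechanical: allocate a folio instead of a page, and take &folio->page where page-based code remains. A minimal sketch of the pattern (illustrative only; example_fault_path() and finish_with_page() are hypothetical, not part of the patch):

#include <linux/mm.h>

/*
 * Illustrative caller conversion.  vma_alloc_folio() takes
 * (gfp, order, vma, addr, hugepage); order 0 yields a single-page folio.
 */
static vm_fault_t example_fault_path(struct vm_area_struct *vma,
				     unsigned long addr)
{
	struct folio *folio;

	/* Before: page = alloc_zeroed_user_highpage_movable(vma, addr); */
	folio = vma_alloc_zeroed_movable_folio(vma, addr);
	if (!folio)
		return VM_FAULT_OOM;

	/* Legacy page-based code can still use the folio's sole page. */
	return finish_with_page(&folio->page);	/* hypothetical helper */
}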
arch/alpha/include/asm/page.h
@@ -17,9 +17,8 @@
 extern void clear_page(void *page);
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 extern void copy_page(void * _to, void * _from);
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
arch/arm64/include/asm/page.h
@@ -29,9 +29,9 @@ void copy_user_highpage(struct page *to, struct page *from,
 void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
-struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
						unsigned long vaddr);
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
 
 void tag_clear_highpage(struct page *to);
 #define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
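
The self-referential define above is the new override convention: instead of a separate __HAVE_ARCH_ symbol, the generic header tests the function's own name with #ifndef. A minimal sketch of the idiom with hypothetical names (my_hook is not kernel code):

/* arch header: real declaration, plus the name defined to itself */
void my_hook(void);
#define my_hook my_hook

/* generic header, included afterwards: fallback only if undefined */
#ifndef my_hook
static inline void my_hook(void) { /* default implementation */ }
#endif

This works because the arch header (asm/page.h here) is pulled in before the generic one (linux/highmem.h), so the macro is already visible when the #ifndef is evaluated.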
arch/arm64/mm/fault.c
@@ -925,7 +925,7 @@ NOKPROBE_SYMBOL(do_debug_exception);
 /*
  * Used during anonymous page fault handling.
  */
-struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
						unsigned long vaddr)
 {
	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
@@ -938,7 +938,7 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
	if (vma->vm_flags & VM_MTE)
		flags |= __GFP_ZEROTAGS;
 
-	return alloc_page_vma(flags, vma, vaddr);
+	return vma_alloc_folio(flags, 0, vma, vaddr, false);
 }
 
 void tag_clear_highpage(struct page *page)
arch/ia64/include/asm/page.h
@@ -82,17 +82,15 @@ do { \
 } while (0)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr)			\
+#define vma_alloc_zeroed_movable_folio(vma, vaddr)			\
 ({									\
-	struct page *page = alloc_page_vma(				\
-		GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr);		\
-	if (page)							\
-		flush_dcache_page(page);				\
-	page;								\
+	struct folio *folio = vma_alloc_folio(				\
+		GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
+	if (folio)							\
+		flush_dcache_folio(folio);				\
+	folio;								\
 })
 
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
-
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #include <asm-generic/memory_model.h>
arch/m68k/include/asm/page_no.h
@@ -13,9 +13,8 @@ extern unsigned long memory_end;
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 #define __pa(vaddr)		((unsigned long)(vaddr))
 #define __va(paddr)		((void *)((unsigned long)(paddr)))
arch/s390/include/asm/page.h
@@ -73,9 +73,8 @@ static inline void copy_page(void *to, void *from)
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 /*
  * These are used to make use of C type-checking..
arch/x86/include/asm/page.h
@@ -34,9 +34,8 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
	copy_page(to, from);
 }
 
-#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
-	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
 
 #ifndef __pa
 #define __pa(x)	__phys_addr((unsigned long)(x))
include/linux/highmem.h
@@ -207,31 +207,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 }
 #endif
 
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+#ifndef vma_alloc_zeroed_movable_folio
 /**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * Returns: The allocated and zeroed HIGHMEM page
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
- * implementation.
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
+ *
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address.  It may be allocated from highmem or
+ * the movable zone.  An architecture may provide its own implementation.
+ *
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
  */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
-				   unsigned long vaddr)
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
				   unsigned long vaddr)
 {
-	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
+	struct folio *folio;
 
-	if (page)
-		clear_user_highpage(page, vaddr);
+	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+	if (folio)
+		clear_user_highpage(&folio->page, vaddr);
 
-	return page;
+	return folio;
 }
 #endif
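
Note the generic fallback allocates without __GFP_ZERO and zeroes through clear_user_highpage() instead. Unlike allocator-time zeroing, clear_user_highpage() knows the user-space address the page will appear at, so architectures with aliasing caches can flush correctly; arch versions that pass __GFP_ZERO directly either have no such concern or, like ia64 above, add an explicit flush. A sketch contrasting the two forms (zeroed_folio_two_ways() is a hypothetical illustration, not kernel code):

#include <linux/highmem.h>
#include <linux/mm.h>

static inline struct folio *zeroed_folio_two_ways(struct vm_area_struct *vma,
						  unsigned long vaddr,
						  bool zero_in_allocator)
{
	struct folio *folio;

	if (zero_in_allocator)
		/* Let the page allocator zero the memory at allocation. */
		return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO,
				       0, vma, vaddr, false);

	/* Allocate first, then zero via the highmem-aware helper, which
	 * maps the page if needed and clears it with knowledge of the
	 * user address for architectures that need cache maintenance.
	 */
	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);
	return folio;
}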
mm/memory.c
@@ -3056,10 +3056,12 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
		goto oom;
 
	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-		new_page = alloc_zeroed_user_highpage_movable(vma,
-							      vmf->address);
-		if (!new_page)
+		struct folio *new_folio;
+
+		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+		if (!new_folio)
			goto oom;
+		new_page = &new_folio->page;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
				vmf->address);
@@ -3995,6 +3997,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
+	struct folio *folio;
	vm_fault_t ret = 0;
	pte_t entry;
@@ -4044,11 +4047,12 @@
	/* Allocate our own private page. */
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
-	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
-	if (!page)
+	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+	if (!folio)
		goto oom;
+	page = &folio->page;
 
-	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
		goto oom_free_page;
	cgroup_throttle_swaprate(page, GFP_KERNEL);