Commit cfd19c5a authored by Mel Gorman, committed by Linus Torvalds

mm: only set page->pfmemalloc when ALLOC_NO_WATERMARKS was used

__alloc_pages_slowpath() is called when the number of free pages is below
the low watermark.  If the caller is entitled to use ALLOC_NO_WATERMARKS
then the page will be marked page->pfmemalloc.  This protects more pages
than are strictly necessary as we only need to protect pages allocated
below the min watermark (the pfmemalloc reserves).

This patch only sets page->pfmemalloc when ALLOC_NO_WATERMARKS was
required to allocate the page.
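
A minimal, self-contained sketch of the policy change may help: the old code
marked a page whenever the caller was entitled to ALLOC_NO_WATERMARKS, while
the new code marks it only when the no-watermark path actually satisfied the
allocation. The flag value and helper names below are stand-ins for
illustration, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_NO_WATERMARKS 0x04	/* stand-in value, not the kernel's */

/* Old policy: mark whenever the caller was entitled to ignore watermarks,
 * even if the page actually came from a normal (watermark-obeying) path. */
static bool pfmemalloc_old_policy(unsigned int alloc_flags)
{
	return !!(alloc_flags & ALLOC_NO_WATERMARKS);
}

/* New policy: mark only when the allocation succeeded via the
 * no-watermark (high-priority) path, i.e. the pfmemalloc reserves
 * below the min watermark were actually dipped into. */
static bool pfmemalloc_new_policy(bool used_no_watermarks_path)
{
	return used_no_watermarks_path;
}

int main(void)
{
	/* An entitled caller whose request was in fact satisfied by direct
	 * reclaim: the old policy marks the page, the new one does not. */
	printf("old=%d new=%d\n",
	       pfmemalloc_old_policy(ALLOC_NO_WATERMARKS),
	       pfmemalloc_new_policy(false));
	return 0;
}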

[rientjes@google.com: David noticed the problem during review]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: David Miller <davem@davemloft.net>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 907aed48
@@ -2116,8 +2116,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 		page = get_page_from_freelist(gfp_mask, nodemask,
 				order, zonelist, high_zoneidx,
-				alloc_flags, preferred_zone,
-				migratetype);
+				alloc_flags & ~ALLOC_NO_WATERMARKS,
+				preferred_zone, migratetype);
 		if (page) {
 			preferred_zone->compact_considered = 0;
 			preferred_zone->compact_defer_shift = 0;
@@ -2209,8 +2209,8 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 retry:
 	page = get_page_from_freelist(gfp_mask, nodemask, order,
 					zonelist, high_zoneidx,
-					alloc_flags, preferred_zone,
-					migratetype);
+					alloc_flags & ~ALLOC_NO_WATERMARKS,
+					preferred_zone, migratetype);
 
 	/*
 	 * If an allocation failed after direct reclaim, it could be because
@@ -2381,8 +2381,17 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		page = __alloc_pages_high_priority(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-		if (page)
+		if (page) {
+			/*
+			 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+			 * necessary to allocate the page. The expectation is
+			 * that the caller is taking steps that will free more
+			 * memory. The caller should avoid the page being used
+			 * for !PFMEMALLOC purposes.
+			 */
+			page->pfmemalloc = true;
 			goto got_pg;
+		}
 	}
 
 	/* Atomic allocations - we can't balance anything */
@@ -2499,14 +2508,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	warn_alloc_failed(gfp_mask, order, NULL);
 	return page;
 got_pg:
-	/*
-	 * page->pfmemalloc is set when the caller had PFMEMALLOC set, is
-	 * been OOM killed or specified __GFP_MEMALLOC. The expectation is
-	 * that the caller is taking steps that will free more memory. The
-	 * caller should avoid the page being used for !PFMEMALLOC purposes.
-	 */
-	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
-
 	if (kmemcheck_enabled)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 