Commit 31a6c190 authored by Vlastimil Babka, committed by Linus Torvalds

mm, page_alloc: set alloc_flags only once in slowpath

In __alloc_pages_slowpath(), alloc_flags doesn't change after it's
initialized, so move the initialization above the retry: label.  Also
make the comment above the initialization more descriptive.
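
Roughly, the reordering looks like the sketch below. This is a simplified,
self-contained illustration, not the kernel code: the helper signatures are
trimmed, the flag value and the retry bound are invented, and slowpath_sketch()
is a made-up name. It only shows the shape of the control flow, namely that
alloc_flags is computed once, before the retry: label, so every pass through
the loop reuses the same value.

#include <stdbool.h>
#include <stddef.h>

typedef unsigned int gfp_t;                    /* stand-in for the kernel typedef */
#define __GFP_KSWAPD_RECLAIM 0x1u              /* illustrative value only */

struct page;                                   /* opaque in this sketch */

/* Stubs standing in for the real page allocator helpers (signatures simplified). */
static unsigned int gfp_to_alloc_flags(gfp_t gfp_mask) { return gfp_mask; }
static void wake_all_kswapds(unsigned int order) { (void)order; }
static struct page *get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
					   unsigned int alloc_flags)
{
	(void)gfp_mask; (void)order; (void)alloc_flags;
	return NULL;
}

static struct page *slowpath_sketch(gfp_t gfp_mask, unsigned int order)
{
	/* Set up once, before retry:, because it no longer changes. */
	unsigned int alloc_flags = gfp_to_alloc_flags(gfp_mask);
	int attempts = 0;
	struct page *page;

retry:
	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		wake_all_kswapds(order);

	page = get_page_from_freelist(gfp_mask, order, alloc_flags);
	if (page)
		return page;

	if (++attempts < 3)	/* invented bound; the real loop has richer exit logic */
		goto retry;

	return NULL;
}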

The only exception to alloc_flags being constant is ALLOC_NO_WATERMARKS,
which may change when TIF_MEMDIE is set on the allocating thread.  We can
fix this, and make the code simpler and a bit more efficient at the same
time, by moving the part that determines ALLOC_NO_WATERMARKS from
gfp_to_alloc_flags() to gfp_pfmemalloc_allowed().

This means we don't have to mask out ALLOC_NO_WATERMARKS in numerous
places in __alloc_pages_slowpath() anymore.  The only two tests for the
flag can instead call gfp_pfmemalloc_allowed().
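
The shape of the resulting predicate, and how callers use it, can be sketched
as below. This is a condensed, self-contained illustration: the flag values
and the variables current_flags, task_is_memdie, serving_softirq and
in_interrupt_context are stand-ins for current->flags, TIF_MEMDIE and the
softirq/interrupt-context checks, not the kernel's real definitions. The
actual function is in the diff that follows.

#include <stdbool.h>
#include <stddef.h>

typedef unsigned int gfp_t;              /* stand-in for the kernel typedef */
#define __GFP_NOMEMALLOC 0x1u            /* illustrative values only */
#define __GFP_MEMALLOC   0x2u
#define PF_MEMALLOC      0x4u

/* Stand-ins for current->flags, TIF_MEMDIE and interrupt-context state. */
static unsigned int current_flags;
static bool task_is_memdie;
static bool serving_softirq;
static bool in_interrupt_context;

/*
 * The whole "may this allocation ignore watermarks?" decision lives here,
 * so callers evaluate it at the point of use instead of freezing it into
 * alloc_flags when those are first computed.
 */
static bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_NOMEMALLOC)
		return false;
	if (gfp_mask & __GFP_MEMALLOC)
		return true;
	if (serving_softirq && (current_flags & PF_MEMALLOC))
		return true;
	if (!in_interrupt_context &&
	    ((current_flags & PF_MEMALLOC) || task_is_memdie))
		return true;
	return false;
}

A caller that previously passed alloc_flags & ~ALLOC_NO_WATERMARKS and later
tested alloc_flags & ALLOC_NO_WATERMARKS now passes alloc_flags unchanged and
calls gfp_pfmemalloc_allowed(gfp_mask) only for the no-watermark attempt, as
the hunks in __alloc_pages_slowpath() below show.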

Link: http://lkml.kernel.org/r/20160721073614.24395-3-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 87cc271d
@@ -3119,8 +3119,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 */
 	count_vm_event(COMPACTSTALL);

-	page = get_page_from_freelist(gfp_mask, order,
-					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
+	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

 	if (page) {
 		struct zone *zone = page_zone(page);
@@ -3288,8 +3287,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 		return NULL;

retry:
-	page = get_page_from_freelist(gfp_mask, order,
-					alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
+	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

 	/*
 	 * If an allocation failed after direct reclaim, it could be because
@@ -3351,16 +3349,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;

-	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
-		if (gfp_mask & __GFP_MEMALLOC)
-			alloc_flags |= ALLOC_NO_WATERMARKS;
-		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
-			alloc_flags |= ALLOC_NO_WATERMARKS;
-		else if (!in_interrupt() &&
-				((current->flags & PF_MEMALLOC) ||
-				 unlikely(test_thread_flag(TIF_MEMDIE))))
-			alloc_flags |= ALLOC_NO_WATERMARKS;
-	}

 #ifdef CONFIG_CMA
 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
@@ -3370,7 +3358,19 @@ gfp_to_alloc_flags(gfp_t gfp_mask)

 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 {
-	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
+	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
+		return false;
+	if (gfp_mask & __GFP_MEMALLOC)
+		return true;
+	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
+		return true;
+	if (!in_interrupt() &&
+			((current->flags & PF_MEMALLOC) ||
+			 unlikely(test_thread_flag(TIF_MEMDIE))))
+		return true;
+
+	return false;
 }

 static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
@@ -3503,36 +3503,36 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
 		gfp_mask &= ~__GFP_ATOMIC;

-retry:
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
-		wake_all_kswapds(order, ac);

 	/*
-	 * OK, we're below the kswapd watermark and have kicked background
-	 * reclaim. Now things get more complex, so set up alloc_flags according
-	 * to how we want to proceed.
+	 * The fast path uses conservative alloc_flags to succeed only until
+	 * kswapd needs to be woken up, and to avoid the cost of setting up
+	 * alloc_flags precisely. So we do that now.
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);

+retry:
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		wake_all_kswapds(order, ac);

 	/*
 	 * Reset the zonelist iterators if memory policies can be ignored.
 	 * These allocations are high priority and system rather than user
 	 * orientated.
 	 */
-	if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+	if (!(alloc_flags & ALLOC_CPUSET) || gfp_pfmemalloc_allowed(gfp_mask)) {
 		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->high_zoneidx, ac->nodemask);
 	}

 	/* This is the last chance, in general, before the goto nopage. */
-	page = get_page_from_freelist(gfp_mask, order,
-				alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
+	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 	if (page)
 		goto got_pg;

 	/* Allocate without watermarks if the context allows */
-	if (alloc_flags & ALLOC_NO_WATERMARKS) {
+	if (gfp_pfmemalloc_allowed(gfp_mask)) {
 		page = get_page_from_freelist(gfp_mask, order,
 						ALLOC_NO_WATERMARKS, ac);
 		if (page)
...