Commit 91fbdc0f authored by Andrew Morton, committed by Linus Torvalds

mm/page_alloc.c:__alloc_pages_nodemask(): don't alter arg gfp_mask

__alloc_pages_nodemask() strips __GFP_IO when retrying the page
allocation.  But it does this by altering the function-wide variable
gfp_mask.  This will cause subsequent allocation attempts to inadvertently
use the modified gfp_mask.

Also, pass the correct mask (the mask we actually used) into
trace_mm_page_alloc().

Cc: Ming Lei <ming.lei@canonical.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6de22619
@@ -2865,6 +2865,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	unsigned int cpuset_mems_cookie;
 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
 	int classzone_idx;
+	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2898,22 +2899,24 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
-	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-			zonelist, high_zoneidx, alloc_flags,
-			preferred_zone, classzone_idx, migratetype);
+	alloc_mask = gfp_mask|__GFP_HARDWALL;
+	page = get_page_from_freelist(alloc_mask, nodemask, order, zonelist,
+			high_zoneidx, alloc_flags, preferred_zone,
+			classzone_idx, migratetype);
 	if (unlikely(!page)) {
 		/*
 		 * Runtime PM, block IO and its error handling path
 		 * can deadlock because I/O on the device might not
 		 * complete.
 		 */
-		gfp_mask = memalloc_noio_flags(gfp_mask);
-		page = __alloc_pages_slowpath(gfp_mask, order,
+		alloc_mask = memalloc_noio_flags(gfp_mask);
+
+		page = __alloc_pages_slowpath(alloc_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, classzone_idx, migratetype);
 	}
 
-	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+	trace_mm_page_alloc(page, order, alloc_mask, migratetype);
 
 out:
 	/*
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment