Commit 206ca74e authored by Nick Piggin, committed by Linus Torvalds

[PATCH] mm: higher order watermarks

Move the watermark checking code into a single function.  Extend it to
account for the order of the allocation and the number of free pages that
could satisfy such a request.

From: Marcelo Tosatti <marcelo.tosatti@cyclades.com>

Fix typo in Nick's kswapd-high-order awareness patch
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f86789bc
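
To illustrate what the order-aware check buys over the old flat free-page comparison, here is a minimal standalone sketch of the same loop (an editor's illustration, not part of the patch): the zone is reduced to a per-order array of free-block counts, and the __GFP_HIGH/can_try_harder discounts and the protection[] term are dropped for brevity. The names watermark_ok_sketch and free_at_order are hypothetical.

#include <stdio.h>

#define MAX_ORDER 11

/*
 * Sketch of the order-aware watermark check: free_at_order[o] holds the
 * number of free blocks of 2^o pages (like zone->free_area[o].nr_free).
 * Returns 1 if an order-'order' allocation would leave the zone above 'mark'.
 */
static int watermark_ok_sketch(const unsigned long free_at_order[MAX_ORDER],
                               unsigned long total_free, int order, long mark)
{
        /* Free pages left after this allocation; may go "negative". */
        long free_pages = (long)total_free - (1L << order) + 1;
        long min = mark;
        int o;

        if (free_pages <= min)
                return 0;
        for (o = 0; o < order; o++) {
                /* Blocks of this order cannot satisfy the request. */
                free_pages -= (long)(free_at_order[o] << o);
                /* Require fewer of the larger, rarer blocks to be free. */
                min >>= 1;
                if (free_pages <= min)
                        return 0;
        }
        return 1;
}

int main(void)
{
        /* 64 single pages, 8 pairs, 2 blocks of 4 pages: 88 pages free. */
        unsigned long free_at_order[MAX_ORDER] = { 64, 8, 2 };
        unsigned long total_free = 64 + 8 * 2 + 2 * 4;

        printf("order 0: %d\n",
               watermark_ok_sketch(free_at_order, total_free, 0, 32));
        printf("order 3: %d\n",
               watermark_ok_sketch(free_at_order, total_free, 3, 32));
        return 0;
}

With 88 pages free but nothing larger than a 4-page block, the order-3 request is rejected at o = 1 of the loop, while the order-0 request passes; the old check, which only compared z->free_pages against the mark, would have accepted both.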
@@ -280,6 +280,8 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive,
 		unsigned long *free);
 void build_all_zonelists(void);
 void wakeup_kswapd(struct zone *zone);
+int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		int alloc_type, int can_try_harder, int gfp_high);
 
 /*
  * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
...
@@ -585,6 +585,37 @@ buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
 	return page;
 }
 
+/*
+ * Return 1 if free pages are above 'mark'. This takes into account the order
+ * of the allocation.
+ */
+int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		int alloc_type, int can_try_harder, int gfp_high)
+{
+	/* free_pages may go negative - that's OK */
+	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+	int o;
+
+	if (gfp_high)
+		min -= min / 2;
+	if (can_try_harder)
+		min -= min / 4;
+
+	if (free_pages <= min + z->protection[alloc_type])
+		return 0;
+	for (o = 0; o < order; o++) {
+		/* At the next order, this order's pages become unavailable */
+		free_pages -= z->free_area[o].nr_free << o;
+
+		/* Require fewer higher order pages to be free */
+		min >>= 1;
+
+		if (free_pages <= min)
+			return 0;
+	}
+	return 1;
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  *
@@ -606,7 +637,6 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
 	const int wait = gfp_mask & __GFP_WAIT;
-	unsigned long min;
 	struct zone **zones, *z;
 	struct page *page;
 	struct reclaim_state reclaim_state;
@@ -636,9 +666,9 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 	/* Go through the zonelist once, looking for a zone with enough free */
 	for (i = 0; (z = zones[i]) != NULL; i++) {
-		min = z->pages_low + (1<<order) + z->protection[alloc_type];
-		if (z->free_pages < min)
+		if (!zone_watermark_ok(z, order, z->pages_low,
+					alloc_type, 0, 0))
 			continue;
 
 		page = buffered_rmqueue(z, order, gfp_mask);
@@ -654,14 +684,9 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 	 * coming from realtime tasks to go deeper into reserves
 	 */
 	for (i = 0; (z = zones[i]) != NULL; i++) {
-		min = z->pages_min;
-		if (gfp_mask & __GFP_HIGH)
-			min /= 2;
-		if (can_try_harder)
-			min -= min / 4;
-		min += (1<<order) + z->protection[alloc_type];
-		if (z->free_pages < min)
+		if (!zone_watermark_ok(z, order, z->pages_min,
+					alloc_type, can_try_harder,
+					gfp_mask & __GFP_HIGH))
 			continue;
 
 		page = buffered_rmqueue(z, order, gfp_mask);
@@ -697,14 +722,9 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 	/* go through the zonelist yet one more time */
 	for (i = 0; (z = zones[i]) != NULL; i++) {
-		min = z->pages_min;
-		if (gfp_mask & __GFP_HIGH)
-			min /= 2;
-		if (can_try_harder)
-			min -= min / 4;
-		min += (1<<order) + z->protection[alloc_type];
-		if (z->free_pages < min)
+		if (!zone_watermark_ok(z, order, z->pages_min,
+					alloc_type, can_try_harder,
+					gfp_mask & __GFP_HIGH))
 			continue;
 
 		page = buffered_rmqueue(z, order, gfp_mask);
...