Commit f2260e6b authored by Mel Gorman, committed by Linus Torvalds

page allocator: update NR_FREE_PAGES only as necessary

When pages are being freed to the buddy allocator, the zone NR_FREE_PAGES
counter must be updated.  In the case of bulk per-cpu page freeing, it's
updated once per page.  This retouches cache lines more than necessary.
Update the counters once per per-cpu bulk free.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 41858966
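The idea of the patch as a stand-alone user-space sketch (illustrative only; struct fake_zone and the two bulk_free_* functions below are invented for the example and are not the kernel API): rather than adjusting the shared free-page counter once for every page freed in a batch, adjust it a single time for the whole batch, so the counter's cache line is touched once instead of count times.

#include <stdio.h>

struct fake_zone {
        long nr_free_pages;     /* stands in for the per-zone NR_FREE_PAGES counter */
};

/* Old pattern: one read-modify-write of the counter per page freed. */
static void bulk_free_per_page(struct fake_zone *zone, int count, int order)
{
        while (count--)
                zone->nr_free_pages += 1 << order;
}

/* New pattern: account the entire batch with a single update. */
static void bulk_free_batched(struct fake_zone *zone, int count, int order)
{
        zone->nr_free_pages += (long)count << order;
}

int main(void)
{
        struct fake_zone zone = { .nr_free_pages = 0 };

        bulk_free_per_page(&zone, 16, 0);   /* 16 order-0 pages, 16 counter updates */
        bulk_free_batched(&zone, 16, 0);    /* 16 order-0 pages, 1 counter update   */
        printf("free pages: %ld\n", zone.nr_free_pages);    /* prints 32 */
        return 0;
}

In the patch itself the same idea appears as moving the __mod_zone_page_state(zone, NR_FREE_PAGES, ...) calls out of __free_one_page(), __rmqueue_smallest() and __rmqueue_fallback() and into the callers (free_pages_bulk(), free_one_page(), rmqueue_bulk(), buffered_rmqueue()), where the size of the whole batch, count << order, is known.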
mm/page_alloc.c
@@ -456,7 +456,6 @@ static inline void __free_one_page(struct page *page,
                int migratetype)
 {
        unsigned long page_idx;
-       int order_size = 1 << order;
 
        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
@@ -466,10 +465,9 @@ static inline void __free_one_page(struct page *page,
        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-       VM_BUG_ON(page_idx & (order_size - 1));
+       VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct page *buddy;
 
@@ -524,6 +522,8 @@ static void free_pages_bulk(struct zone *zone, int count,
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
        while (count--) {
                struct page *page;
 
@@ -542,6 +542,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        __free_one_page(page, zone, order, migratetype);
        spin_unlock(&zone->lock);
 }
@@ -686,7 +688,6 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
-               __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }
@@ -826,8 +827,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);
-                       __mod_zone_page_state(zone, NR_FREE_PAGES,
-                                                       -(1UL << order));
 
                        if (current_order == pageblock_order)
                                set_pageblock_migratetype(page,
@@ -900,6 +899,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                set_page_private(page, migratetype);
                list = &page->lru;
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
 }
@@ -1129,6 +1129,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
        } else {
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
+               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;