Commit ed0ae21d authored by Mel Gorman, committed by Linus Torvalds

page allocator: do not call get_pageblock_migratetype() more than necessary

get_pageblock_migratetype() is potentially called twice for every page
free: once when the page is freed to the pcp lists and once when it is
freed back to buddy.  When freeing from the pcp lists, the pageblock type
at the time of the original free is already known, so use it rather than
rechecking.  In low memory situations under memory pressure, this might
skew anti-fragmentation slightly, but the interference is minimal and
decisions that fragment memory are being made anyway.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0ac3a409
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -452,16 +452,18 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  */
 
 static inline void __free_one_page(struct page *page,
-		struct zone *zone, unsigned int order)
+		struct zone *zone, unsigned int order,
+		int migratetype)
 {
 	unsigned long page_idx;
 	int order_size = 1 << order;
-	int migratetype = get_pageblock_migratetype(page);
 
 	if (unlikely(PageCompound(page)))
 		if (unlikely(destroy_compound_page(page, order)))
 			return;
 
+	VM_BUG_ON(migratetype == -1);
+
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
 	VM_BUG_ON(page_idx & (order_size - 1));
@@ -530,17 +532,18 @@ static void free_pages_bulk(struct zone *zone, int count,
 		page = list_entry(list->prev, struct page, lru);
 		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
-		__free_one_page(page, zone, order);
+		__free_one_page(page, zone, order, page_private(page));
 	}
 	spin_unlock(&zone->lock);
 }
 
-static void free_one_page(struct zone *zone, struct page *page, int order)
+static void free_one_page(struct zone *zone, struct page *page, int order,
+				int migratetype)
 {
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
-	__free_one_page(page, zone, order);
+	__free_one_page(page, zone, order, migratetype);
 	spin_unlock(&zone->lock);
 }
 
@@ -565,7 +568,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
-	free_one_page(page_zone(page), page, order);
+	free_one_page(page_zone(page), page, order,
+					get_pageblock_migratetype(page));
 	local_irq_restore(flags);
 }
...
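
The optimization hinges on the fact that a page sitting on a pcp list does
not use its page->private field, so the pcp-side free can cache the
migratetype there and free_pages_bulk() can read it back with
page_private() instead of repeating the pageblock bitmap lookup.  (The
counterpart hunk that stores the value on the pcp free path falls in the
part of the diff elided above.)  Below is a rough, self-contained
userspace sketch of that compute-once, stash, reuse pattern; the struct
and function bodies are illustrative stand-ins, not the kernel's.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures; illustration only. */
struct page {
	unsigned long private;	/* unused while the page sits on a pcp list */
	unsigned long pfn;
};

static int lookup_count;

/* Stand-in for get_pageblock_migratetype(): an expensive bitmap lookup
 * in the kernel, simulated here and instrumented to count calls. */
static int get_pageblock_migratetype(struct page *page)
{
	lookup_count++;
	return (int)(page->pfn >> 10) & 3;	/* fake per-block type */
}

/* pcp-side free: do the lookup once and stash the result in
 * page->private, as the pcp free path does in this patch. */
static void pcp_free(struct page *page)
{
	page->private = (unsigned long)get_pageblock_migratetype(page);
}

/* Drain to buddy: reuse the cached value instead of rechecking,
 * as free_pages_bulk() does via page_private(page). */
static void bulk_free(struct page *page)
{
	int migratetype = (int)page->private;
	printf("pfn %lu freed to buddy list %d\n", page->pfn, migratetype);
}

int main(void)
{
	struct page pages[4] = { {0, 100}, {0, 2000}, {0, 3000}, {0, 5000} };

	for (int i = 0; i < 4; i++)
		pcp_free(&pages[i]);
	for (int i = 0; i < 4; i++)
		bulk_free(&pages[i]);

	/* One lookup per page instead of two. */
	printf("migratetype lookups: %d\n", lookup_count);
	return 0;
}

The instrumented counter makes the saving visible: four pages freed, four
migratetype lookups instead of eight.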