Commit 3e4c5912 authored by Andrew Morton, committed by David S. Miller

[PATCH] fix page alloc/free accounting

We're currently incrementing /proc/vmstat:pgalloc in front of the
per-cpu page queues, and incrementing /proc/vmstat:pgfree behind the
per-cpu queues.  So they get out of whack.

Change it so that we increment the counters each time someone requests
a page; i.e., they're both in front of the queues.
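To make the new discipline concrete, here is a minimal userspace sketch
(plain C, not kernel code; pcp_cache, sketch_alloc_page and
sketch_free_page are invented names standing in for the per-cpu queue and
its entry points).  Both counters are bumped at the request/release
boundary, in front of the cache, so pgalloc - pgfree always equals the
number of pages callers still hold, no matter how many freed pages are
parked in the queue:

#include <stdio.h>
#include <stdlib.h>

static unsigned long pgalloc, pgfree;	/* mirror the /proc/vmstat fields */

#define CACHE_MAX 8
static void *pcp_cache[CACHE_MAX];	/* stand-in for a per-cpu page queue */
static int pcp_count;

static void *sketch_alloc_page(void)
{
	pgalloc++;				/* counted on every request... */
	if (pcp_count > 0)
		return pcp_cache[--pcp_count];	/* ...even on a cache hit */
	return malloc(4096);			/* "buddy allocator" fallback */
}

static void sketch_free_page(void *page)
{
	pgfree++;				/* counted when the caller lets go */
	if (pcp_count < CACHE_MAX) {
		pcp_cache[pcp_count++] = page;	/* parked, but already counted */
		return;
	}
	free(page);				/* cache full: really free it */
}

int main(void)
{
	void *a = sketch_alloc_page();
	void *b = sketch_alloc_page();

	sketch_free_page(a);	/* lands in the cache, yet pgfree already moved */
	printf("pgalloc=%lu pgfree=%lu in-use=%lu\n",
	       pgalloc, pgfree, pgalloc - pgfree);	/* 2 1 1 */
	sketch_free_page(b);
	printf("pgalloc=%lu pgfree=%lu in-use=%lu\n",
	       pgalloc, pgfree, pgalloc - pgfree);	/* 2 2 0 */
	return 0;
}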

Also, remove a duplicated prep_new_page() call and, as a consequence,
drop the whole additional list walk in rmqueue_bulk().
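The dropped walk existed only to run prep_new_page() on each page outside
the zone lock and then splice the temporary list into the caller's list;
since buffered_rmqueue() already calls prep_new_page() on every page it
hands out (see the hunk below), rmqueue_bulk() can link pages straight
into the caller's list instead.  The only subtlety is ordering: list_add()
is a head insert, list_add_tail() a tail insert.  A userspace sketch of
the difference, using a pared-down copy of the kernel's list helpers
(fake_page and the demo names are invented):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* LIFO: insert right after the head, as the old rmqueue_bulk() did */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/* FIFO: insert right before the head, as the new rmqueue_bulk() does */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

struct fake_page {
	int nr;
	struct list_head list;
};

static void print_list(const char *tag, struct list_head *head)
{
	struct list_head *curr;

	printf("%s:", tag);
	for (curr = head->next; curr != head; curr = curr->next)
		printf(" %d", list_entry(curr, struct fake_page, list)->nr);
	printf("\n");
}

int main(void)
{
	struct list_head stacked = LIST_HEAD_INIT(stacked);
	struct list_head queued = LIST_HEAD_INIT(queued);
	struct fake_page a[3] = { { .nr = 0 }, { .nr = 1 }, { .nr = 2 } };
	struct fake_page b[3] = { { .nr = 0 }, { .nr = 1 }, { .nr = 2 } };
	int i;

	for (i = 0; i < 3; i++)
		list_add(&a[i].list, &stacked);		/* old-style head insert */
	for (i = 0; i < 3; i++)
		list_add_tail(&b[i].list, &queued);	/* new-style tail insert */

	print_list("list_add     ", &stacked);	/* prints: 2 1 0 (reversed) */
	print_list("list_add_tail", &queued);	/* prints: 0 1 2 (in order) */
	return 0;
}

The two inserts yield opposite orders, but for a pool of interchangeable
free pages that makes no functional difference, which is why the temporary
list and the splice could go.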
parent d9c53386
@@ -175,7 +175,6 @@ free_pages_bulk(struct zone *zone, int count,
 		/* have to delete it as __free_pages_bulk list manipulates */
 		list_del(&page->list);
 		__free_pages_bulk(page, base, zone, area, mask, order);
-		mod_page_state(pgfree, count<<order);
 		ret++;
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -186,6 +185,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
 {
 	LIST_HEAD(list);

+	mod_page_state(pgfree, 1 << order);
 	free_pages_check(__FUNCTION__, page);
 	list_add(&page->list, &list);
 	free_pages_bulk(page_zone(page), 1, &list, order);
@@ -291,32 +291,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list)
 {
 	unsigned long flags;
-	int i, allocated = 0;
+	int i;
+	int allocated = 0;
 	struct page *page;
-	struct list_head *curr;
-	LIST_HEAD(temp);

 	spin_lock_irqsave(&zone->lock, flags);
 	for (i = 0; i < count; ++i) {
 		page = __rmqueue(zone, order);
 		if (page == NULL)
 			break;
-		++allocated;
-		list_add(&page->list, &temp);
+		allocated++;
+		list_add_tail(&page->list, list);
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
-
-	/*
-	 * This may look inefficient because we're walking the list again,
-	 * but the cachelines are hot, so it's very cheap, and this way we
-	 * can drop the zone lock much earlier
-	 */
-	list_for_each(curr, &temp) {
-		page = list_entry(curr, struct page, list);
-		BUG_ON(bad_range(zone, page));
-		prep_new_page(page, order);
-	}
-	list_splice(&temp, list->prev);
 	return allocated;
 }
@@ -354,6 +341,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct per_cpu_pages *pcp;
 	unsigned long flags;

+	inc_page_state(pgfree);
 	free_pages_check(__FUNCTION__, page);
 	pcp = &zone->pageset[get_cpu()].pcp[cold];
 	local_irq_save(flags);
@@ -405,6 +393,7 @@ static struct page *buffered_rmqueue(struct zone *zone, int order, int cold)
 	if (page != NULL) {
 		BUG_ON(bad_range(zone, page));
+		mod_page_state(pgalloc, 1 << order);
 		prep_new_page(page, order);
 	}
 	return page;
@@ -431,8 +420,6 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 	if (gfp_mask & __GFP_COLD)
 		cold = 1;

-	mod_page_state(pgalloc, 1<<order);
-
 	zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
 	classzone = zones[0];
 	if (classzone == NULL)    /* no zones in the zonelist */