Commit a5cc10d5 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] vmscan: fix calculation of number of pages scanned

From: Nick Piggin <piggin@cyberone.com.au>

The logic which calculates the number of pages which were scanned is mucked
up.  Fix.
parent b488ea81
...@@ -244,8 +244,7 @@ static void handle_write_error(struct address_space *mapping, ...@@ -244,8 +244,7 @@ static void handle_write_error(struct address_space *mapping,
* shrink_list returns the number of reclaimed pages * shrink_list returns the number of reclaimed pages
*/ */
static int static int
shrink_list(struct list_head *page_list, unsigned int gfp_mask, shrink_list(struct list_head *page_list, unsigned int gfp_mask, int *nr_scanned)
int *max_scan, int *nr_mapped)
{ {
struct address_space *mapping; struct address_space *mapping;
LIST_HEAD(ret_pages); LIST_HEAD(ret_pages);
...@@ -269,7 +268,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask, ...@@ -269,7 +268,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
/* Double the slab pressure for mapped and swapcache pages */ /* Double the slab pressure for mapped and swapcache pages */
if (page_mapped(page) || PageSwapCache(page)) if (page_mapped(page) || PageSwapCache(page))
(*nr_mapped)++; (*nr_scanned)++;
BUG_ON(PageActive(page)); BUG_ON(PageActive(page));
...@@ -477,7 +476,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask, ...@@ -477,7 +476,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
*/ */
static int static int
shrink_cache(const int nr_pages, struct zone *zone, shrink_cache(const int nr_pages, struct zone *zone,
unsigned int gfp_mask, int max_scan, int *nr_mapped) unsigned int gfp_mask, int max_scan, int *total_scanned)
{ {
LIST_HEAD(page_list); LIST_HEAD(page_list);
struct pagevec pvec; struct pagevec pvec;
...@@ -534,8 +533,8 @@ shrink_cache(const int nr_pages, struct zone *zone, ...@@ -534,8 +533,8 @@ shrink_cache(const int nr_pages, struct zone *zone,
mod_page_state_zone(zone, pgscan_kswapd, nr_scan); mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
else else
mod_page_state_zone(zone, pgscan_direct, nr_scan); mod_page_state_zone(zone, pgscan_direct, nr_scan);
nr_freed = shrink_list(&page_list, gfp_mask, nr_freed = shrink_list(&page_list, gfp_mask, total_scanned);
&max_scan, nr_mapped); *total_scanned += nr_taken;
if (current_is_kswapd()) if (current_is_kswapd())
mod_page_state(kswapd_steal, nr_freed); mod_page_state(kswapd_steal, nr_freed);
mod_page_state_zone(zone, pgsteal, nr_freed); mod_page_state_zone(zone, pgsteal, nr_freed);
...@@ -749,7 +748,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, ...@@ -749,7 +748,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
*/ */
static int static int
shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask, shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
const int nr_pages, int *nr_mapped, struct page_state *ps) const int nr_pages, int *total_scanned, struct page_state *ps)
{ {
unsigned long ratio; unsigned long ratio;
...@@ -782,7 +781,7 @@ shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask, ...@@ -782,7 +781,7 @@ shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
refill_inactive_zone(zone, count, ps); refill_inactive_zone(zone, count, ps);
} }
return shrink_cache(nr_pages, zone, gfp_mask, return shrink_cache(nr_pages, zone, gfp_mask,
max_scan, nr_mapped); max_scan, total_scanned);
} }
/* /*
...@@ -811,7 +810,6 @@ shrink_caches(struct zone **zones, int priority, int *total_scanned, ...@@ -811,7 +810,6 @@ shrink_caches(struct zone **zones, int priority, int *total_scanned,
for (i = 0; zones[i] != NULL; i++) { for (i = 0; zones[i] != NULL; i++) {
int to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX); int to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX);
struct zone *zone = zones[i]; struct zone *zone = zones[i];
int nr_mapped = 0;
int max_scan; int max_scan;
if (zone->free_pages < zone->pages_high) if (zone->free_pages < zone->pages_high)
...@@ -828,8 +826,7 @@ shrink_caches(struct zone **zones, int priority, int *total_scanned, ...@@ -828,8 +826,7 @@ shrink_caches(struct zone **zones, int priority, int *total_scanned,
if (max_scan < to_reclaim * 2) if (max_scan < to_reclaim * 2)
max_scan = to_reclaim * 2; max_scan = to_reclaim * 2;
ret += shrink_zone(zone, max_scan, gfp_mask, ret += shrink_zone(zone, max_scan, gfp_mask,
to_reclaim, &nr_mapped, ps); to_reclaim, total_scanned, ps);
*total_scanned += max_scan + nr_mapped;
if (ret >= nr_pages) if (ret >= nr_pages)
break; break;
} }
...@@ -944,7 +941,7 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps) ...@@ -944,7 +941,7 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
for (i = 0; i < pgdat->nr_zones; i++) { for (i = 0; i < pgdat->nr_zones; i++) {
struct zone *zone = pgdat->node_zones + i; struct zone *zone = pgdat->node_zones + i;
int nr_mapped = 0; int total_scanned = 0;
int max_scan; int max_scan;
int to_reclaim; int to_reclaim;
int reclaimed; int reclaimed;
...@@ -966,10 +963,10 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps) ...@@ -966,10 +963,10 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
if (max_scan < SWAP_CLUSTER_MAX) if (max_scan < SWAP_CLUSTER_MAX)
max_scan = SWAP_CLUSTER_MAX; max_scan = SWAP_CLUSTER_MAX;
reclaimed = shrink_zone(zone, max_scan, GFP_KERNEL, reclaimed = shrink_zone(zone, max_scan, GFP_KERNEL,
to_reclaim, &nr_mapped, ps); to_reclaim, &total_scanned, ps);
if (i < ZONE_HIGHMEM) { if (i < ZONE_HIGHMEM) {
reclaim_state->reclaimed_slab = 0; reclaim_state->reclaimed_slab = 0;
shrink_slab(max_scan + nr_mapped, GFP_KERNEL); shrink_slab(total_scanned, GFP_KERNEL);
reclaimed += reclaim_state->reclaimed_slab; reclaimed += reclaim_state->reclaimed_slab;
} }
to_free -= reclaimed; to_free -= reclaimed;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment