Commit ceb37d32 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] vmscan: batch up inactive list scanning work

From: Nick Piggin <piggin@cyberone.com.au>

Use a "refill_counter" for inactive list scanning, similar to the one used
for active list scanning.  This batches up scanning now that we precisely
balance ratios, and don't round up the amount to be done.

No observed benefits, but I imagine it would lower the acquisition
frequency of the lru locks in some cases, and make codepaths more efficient
in general due to cache niceness.
parent 085b4897
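
The batching pattern the patch relies on is simple: callers add the amount of
work they want done to a per-zone atomic counter, and the expensive part
(walking the list under the lru lock) only runs once at least one full batch
has accumulated.  The following is a minimal user-space sketch of that idea,
not the kernel code itself; SWAP_CLUSTER_MAX is the kernel's real batch size,
while request_scan() and do_batched_scan() are illustrative stand-ins for
shrink_zone() and shrink_cache().

/*
 * Minimal user-space sketch of the batching idea -- NOT the kernel code.
 * Small scan requests are deferred in an atomic counter; the (simulated)
 * lru lock is only taken once a full batch has built up.
 */
#include <stdatomic.h>
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32		/* batch size, as in the kernel */

static atomic_int nr_scan_inactive;	/* deferred scanning work */
static int lock_acquisitions;		/* how often we'd take lru_lock */

/* Stand-in for shrink_cache(): pretend to scan 'count' pages. */
static int do_batched_scan(int count)
{
	lock_acquisitions++;		/* one lock round trip per batch */
	return count;			/* pages "scanned" */
}

/* Stand-in for the inactive-list half of shrink_zone(). */
static int request_scan(int max_scan)
{
	int count;

	atomic_fetch_add(&nr_scan_inactive, max_scan);
	count = atomic_load(&nr_scan_inactive);
	if (max_scan > SWAP_CLUSTER_MAX) {
		atomic_fetch_sub(&nr_scan_inactive, count);
		return do_batched_scan(count);
	}
	return 0;			/* too little work: defer it */
}

int main(void)
{
	int i, scanned = 0;

	/* Eight small requests are deferred; one big request drains them. */
	for (i = 0; i < 8; i++)
		scanned += request_scan(8);
	scanned += request_scan(64);

	printf("scanned %d pages, %d lock acquisition(s)\n",
			scanned, lock_acquisitions);
	return 0;
}

Run as-is this prints "scanned 128 pages, 1 lock acquisition(s)": the eight
small requests cost nothing at the time and are worked off in one batch later.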
@@ -76,7 +76,8 @@ struct zone {
 	spinlock_t		lru_lock;
 	struct list_head	active_list;
 	struct list_head	inactive_list;
-	atomic_t		refill_counter;
+	atomic_t		nr_scan_active;
+	atomic_t		nr_scan_inactive;
 	unsigned long		nr_active;
 	unsigned long		nr_inactive;
 	int			all_unreclaimable; /* All pages pinned */
@@ -1405,7 +1405,8 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 				zone_names[j], realsize, batch);
 		INIT_LIST_HEAD(&zone->active_list);
 		INIT_LIST_HEAD(&zone->inactive_list);
-		atomic_set(&zone->refill_counter, 0);
+		atomic_set(&zone->nr_scan_active, 0);
+		atomic_set(&zone->nr_scan_inactive, 0);
 		zone->nr_active = 0;
 		zone->nr_inactive = 0;
 		if (!size)
@@ -742,6 +742,7 @@ shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
 		int *total_scanned, struct page_state *ps)
 {
 	unsigned long ratio;
+	int count;
 
 	/*
 	 * Try to keep the active list 2/3 of the size of the cache.  And
@@ -755,23 +756,28 @@ shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
 	 */
 	ratio = (unsigned long)SWAP_CLUSTER_MAX * zone->nr_active /
 				((zone->nr_inactive | 1) * 2);
-	atomic_add(ratio+1, &zone->refill_counter);
-	if (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
-		int count;
-
+	atomic_add(ratio+1, &zone->nr_scan_active);
+	if (atomic_read(&zone->nr_scan_active) > SWAP_CLUSTER_MAX) {
 		/*
 		 * Don't try to bring down too many pages in one attempt.
 		 * If this fails, the caller will increase `priority' and
 		 * we'll try again, with an increased chance of reclaiming
 		 * mapped memory.
 		 */
-		count = atomic_read(&zone->refill_counter);
+		count = atomic_read(&zone->nr_scan_active);
 		if (count > SWAP_CLUSTER_MAX * 4)
 			count = SWAP_CLUSTER_MAX * 4;
-		atomic_set(&zone->refill_counter, 0);
+		atomic_set(&zone->nr_scan_active, 0);
 		refill_inactive_zone(zone, count, ps);
 	}
-	return shrink_cache(zone, gfp_mask, max_scan, total_scanned);
+
+	atomic_add(max_scan, &zone->nr_scan_inactive);
+	count = atomic_read(&zone->nr_scan_inactive);
+	if (max_scan > SWAP_CLUSTER_MAX) {
+		atomic_sub(count, &zone->nr_scan_inactive);
+		return shrink_cache(zone, gfp_mask, count, total_scanned);
+	}
+	return 0;
 }
 
 /*
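
To make the effect of the new tail of shrink_zone() concrete (SWAP_CLUSTER_MAX
is 32): under the old code a call with max_scan = 16 went straight into
shrink_cache() and took the lru lock for a half-sized batch; under the new
code four such calls merely accumulate nr_scan_inactive to 64 and return 0,
and a later call with max_scan above 32 drains the whole counter, letting
shrink_cache() work through the deferred pages in full batches.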