Commit 3aa1dc77 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] multithread page reclaim

This patch multithreads the main page reclaim function, shrink_cache().

This function used to run under pagemap_lru_lock.  Instead, we now grab
that lock, move 32 pages from the LRU onto a private list, drop
pagemap_lru_lock and then try to free those pages.

Any pages which were successfully reclaimed are batch-freed.  Pages
which were not reclaimed are re-added to the LRU.
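
In outline, the new locking scheme looks something like the sketch
below.  This is a simplified illustration assuming the 2.4-era globals
(pagemap_lru_lock, inactive_list); try_to_reclaim_one() is a
hypothetical stand-in for the per-page reclaim work, not a function
from the patch:

	/* Minimal sketch of the batched reclaim scheme described above. */
	static void shrink_cache_sketch(void)
	{
		LIST_HEAD(page_list);		/* private list: no locking needed */
		int nr = 32;			/* the batch size the patch uses */

		/* Phase 1: detach a batch from the LRU under the lock. */
		spin_lock(&pagemap_lru_lock);
		while (nr-- && !list_empty(&inactive_list)) {
			struct page *page = list_entry(inactive_list.prev,
							struct page, lru);
			if (!TestClearPageLRU(page))
				BUG();
			list_del(&page->lru);
			page_cache_get(page);	/* pin the page while off the LRU */
			list_add(&page->lru, &page_list);
		}
		spin_unlock(&pagemap_lru_lock);

		/* Phase 2: do the expensive work with no LRU lock held.
		 * Pages freed here are gathered up and batch-freed. */
		while (!list_empty(&page_list)) {
			struct page *page = list_entry(page_list.prev,
							struct page, lru);
			list_del(&page->lru);
			if (try_to_reclaim_one(page))
				continue;	/* reclaimed: batch-free later */

			/* Phase 3: reclaim failed, put it back on the LRU. */
			spin_lock(&pagemap_lru_lock);
			if (TestSetPageLRU(page))
				BUG();
			list_add(&page->lru, &inactive_list);
			spin_unlock(&pagemap_lru_lock);
			__put_page(page);	/* drop the pin from phase 1 */
		}
	}

The point of the structure is that the global lock is only held for the
cheap list manipulation in phases 1 and 3, never across the per-page
reclaim work.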

This patch reduces pagemap_lru_lock contention on a 4-way machine by a
factor of thirty.

The shrink_cache() code has been simplified somewhat.

refill_inactive() was being called too often - often just to process
two or three pages.  Fiddled with that so it processes pages at the
same overall rate, but works on 32 pages at a time.
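
The pacing idea can be sketched as follows; the wrapper and its
nr_pending counter are illustrative only, not the patch's actual code:

	/* Hypothetical pacing wrapper: defer the work until a full batch
	 * of 32 pages is owed, so the lock is taken far less often while
	 * the long-term processing rate stays the same. */
	static void refill_inactive_batched(int nr_requested)
	{
		static int nr_pending;		/* pages owed to refill_inactive() */

		nr_pending += nr_requested;
		if (nr_pending < 32)
			return;			/* not yet worth taking the lock */
		refill_inactive(nr_pending);	/* process a whole batch at once */
		nr_pending = 0;
	}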

Added a couple of mark_page_accessed() calls into mm/memory.c from 2.4.
They seem appropriate.
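
For context, 2.4's mark_page_accessed() implements a two-touch
promotion, approximately as follows (reconstructed from memory, so
treat the details as approximate):

	/* Approximate 2.4 behaviour: a first touch marks the page
	 * referenced; a second touch promotes it from the inactive list
	 * to the active list. */
	void mark_page_accessed(struct page *page)
	{
		if (!PageActive(page) && PageReferenced(page)) {
			activate_page(page);	/* move to the active list */
			ClearPageReferenced(page);
		} else {
			SetPageReferenced(page);
		}
	}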

Change the shrink_caches() logic so that it will still trickle through
the active list (via refill_inactive) even if the inactive list is much
larger than the active list.
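
One way to express that behaviour is to scan the active list in
proportion to its size.  The sketch below is illustrative only, with
nr_active/nr_inactive standing in for the real page-state counters
rather than quoting the patch:

	/* Sketch: refill the inactive list in proportion to the active
	 * list's share of the total, so active pages keep aging even
	 * when the inactive list is already much larger. */
	static void shrink_caches_sketch(int nr_pages,
					 unsigned long nr_active,
					 unsigned long nr_inactive)
	{
		int nr_refill = nr_pages * nr_active /
					(nr_active + nr_inactive + 1);

		refill_inactive(nr_refill);	/* trickle active -> inactive */
		shrink_cache(nr_pages);		/* then reclaim from inactive */
	}
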
parent 6a952840
@@ -195,6 +195,7 @@ struct page {
*/
#define get_page(p) atomic_inc(&(p)->count)
#define put_page(p) __free_page(p)
#define __put_page(p) atomic_dec(&(p)->count)
#define put_page_testzero(p) atomic_dec_and_test(&(p)->count)
#define page_count(p) atomic_read(&(p)->count)
#define set_page_count(p,v) atomic_set(&(p)->count, v)
@@ -154,6 +154,7 @@ extern void get_page_state(struct page_state *ret);
ret; \
})
#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
@@ -161,6 +162,7 @@ extern void get_page_state(struct page_state *ret);
#define PageActive(page) test_bit(PG_active, &(page)->flags)
#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
#define SetPageSlab(page) set_bit(PG_slab, &(page)->flags)
@@ -227,12 +227,17 @@ do { \
BUG(); \
} while (0)
#define __add_page_to_active_list(page) \
do { \
list_add(&(page)->lru, &active_list); \
inc_page_state(nr_active); \
} while (0)
#define add_page_to_active_list(page) \
do { \
DEBUG_LRU_PAGE(page); \
SetPageActive(page); \
list_add(&(page)->lru, &active_list); \
inc_page_state(nr_active); \
__add_page_to_active_list(page); \
} while (0)
#define add_page_to_inactive_list(page) \
@@ -545,7 +545,8 @@ int add_to_page_cache(struct page *page,
page_cache_get(page);
}
write_unlock(&mapping->page_lock);
if (!error)
/* Anon pages are already on the LRU */
if (!error && !PageSwapCache(page))
lru_cache_add(page);
return error;
}
@@ -1180,6 +1180,7 @@ static int do_swap_page(struct mm_struct * mm,
KERNEL_STAT_INC(pgmajfault);
}
mark_page_accessed(page);
lock_page(page);
/*
@@ -1257,6 +1258,7 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma,
flush_page_to_ram(page);
entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
lru_cache_add(page);
mark_page_accessed(page);
}
set_pte(page_table, entry);
@@ -381,6 +381,7 @@ struct page * read_swap_cache_async(swp_entry_t entry)
/*
* Initiate read into locked page and return.
*/
lru_cache_add(new_page);
swap_readpage(NULL, new_page);
return new_page;
}