Commit 349055d0 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] vmscan: add lru_to_page() helper

From: Nick Piggin <piggin@cyberone.com.au>

Add a little helper macro for a common list extraction operation in vmscan.c
parent fb5b4abe
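
For reference, list_entry() is the kernel's container_of(): it maps a pointer to an embedded struct member back to the structure containing it, so lru_to_page(head) yields the struct page whose page->lru sits at the tail (head->prev) of an LRU list. The userspace sketch below is illustrative only and not part of the patch; the list_head plumbing is simplified from include/linux/list.h, and the index field is a made-up stand-in for real page state.

/*
 * Illustrative userspace sketch only -- not from the patch.
 * Simplified list_head plumbing; "index" is a hypothetical field.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

/* list_entry() is container_of(): map a member back to its struct */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* the helper this patch introduces */
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

struct page {
	int index;
	struct list_head lru;
};

/* insert at the head, as the kernel does when a page enters an LRU */
static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

int main(void)
{
	struct list_head lru_list = { &lru_list, &lru_list };
	struct page pages[3];
	int i;

	for (i = 0; i < 3; i++) {
		pages[i].index = i;
		list_add(&pages[i].lru, &lru_list);
	}

	/* head->prev is the tail: the first page added, i.e. the coldest */
	printf("tail page index = %d\n", lru_to_page(&lru_list)->index);
	return 0;
}

Every call site in the diff below replaces the same two-line list_entry(...->prev, struct page, lru) pattern with this single macro; behaviour is unchanged.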
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -43,14 +43,15 @@
 int vm_swappiness = 60;
 static long total_memory;
 
+#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
+
 #ifdef ARCH_HAS_PREFETCH
 #define prefetch_prev_lru_page(_page, _base, _field)			\
 	do {								\
 		if ((_page)->lru.prev != _base) {			\
 			struct page *prev;				\
 									\
-			prev = list_entry(_page->lru.prev,		\
-					struct page, lru);		\
+			prev = lru_to_page(&(_page->lru));		\
 			prefetch(&prev->_field);			\
 		}							\
 	} while (0)
@@ -64,8 +65,7 @@ static long total_memory;
 		if ((_page)->lru.prev != _base) {			\
 			struct page *prev;				\
 									\
-			prev = list_entry(_page->lru.prev,		\
-					struct page, lru);		\
+			prev = lru_to_page(&(_page->lru));		\
 			prefetchw(&prev->_field);			\
 		}							\
 	} while (0)
@@ -260,7 +260,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask, int *nr_scanned)
 		int may_enter_fs;
 		int referenced;
 
-		page = list_entry(page_list->prev, struct page, lru);
+		page = lru_to_page(page_list);
 		list_del(&page->lru);
 
 		if (TestSetPageLocked(page))
@@ -494,8 +494,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask,
 
 	while (nr_scan++ < SWAP_CLUSTER_MAX &&
 			!list_empty(&zone->inactive_list)) {
-		page = list_entry(zone->inactive_list.prev,
-					struct page, lru);
+		page = lru_to_page(&zone->inactive_list);
 
 		prefetchw_prev_lru_page(page,
 				&zone->inactive_list, flags);
@@ -540,7 +539,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask,
 	 * Put back any unfreeable pages.
 	 */
 	while (!list_empty(&page_list)) {
-		page = list_entry(page_list.prev, struct page, lru);
+		page = lru_to_page(&page_list);
 		if (TestSetPageLRU(page))
 			BUG();
 		list_del(&page->lru);
@@ -599,7 +598,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
 	pgmoved = 0;
 	spin_lock_irq(&zone->lru_lock);
 	while (nr_pages && !list_empty(&zone->active_list)) {
-		page = list_entry(zone->active_list.prev, struct page, lru);
+		page = lru_to_page(&zone->active_list);
 		prefetchw_prev_lru_page(page, &zone->active_list, flags);
 		if (!TestClearPageLRU(page))
 			BUG();
@@ -650,7 +649,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
 		reclaim_mapped = 1;
 
 	while (!list_empty(&l_hold)) {
-		page = list_entry(l_hold.prev, struct page, lru);
+		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 		if (page_mapped(page)) {
 			if (!reclaim_mapped) {
@@ -681,7 +680,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
 	pgmoved = 0;
 	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&l_inactive)) {
-		page = list_entry(l_inactive.prev, struct page, lru);
+		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
 		if (TestSetPageLRU(page))
 			BUG();
@@ -710,7 +709,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
 
 	pgmoved = 0;
 	while (!list_empty(&l_active)) {
-		page = list_entry(l_active.prev, struct page, lru);
+		page = lru_to_page(&l_active);
 		prefetchw_prev_lru_page(page, &l_active, flags);
 		if (TestSetPageLRU(page))
 			BUG();