Commit 349055d0 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] vmscan: add lru_to_page() helper

From: Nick Piggin <piggin@cyberone.com.au>

Add a little helper macro for a common list extraction operation in vmscan.c.
parent fb5b4abe
...@@ -43,14 +43,15 @@ ...@@ -43,14 +43,15 @@
int vm_swappiness = 60; int vm_swappiness = 60;
static long total_memory; static long total_memory;
/* Extract the page at the tail of an LRU list (lists are walked back-to-front). */
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
#ifdef ARCH_HAS_PREFETCH #ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field) \ #define prefetch_prev_lru_page(_page, _base, _field) \
do { \ do { \
if ((_page)->lru.prev != _base) { \ if ((_page)->lru.prev != _base) { \
struct page *prev; \ struct page *prev; \
\ \
prev = list_entry(_page->lru.prev, \ prev = lru_to_page(&(_page->lru)); \
struct page, lru); \
prefetch(&prev->_field); \ prefetch(&prev->_field); \
} \ } \
} while (0) } while (0)
...@@ -64,8 +65,7 @@ static long total_memory; ...@@ -64,8 +65,7 @@ static long total_memory;
if ((_page)->lru.prev != _base) { \ if ((_page)->lru.prev != _base) { \
struct page *prev; \ struct page *prev; \
\ \
prev = list_entry(_page->lru.prev, \ prev = lru_to_page(&(_page->lru)); \
struct page, lru); \
prefetchw(&prev->_field); \ prefetchw(&prev->_field); \
} \ } \
} while (0) } while (0)
...@@ -260,7 +260,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask, int *nr_scanned) ...@@ -260,7 +260,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask, int *nr_scanned)
int may_enter_fs; int may_enter_fs;
int referenced; int referenced;
page = list_entry(page_list->prev, struct page, lru); page = lru_to_page(page_list);
list_del(&page->lru); list_del(&page->lru);
if (TestSetPageLocked(page)) if (TestSetPageLocked(page))
...@@ -494,8 +494,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask, ...@@ -494,8 +494,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask,
while (nr_scan++ < SWAP_CLUSTER_MAX && while (nr_scan++ < SWAP_CLUSTER_MAX &&
!list_empty(&zone->inactive_list)) { !list_empty(&zone->inactive_list)) {
page = list_entry(zone->inactive_list.prev, page = lru_to_page(&zone->inactive_list);
struct page, lru);
prefetchw_prev_lru_page(page, prefetchw_prev_lru_page(page,
&zone->inactive_list, flags); &zone->inactive_list, flags);
...@@ -540,7 +539,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask, ...@@ -540,7 +539,7 @@ shrink_cache(struct zone *zone, unsigned int gfp_mask,
* Put back any unfreeable pages. * Put back any unfreeable pages.
*/ */
while (!list_empty(&page_list)) { while (!list_empty(&page_list)) {
page = list_entry(page_list.prev, struct page, lru); page = lru_to_page(&page_list);
if (TestSetPageLRU(page)) if (TestSetPageLRU(page))
BUG(); BUG();
list_del(&page->lru); list_del(&page->lru);
...@@ -599,7 +598,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, ...@@ -599,7 +598,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
pgmoved = 0; pgmoved = 0;
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
while (nr_pages && !list_empty(&zone->active_list)) { while (nr_pages && !list_empty(&zone->active_list)) {
page = list_entry(zone->active_list.prev, struct page, lru); page = lru_to_page(&zone->active_list);
prefetchw_prev_lru_page(page, &zone->active_list, flags); prefetchw_prev_lru_page(page, &zone->active_list, flags);
if (!TestClearPageLRU(page)) if (!TestClearPageLRU(page))
BUG(); BUG();
...@@ -650,7 +649,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, ...@@ -650,7 +649,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
reclaim_mapped = 1; reclaim_mapped = 1;
while (!list_empty(&l_hold)) { while (!list_empty(&l_hold)) {
page = list_entry(l_hold.prev, struct page, lru); page = lru_to_page(&l_hold);
list_del(&page->lru); list_del(&page->lru);
if (page_mapped(page)) { if (page_mapped(page)) {
if (!reclaim_mapped) { if (!reclaim_mapped) {
...@@ -681,7 +680,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, ...@@ -681,7 +680,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
pgmoved = 0; pgmoved = 0;
spin_lock_irq(&zone->lru_lock); spin_lock_irq(&zone->lru_lock);
while (!list_empty(&l_inactive)) { while (!list_empty(&l_inactive)) {
page = list_entry(l_inactive.prev, struct page, lru); page = lru_to_page(&l_inactive);
prefetchw_prev_lru_page(page, &l_inactive, flags); prefetchw_prev_lru_page(page, &l_inactive, flags);
if (TestSetPageLRU(page)) if (TestSetPageLRU(page))
BUG(); BUG();
...@@ -710,7 +709,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in, ...@@ -710,7 +709,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
pgmoved = 0; pgmoved = 0;
while (!list_empty(&l_active)) { while (!list_empty(&l_active)) {
page = list_entry(l_active.prev, struct page, lru); page = lru_to_page(&l_active);
prefetchw_prev_lru_page(page, &l_active, flags); prefetchw_prev_lru_page(page, &l_active, flags);
if (TestSetPageLRU(page)) if (TestSetPageLRU(page))
BUG(); BUG();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment