Commit aaba9265 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] make pagemap_lru_lock irq-safe

It is expensive for a CPU to take an interrupt while holding the page
LRU lock, because other CPUs will pile up on the lock while the
interrupt runs.

Disabling interrupts while holding the lock reduces contention by a
further 30% on a 4-way machine, measured with disk completion as the
only source of interrupts.  The improvement will be higher with more
CPUs, and higher again when networking is generating interrupts.

The maximum hold time of this lock is 17 microseconds on a 500 MHz
PIII, which is well inside the kernel's maximum interrupt latency
(100 usecs when I last looked, a year ago).

This optimisation is not needed on uniprocessor, but the patch disables
IRQs while holding pagemap_lru_lock there too.  The lock thus becomes
an irq-safe spinlock on all configurations, and pages can be moved off
the LRU from interrupt context.
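
To illustrate the new locking rules (this sketch is not part of the
patch; the two example_* helpers are hypothetical): a process-context
caller uses the unconditional irq-disabling primitives, while a caller
that may run in interrupt context, or with unknown interrupt state,
must use the save/restore variants, because spin_unlock_irq() would
unconditionally re-enable interrupts.

	extern spinlock_t _pagemap_lru_lock;

	/* Process context: interrupts are known to be enabled, so the
	 * cheaper unconditional disable/enable pair is sufficient. */
	static void example_lru_del(struct page *page)
	{
		spin_lock_irq(&_pagemap_lru_lock);	/* disables local irqs */
		__lru_cache_del(page);
		spin_unlock_irq(&_pagemap_lru_lock);	/* re-enables them */
	}

	/* Interrupt or unknown context: save the caller's irq state
	 * and restore it, rather than unconditionally re-enabling. */
	static void example_lru_del_any_context(struct page *page)
	{
		unsigned long flags;

		spin_lock_irqsave(&_pagemap_lru_lock, flags);
		__lru_cache_del(page);
		spin_unlock_irqrestore(&_pagemap_lru_lock, flags);
	}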

pagemap_lru_lock has been renamed to _pagemap_lru_lock to pick up any
missed uses, and to reliably break any out-of-tree patches which may be
using the old semantics.
parent 008f707c
@@ -211,7 +211,7 @@ extern struct swap_list_t swap_list;
 asmlinkage long sys_swapoff(const char *);
 asmlinkage long sys_swapon(const char *, int);
 
-extern spinlock_t pagemap_lru_lock;
+extern spinlock_t _pagemap_lru_lock;
 
 extern void FASTCALL(mark_page_accessed(struct page *));
...
@@ -62,7 +62,7 @@
  *  ->inode_lock		(__mark_inode_dirty)
  *    ->sb_lock			(fs/fs-writeback.c)
  */
-spinlock_t pagemap_lru_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+spinlock_t _pagemap_lru_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 
 /*
  * Remove a page from the page cache and free it.  Caller has to make
...
@@ -41,9 +41,9 @@ static inline void activate_page_nolock(struct page * page)
  */
 void activate_page(struct page * page)
 {
-	spin_lock(&pagemap_lru_lock);
+	spin_lock_irq(&_pagemap_lru_lock);
 	activate_page_nolock(page);
-	spin_unlock(&pagemap_lru_lock);
+	spin_unlock_irq(&_pagemap_lru_lock);
 }
 
 /**
@@ -53,10 +53,10 @@ void activate_page(struct page * page)
 void lru_cache_add(struct page * page)
 {
 	if (!PageLRU(page)) {
-		spin_lock(&pagemap_lru_lock);
+		spin_lock_irq(&_pagemap_lru_lock);
 		if (!TestSetPageLRU(page))
 			add_page_to_inactive_list(page);
-		spin_unlock(&pagemap_lru_lock);
+		spin_unlock_irq(&_pagemap_lru_lock);
 	}
 }
 
@@ -83,9 +83,9 @@ void __lru_cache_del(struct page * page)
  */
 void lru_cache_del(struct page * page)
 {
-	spin_lock(&pagemap_lru_lock);
+	spin_lock_irq(&_pagemap_lru_lock);
 	__lru_cache_del(page);
-	spin_unlock(&pagemap_lru_lock);
+	spin_unlock_irq(&_pagemap_lru_lock);
 }
 
 /*
@@ -116,7 +116,7 @@ void __pagevec_release(struct pagevec *pvec)
 			continue;
 
 		if (!lock_held) {
-			spin_lock(&pagemap_lru_lock);
+			spin_lock_irq(&_pagemap_lru_lock);
 			lock_held = 1;
 		}
@@ -130,7 +130,7 @@ void __pagevec_release(struct pagevec *pvec)
 		pagevec_add(&pages_to_free, page);
 	}
 	if (lock_held)
-		spin_unlock(&pagemap_lru_lock);
+		spin_unlock_irq(&_pagemap_lru_lock);
 
 	pagevec_free(&pages_to_free);
 	pagevec_init(pvec);
@@ -175,14 +175,14 @@ void pagevec_deactivate_inactive(struct pagevec *pvec)
 		if (!lock_held) {
 			if (PageActive(page) || !PageLRU(page))
 				continue;
-			spin_lock(&pagemap_lru_lock);
+			spin_lock_irq(&_pagemap_lru_lock);
 			lock_held = 1;
 		}
 		if (!PageActive(page) && PageLRU(page))
 			list_move(&page->lru, &inactive_list);
 	}
 	if (lock_held)
-		spin_unlock(&pagemap_lru_lock);
+		spin_unlock_irq(&_pagemap_lru_lock);
 	__pagevec_release(pvec);
 }
 
@@ -194,7 +194,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
 {
 	int i;
 
-	spin_lock(&pagemap_lru_lock);
+	spin_lock_irq(&_pagemap_lru_lock);
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
@@ -202,7 +202,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
 			BUG();
 		add_page_to_inactive_list(page);
 	}
-	spin_unlock(&pagemap_lru_lock);
+	spin_unlock_irq(&_pagemap_lru_lock);
 	pagevec_release(pvec);
 }
 
@@ -214,7 +214,7 @@ void __pagevec_lru_del(struct pagevec *pvec)
 {
 	int i;
 
-	spin_lock(&pagemap_lru_lock);
+	spin_lock_irq(&_pagemap_lru_lock);
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
@@ -225,7 +225,7 @@ void __pagevec_lru_del(struct pagevec *pvec)
 		else
 			del_page_from_inactive_list(page);
 	}
-	spin_unlock(&pagemap_lru_lock);
+	spin_unlock_irq(&_pagemap_lru_lock);
 	pagevec_release(pvec);
 }
...
@@ -284,7 +284,7 @@ shrink_cache(int nr_pages, zone_t *classzone,
 	pagevec_init(&pvec);
 
-	spin_lock(&pagemap_lru_lock);
+	spin_lock_irq(&_pagemap_lru_lock);
 	while (max_scan > 0 && nr_pages > 0) {
 		struct page *page;
 		int n = 0;
@@ -307,7 +307,7 @@ shrink_cache(int nr_pages, zone_t *classzone,
 			page_cache_get(page);
 			n++;
 		}
-		spin_unlock(&pagemap_lru_lock);
+		spin_unlock_irq(&_pagemap_lru_lock);
 
 		if (list_empty(&page_list))
 			goto done;
@@ -321,7 +321,7 @@ shrink_cache(int nr_pages, zone_t *classzone,
 		if (nr_pages <= 0 && list_empty(&page_list))
 			goto done;
 
-		spin_lock(&pagemap_lru_lock);
+		spin_lock_irq(&_pagemap_lru_lock);
 		/*
 		 * Put back any unfreeable pages.
 		 */
@@ -335,13 +335,13 @@ shrink_cache(int nr_pages, zone_t *classzone,
 			else
 				add_page_to_inactive_list(page);
 			if (!pagevec_add(&pvec, page)) {
-				spin_unlock(&pagemap_lru_lock);
+				spin_unlock_irq(&_pagemap_lru_lock);
 				__pagevec_release(&pvec);
-				spin_lock(&pagemap_lru_lock);
+				spin_lock_irq(&_pagemap_lru_lock);
 			}
 		}
 	}
-	spin_unlock(&pagemap_lru_lock);
+	spin_unlock_irq(&_pagemap_lru_lock);
 done:
 	pagevec_release(&pvec);
 	return nr_pages;
@@ -374,7 +374,7 @@ static /* inline */ void refill_inactive(const int nr_pages_in)
 	struct page *page;
 	struct pagevec pvec;
 
-	spin_lock(&pagemap_lru_lock);
+	spin_lock_irq(&_pagemap_lru_lock);
 	while (nr_pages && !list_empty(&active_list)) {
 		page = list_entry(active_list.prev, struct page, lru);
 		prefetchw_prev_lru_page(page, &active_list, flags);
@@ -384,7 +384,7 @@ static /* inline */ void refill_inactive(const int nr_pages_in)
 		list_move(&page->lru, &l_hold);
 		nr_pages--;
 	}
-	spin_unlock(&pagemap_lru_lock);
+	spin_unlock_irq(&_pagemap_lru_lock);
 
 	while (!list_empty(&l_hold)) {
 		page = list_entry(l_hold.prev, struct page, lru);
@@ -406,7 +406,7 @@ static /* inline */ void refill_inactive(const int nr_pages_in)
 	}
 
 	pagevec_init(&pvec);
-	spin_lock(&pagemap_lru_lock);
+	spin_lock_irq(&_pagemap_lru_lock);
 	while (!list_empty(&l_inactive)) {
 		page = list_entry(l_inactive.prev, struct page, lru);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -416,9 +416,9 @@ static /* inline */ void refill_inactive(const int nr_pages_in)
 			BUG();
 		list_move(&page->lru, &inactive_list);
 		if (!pagevec_add(&pvec, page)) {
-			spin_unlock(&pagemap_lru_lock);
+			spin_unlock_irq(&_pagemap_lru_lock);
 			__pagevec_release(&pvec);
-			spin_lock(&pagemap_lru_lock);
+			spin_lock_irq(&_pagemap_lru_lock);
 		}
 	}
 	while (!list_empty(&l_active)) {
@@ -429,12 +429,12 @@ static /* inline */ void refill_inactive(const int nr_pages_in)
 		BUG_ON(!PageActive(page));
 		list_move(&page->lru, &active_list);
 		if (!pagevec_add(&pvec, page)) {
-			spin_unlock(&pagemap_lru_lock);
+			spin_unlock_irq(&_pagemap_lru_lock);
 			__pagevec_release(&pvec);
-			spin_lock(&pagemap_lru_lock);
+			spin_lock_irq(&_pagemap_lru_lock);
 		}
 	}
-	spin_unlock(&pagemap_lru_lock);
+	spin_unlock_irq(&_pagemap_lru_lock);
 	pagevec_release(&pvec);
 	mod_page_state(nr_active, -pgdeactivate);
...