Commit 5feb041e authored by Andrew Morton, committed by Linus Torvalds

[PATCH] reduce lock contention in try_to_free_buffers()

The blockdev mapping's private_lock is fairly contended.  The buffer
LRU cache fixed a lot of that, but under page replacement load,
try_to_free_buffers() is still showing up in lock-contention profiles.

Moving the freeing of buffer_heads outside the lock reduces contention
in there by 30%.
parent 63a07153
...@@ -2472,7 +2472,8 @@ static inline int buffer_busy(struct buffer_head *bh) ...@@ -2472,7 +2472,8 @@ static inline int buffer_busy(struct buffer_head *bh)
(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
} }
static /*inline*/ int drop_buffers(struct page *page) static inline int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{ {
struct buffer_head *head = page_buffers(page); struct buffer_head *head = page_buffers(page);
struct buffer_head *bh; struct buffer_head *bh;
...@@ -2496,9 +2497,9 @@ static /*inline*/ int drop_buffers(struct page *page) ...@@ -2496,9 +2497,9 @@ static /*inline*/ int drop_buffers(struct page *page)
if (!list_empty(&bh->b_assoc_buffers)) if (!list_empty(&bh->b_assoc_buffers))
__remove_assoc_queue(bh); __remove_assoc_queue(bh);
free_buffer_head(bh);
bh = next; bh = next;
} while (bh != head); } while (bh != head);
*buffers_to_free = head;
__clear_page_buffers(page); __clear_page_buffers(page);
return 1; return 1;
failed: failed:
...@@ -2508,17 +2509,20 @@ static /*inline*/ int drop_buffers(struct page *page) ...@@ -2508,17 +2509,20 @@ static /*inline*/ int drop_buffers(struct page *page)
int try_to_free_buffers(struct page *page) int try_to_free_buffers(struct page *page)
{ {
struct address_space * const mapping = page->mapping; struct address_space * const mapping = page->mapping;
struct buffer_head *buffers_to_free = NULL;
int ret = 0; int ret = 0;
BUG_ON(!PageLocked(page)); BUG_ON(!PageLocked(page));
if (PageWriteback(page)) if (PageWriteback(page))
return 0; return 0;
if (mapping == NULL) /* swapped-in anon page */ if (mapping == NULL) { /* swapped-in anon page */
return drop_buffers(page); ret = drop_buffers(page, &buffers_to_free);
goto out;
}
spin_lock(&mapping->private_lock); spin_lock(&mapping->private_lock);
ret = drop_buffers(page); ret = drop_buffers(page, &buffers_to_free);
if (ret && !PageSwapCache(page)) { if (ret && !PageSwapCache(page)) {
/* /*
* If the filesystem writes its buffers by hand (eg ext3) * If the filesystem writes its buffers by hand (eg ext3)
...@@ -2531,6 +2535,16 @@ int try_to_free_buffers(struct page *page) ...@@ -2531,6 +2535,16 @@ int try_to_free_buffers(struct page *page)
ClearPageDirty(page); ClearPageDirty(page);
} }
spin_unlock(&mapping->private_lock); spin_unlock(&mapping->private_lock);
out:
if (buffers_to_free) {
struct buffer_head *bh = buffers_to_free;
do {
struct buffer_head *next = bh->b_this_page;
free_buffer_head(bh);
bh = next;
} while (bh != buffers_to_free);
}
return ret; return ret;
} }
EXPORT_SYMBOL(try_to_free_buffers); EXPORT_SYMBOL(try_to_free_buffers);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment