Commit 8e98702b authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Replace the radix-tree rwlock with a spinlock

Unlike rwlocks, spinlocks don't need a bus-locked operation on unlock, and are faster.

On a P4, time to write a 4M file with 4M one-byte write()s:

Before:
	0.72s user 5.47s system 99% cpu 6.227 total
	0.76s user 5.40s system 100% cpu 6.154 total
	0.77s user 5.38s system 100% cpu 6.146 total

After:
	1.09s user 4.92s system 99% cpu 6.014 total
	0.74s user 5.28s system 99% cpu 6.023 total
	1.03s user 4.97s system 100% cpu 5.991 total
parent df921d4d
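
The bus-locked unlock is the crux: releasing a reader/writer lock must atomically update the reader count, whereas releasing a spinlock is a plain store. A minimal sketch of the two unlock paths, modeled with C11 atomics (the toy_* types and functions are illustrative, not kernel code):

#include <stdatomic.h>

/* Toy models of the two unlock paths; not the kernel's implementation. */
struct toy_rwlock   { atomic_int readers; };    /* > 0 while read-held */
struct toy_spinlock { atomic_flag locked; };

static void toy_read_unlock(struct toy_rwlock *l)
{
        /* Must be an atomic read-modify-write so concurrent readers do
         * not lose updates: on x86 this compiles to a LOCK-prefixed
         * (bus-locked) instruction. */
        atomic_fetch_sub_explicit(&l->readers, 1, memory_order_release);
}

static void toy_spin_unlock(struct toy_spinlock *l)
{
        /* Only the lock holder runs here, so a release store suffices:
         * on x86 this is an ordinary MOV with no LOCK prefix. */
        atomic_flag_clear_explicit(&l->locked, memory_order_release);
}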
@@ -149,10 +149,10 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
 	 * read speculatively by this cpu before &= ~I_DIRTY -- mikulas
 	 */

-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	if (wait || !wbc->for_kupdate || list_empty(&mapping->io_pages))
 		list_splice_init(&mapping->dirty_pages, &mapping->io_pages);
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	spin_unlock(&inode_lock);

 	do_writepages(mapping, wbc);
...
@@ -181,7 +181,7 @@ void inode_init_once(struct inode *inode)
 	INIT_LIST_HEAD(&inode->i_devices);
 	sema_init(&inode->i_sem, 1);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	rwlock_init(&inode->i_data.page_lock);
+	spin_lock_init(&inode->i_data.page_lock);
 	init_MUTEX(&inode->i_data.i_shared_sem);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
...
@@ -627,7 +627,7 @@ mpage_writepages(struct address_space *mapping,
 	writepage = mapping->a_ops->writepage;

 	pagevec_init(&pvec, 0);
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	while (!list_empty(&mapping->io_pages) && !done) {
 		struct page *page = list_entry(mapping->io_pages.prev,
 					struct page, list);
@@ -647,7 +647,7 @@ mpage_writepages(struct address_space *mapping,
 		list_add(&page->list, &mapping->locked_pages);
 		page_cache_get(page);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);

 		/*
 		 * At this point we hold neither mapping->page_lock nor
@@ -679,12 +679,12 @@ mpage_writepages(struct address_space *mapping,
 				unlock_page(page);
 			}
 			page_cache_release(page);
-			write_lock(&mapping->page_lock);
+			spin_lock(&mapping->page_lock);
 	}
 	/*
 	 * Leave any remaining dirty pages on ->io_pages
 	 */
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	if (bio)
 		mpage_bio_submit(WRITE, bio);
 	return ret;
...
@@ -313,7 +313,7 @@ struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	rwlock_t		page_lock;	/* and rwlock protecting it */
+	spinlock_t		page_lock;	/* and spinlock protecting it */
 	struct list_head	clean_pages;	/* list of clean pages */
 	struct list_head	dirty_pages;	/* list of dirty pages */
 	struct list_head	locked_pages;	/* list of locked pages */
...
@@ -99,9 +99,9 @@ void remove_from_page_cache(struct page *page)
 	if (unlikely(!PageLocked(page)))
 		PAGE_BUG(page);

-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	__remove_from_page_cache(page);
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 }

 static inline int sync_page(struct page *page)
@@ -133,9 +133,9 @@ static int __filemap_fdatawrite(struct address_space *mapping, int sync_mode)
 	if (mapping->backing_dev_info->memory_backed)
 		return 0;

-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	list_splice_init(&mapping->dirty_pages, &mapping->io_pages);
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	ret = do_writepages(mapping, &wbc);
 	return ret;
 }
@@ -166,7 +166,7 @@ int filemap_fdatawait(struct address_space * mapping)

 restart:
 	progress = 0;
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	while (!list_empty(&mapping->locked_pages)) {
 		struct page *page;
@@ -180,7 +180,7 @@ int filemap_fdatawait(struct address_space * mapping)
 		if (!PageWriteback(page)) {
 			if (++progress > 32) {
 				if (need_resched()) {
-					write_unlock(&mapping->page_lock);
+					spin_unlock(&mapping->page_lock);
 					__cond_resched();
 					goto restart;
 				}
@@ -190,16 +190,16 @@ int filemap_fdatawait(struct address_space * mapping)

 		progress = 0;
 		page_cache_get(page);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		wait_on_page_writeback(page);
 		if (PageError(page))
 			ret = -EIO;

 		page_cache_release(page);
-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 	}
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return ret;
 }
@@ -227,7 +227,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,

 	if (error == 0) {
 		page_cache_get(page);
-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
 		if (!error) {
 			SetPageLocked(page);
@@ -235,7 +235,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 		} else {
 			page_cache_release(page);
 		}
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		radix_tree_preload_end();
 	}
 	return error;
@@ -364,11 +364,11 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset)
 	 * We scan the hash list read-only. Addition to and removal from
 	 * the hash-list needs a held write-lock.
 	 */
-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page)
 		page_cache_get(page);
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return page;
 }
@@ -379,11 +379,11 @@ struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
 {
 	struct page *page;

-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page && TestSetPageLocked(page))
 		page = NULL;
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return page;
 }
@@ -403,15 +403,15 @@ struct page *find_lock_page(struct address_space *mapping,
 {
 	struct page *page;

-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 repeat:
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page) {
 		page_cache_get(page);
 		if (TestSetPageLocked(page)) {
-			read_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 			lock_page(page);
-			read_lock(&mapping->page_lock);
+			spin_lock(&mapping->page_lock);

 			/* Has the page been truncated while we slept? */
 			if (page->mapping != mapping || page->index != offset) {
@@ -421,7 +421,7 @@ struct page *find_lock_page(struct address_space *mapping,
 			}
 		}
 	}
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return page;
 }
@@ -491,12 +491,12 @@ unsigned int find_get_pages(struct address_space *mapping, pgoff_t start,
 	unsigned int i;
 	unsigned int ret;

-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	ret = radix_tree_gang_lookup(&mapping->page_tree,
 				(void **)pages, start, nr_pages);
 	for (i = 0; i < ret; i++)
 		page_cache_get(pages[i]);
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return ret;
 }
...
@@ -425,12 +425,12 @@ int write_one_page(struct page *page, int wait)
 	if (wait && PageWriteback(page))
 		wait_on_page_writeback(page);

-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	list_del(&page->list);
 	if (test_clear_page_dirty(page)) {
 		list_add(&page->list, &mapping->locked_pages);
 		page_cache_get(page);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		ret = mapping->a_ops->writepage(page, &wbc);
 		if (ret == 0 && wait) {
 			wait_on_page_writeback(page);
@@ -440,7 +440,7 @@ int write_one_page(struct page *page, int wait)
 		page_cache_release(page);
 	} else {
 		list_add(&page->list, &mapping->clean_pages);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		unlock_page(page);
 	}
 	return ret;
@@ -513,14 +513,14 @@ int __set_page_dirty_buffers(struct page *page)
 	spin_unlock(&mapping->private_lock);

 	if (!TestSetPageDirty(page)) {
-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 		if (page->mapping) {	/* Race with truncate? */
 			if (!mapping->backing_dev_info->memory_backed)
 				inc_page_state(nr_dirty);
 			list_del(&page->list);
 			list_add(&page->list, &mapping->dirty_pages);
 		}
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
@@ -550,7 +550,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		struct address_space *mapping = page->mapping;

 		if (mapping) {
-			write_lock(&mapping->page_lock);
+			spin_lock(&mapping->page_lock);
 			if (page->mapping) {	/* Race with truncate? */
 				BUG_ON(page->mapping != mapping);
 				if (!mapping->backing_dev_info->memory_backed)
@@ -558,7 +558,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 				list_del(&page->list);
 				list_add(&page->list, &mapping->dirty_pages);
 			}
-			write_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 		}
 	}
...
@@ -217,7 +217,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	/*
 	 * Preallocate as many pages as we will need.
 	 */
-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
 		unsigned long page_offset = offset + page_idx;
@@ -228,16 +228,16 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		if (page)
 			continue;

-		read_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		page = page_cache_alloc_cold(mapping);
-		read_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 		if (!page)
 			break;
 		page->index = page_offset;
 		list_add(&page->list, &page_pool);
 		ret++;
 	}
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);

 	/*
 	 * Now start the IO.  We ignore I/O errors - if the page is not
...
@@ -34,7 +34,7 @@ extern struct address_space_operations swap_aops;

 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC),
-	.page_lock	= RW_LOCK_UNLOCKED,
+	.page_lock	= SPIN_LOCK_UNLOCKED,
 	.clean_pages	= LIST_HEAD_INIT(swapper_space.clean_pages),
 	.dirty_pages	= LIST_HEAD_INIT(swapper_space.dirty_pages),
 	.io_pages	= LIST_HEAD_INIT(swapper_space.io_pages),
@@ -191,9 +191,9 @@ void delete_from_swap_cache(struct page *page)

 	entry.val = page->index;

-	write_lock(&swapper_space.page_lock);
+	spin_lock(&swapper_space.page_lock);
 	__delete_from_swap_cache(page);
-	write_unlock(&swapper_space.page_lock);
+	spin_unlock(&swapper_space.page_lock);

 	swap_free(entry);
 	page_cache_release(page);
@@ -204,8 +204,8 @@ int move_to_swap_cache(struct page *page, swp_entry_t entry)
 	struct address_space *mapping = page->mapping;
 	int err;

-	write_lock(&swapper_space.page_lock);
-	write_lock(&mapping->page_lock);
+	spin_lock(&swapper_space.page_lock);
+	spin_lock(&mapping->page_lock);

 	err = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
 	if (!err) {
@@ -213,8 +213,8 @@ int move_to_swap_cache(struct page *page, swp_entry_t entry)
 		___add_to_page_cache(page, &swapper_space, entry.val);
 	}

-	write_unlock(&mapping->page_lock);
-	write_unlock(&swapper_space.page_lock);
+	spin_unlock(&mapping->page_lock);
+	spin_unlock(&swapper_space.page_lock);

 	if (!err) {
 		if (!swap_duplicate(entry))
@@ -240,8 +240,8 @@ int move_from_swap_cache(struct page *page, unsigned long index,

 	entry.val = page->index;

-	write_lock(&swapper_space.page_lock);
-	write_lock(&mapping->page_lock);
+	spin_lock(&swapper_space.page_lock);
+	spin_lock(&mapping->page_lock);

 	err = radix_tree_insert(&mapping->page_tree, index, page);
 	if (!err) {
@@ -249,8 +249,8 @@ int move_from_swap_cache(struct page *page, unsigned long index,
 		___add_to_page_cache(page, mapping, index);
 	}

-	write_unlock(&mapping->page_lock);
-	write_unlock(&swapper_space.page_lock);
+	spin_unlock(&mapping->page_lock);
+	spin_unlock(&swapper_space.page_lock);

 	if (!err) {
 		swap_free(entry);
...
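
One thing the move_to_swap_cache()/move_from_swap_cache() hunks preserve is the lock ordering: swapper_space.page_lock is always taken before the file mapping's page_lock and released in the reverse order. With plain spinlocks that consistent order is the only deadlock protection, since there is no reader mode to fall back on. A userspace sketch of the discipline (lock names illustrative; assume pthread_spin_init() has been called on both):

#include <pthread.h>

pthread_spinlock_t swapper_lock;        /* stands in for swapper_space.page_lock */
pthread_spinlock_t mapping_lock;        /* stands in for mapping->page_lock */

void move_page_between_caches(void)
{
        pthread_spin_lock(&swapper_lock);       /* outer lock first */
        pthread_spin_lock(&mapping_lock);       /* then the inner lock */
        /* ... insert into one radix tree, remove from the other ... */
        pthread_spin_unlock(&mapping_lock);     /* release inner first */
        pthread_spin_unlock(&swapper_lock);     /* then outer */
}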
@@ -248,10 +248,10 @@ static int exclusive_swap_page(struct page *page)
 		/* Is the only swap cache user the cache itself? */
 		if (p->swap_map[swp_offset(entry)] == 1) {
 			/* Recheck the page count with the pagecache lock held.. */
-			read_lock(&swapper_space.page_lock);
+			spin_lock(&swapper_space.page_lock);
 			if (page_count(page) - !!PagePrivate(page) == 2)
 				retval = 1;
-			read_unlock(&swapper_space.page_lock);
+			spin_unlock(&swapper_space.page_lock);
 		}
 		swap_info_put(p);
 	}
@@ -319,13 +319,13 @@ int remove_exclusive_swap_page(struct page *page)
 	retval = 0;
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the pagecache lock held.. */
-		write_lock(&swapper_space.page_lock);
+		spin_lock(&swapper_space.page_lock);
 		if ((page_count(page) == 2) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
 		}
-		write_unlock(&swapper_space.page_lock);
+		spin_unlock(&swapper_space.page_lock);
 	}
 	swap_info_put(p);
...
@@ -73,13 +73,13 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 	if (PagePrivate(page) && !try_to_release_page(page, 0))
 		return 0;

-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	if (PageDirty(page)) {
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		return 0;
 	}
 	__remove_from_page_cache(page);
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
...
@@ -325,7 +325,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
 				goto keep_locked;
 			if (!may_write_to_queue(mapping->backing_dev_info))
 				goto keep_locked;
-			write_lock(&mapping->page_lock);
+			spin_lock(&mapping->page_lock);
 			if (test_clear_page_dirty(page)) {
 				int res;
 				struct writeback_control wbc = {
@@ -336,7 +336,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
 				};

 				list_move(&page->list, &mapping->locked_pages);
-				write_unlock(&mapping->page_lock);
+				spin_unlock(&mapping->page_lock);

 				SetPageReclaim(page);
 				res = mapping->a_ops->writepage(page, &wbc);
@@ -351,7 +351,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
 				}
 				goto keep;
 			}
-			write_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 		}

 		/*
@@ -385,7 +385,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
 		if (!mapping)
 			goto keep_locked;	/* truncate got there first */

-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);

 		/*
 		 * The non-racy check for busy page.  It is critical to check
@@ -393,7 +393,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
 		 * not in use by anybody.	(pagecache + us == 2)
 		 */
 		if (page_count(page) != 2 || PageDirty(page)) {
-			write_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 			goto keep_locked;
 		}
@@ -401,7 +401,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
 		if (PageSwapCache(page)) {
 			swp_entry_t swap = { .val = page->index };
 			__delete_from_swap_cache(page);
-			write_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 			swap_free(swap);
 			__put_page(page);	/* The pagecache ref */
 			goto free_it;
@@ -409,7 +409,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
 #endif /* CONFIG_SWAP */

 		__remove_from_page_cache(page);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		__put_page(page);

 free_it:
...
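
The conversion is mechanical throughout: every read_lock()/read_unlock() and write_lock()/write_unlock() on page_lock becomes spin_lock()/spin_unlock(). The visible trade-off is that former read-side paths such as find_get_page() and the readahead loop now exclude each other as well as writers; the patch bets that hold times are short enough for the cheaper acquire/release to win, which the P4 numbers above support. A rough userspace analogue of the before/after shape (names illustrative):

#include <pthread.h>

pthread_rwlock_t   page_lock_rw = PTHREAD_RWLOCK_INITIALIZER;   /* before */
pthread_spinlock_t page_lock;   /* after; set up with pthread_spin_init() */

/* Before: lookups took the lock shared and could run concurrently,
 * but every unlock was an atomic read-modify-write. */
void lookup_before(void)
{
        pthread_rwlock_rdlock(&page_lock_rw);
        /* ... radix tree lookup, grab a page reference ... */
        pthread_rwlock_unlock(&page_lock_rw);
}

/* After: the same lookup is fully exclusive, but both acquire and
 * release are cheaper when uncontended. */
void lookup_after(void)
{
        pthread_spin_lock(&page_lock);
        /* ... radix tree lookup, grab a page reference ... */
        pthread_spin_unlock(&page_lock);
}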