Commit 3ab86fb0 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] leave swapcache pages unlocked during writeout

Convert swap pages so that they are PageWriteback and !PageLocked while
under writeout, like all other block-backed pages.  (Network
filesystems aren't doing this yet; their pages are still locked while
under writeout.)
parent 43967af3
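
The new locking discipline, in short: a swapcache page is locked only while
writeout is being set up; it is then marked PageWriteback and unlocked before
the I/O is submitted, and completion simply ends writeback without relocking.
A minimal sketch of that protocol using the 2.5-era page-flag helpers (both
function names here are hypothetical, for illustration only; the real work
happens in brw_page() and end_buffer_async_write() below):

/*
 * Illustrative sketch, not part of the patch.
 */
static void start_swap_writeout(struct page *page)
{
	BUG_ON(!PageLocked(page));	/* writeout is set up under the page lock */
	BUG_ON(PageWriteback(page));	/* the lock guarantees no I/O is in flight */
	SetPageWriteback(page);		/* memory allocators will throttle on this */
	unlock_page(page);		/* page is now !PageLocked during the I/O */
	/* ...submit the page's buffers here... */
}

static void finish_swap_writeout(struct page *page)
{
	end_page_writeback(page);	/* wakes wait_on_page_writeback() waiters */
}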
@@ -542,14 +542,6 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 	 */
 	if (page_uptodate && !PageError(page))
 		SetPageUptodate(page);
-
-	/*
-	 * swap page handling is a bit hacky. A standalone completion handler
-	 * for swapout pages would fix that up. swapin can use this function.
-	 */
-	if (PageSwapCache(page) && PageWriteback(page))
-		end_page_writeback(page);
-
 	unlock_page(page);
 	return;
@@ -559,8 +551,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 }
 
 /*
- * Completion handler for block_write_full_page() - pages which are unlocked
- * during I/O, and which have PageWriteback cleared upon I/O completion.
+ * Completion handler for block_write_full_page() and for brw_page() - pages
+ * which are unlocked during I/O, and which have PageWriteback cleared
+ * upon I/O completion.
  */
 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
@@ -2281,16 +2274,6 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
  *
  * FIXME: we need a swapper_inode->get_block function to remove
  * some of the bmap kludges and interface ugliness here.
- *
- * NOTE: unlike file pages, swap pages are locked while under writeout.
- * This is to throttle processes which reuse their swapcache pages while
- * they are under writeout, and to ensure that there is no I/O going on
- * when the page has been successfully locked. Functions such as
- * free_swap_and_cache() need to guarantee that there is no I/O in progress
- * because they will be freeing up swap blocks, which may then be reused.
- *
- * Swap pages are also marked PageWriteback when they are being written
- * so that memory allocators will throttle on them.
  */
 int brw_page(int rw, struct page *page,
 		struct block_device *bdev, sector_t b[], int size)
@@ -2312,18 +2295,17 @@ int brw_page(int rw, struct page *page,
 		if (rw == WRITE) {
 			set_buffer_uptodate(bh);
 			clear_buffer_dirty(bh);
-		}
-		/*
-		 * Swap pages are locked during writeout, so use
-		 * buffer_async_read in strange ways.
-		 */
-		mark_buffer_async_read(bh);
+			mark_buffer_async_write(bh);
+		} else {
+			mark_buffer_async_read(bh);
+		}
 		bh = bh->b_this_page;
 	} while (bh != head);
 
 	if (rw == WRITE) {
 		BUG_ON(PageWriteback(page));
 		SetPageWriteback(page);
+		unlock_page(page);
 	}
 
 	/* Stage 2: start the IO */
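
For context, the completion side that pairs with the brw_page() changes above:
end_buffer_async_write() ends page writeback once the last async-write buffer
on the page finishes, and never relocks the page. A simplified, hedged
reconstruction (the real 2.5 handler also records I/O errors and protects the
buffer walk with a spinlock):

static void end_buffer_async_write_sketch(struct buffer_head *bh, int uptodate)
{
	struct page *page = bh->b_page;
	struct buffer_head *tmp;

	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);	/* real code also flags the page in error */

	clear_buffer_async_write(bh);
	for (tmp = bh->b_this_page; tmp != bh; tmp = tmp->b_this_page) {
		if (buffer_async_write(tmp))
			return;			/* another buffer is still under I/O */
	}
	end_page_writeback(page);		/* note: no unlock_page() here */
}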
@@ -426,14 +426,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 	swap_free(entry);
 	ptr[offset] = (swp_entry_t) {0};
 
-	while (inode && move_from_swap_cache(page, idx, inode->i_mapping)) {
+	while (inode && (PageWriteback(page) ||
+			move_from_swap_cache(page, idx, inode->i_mapping))) {
 		/*
 		 * Yield for kswapd, and try again - but we're still
 		 * holding the page lock - ugh! fix this up later on.
 		 * Beware of inode being unlinked or truncated: just
 		 * leave try_to_unuse to delete_from_swap_cache if so.
+		 *
+		 * AKPM: We now wait on writeback too.  Note that it's
+		 * the page lock which prevents new writeback from starting.
 		 */
 		spin_unlock(&info->lock);
-		yield();
+		if (PageWriteback(page))
+			wait_on_page_writeback(page);
+		else
+			yield();
 		spin_lock(&info->lock);
 		ptr = shmem_swp_entry(info, idx, 0);
@@ -596,7 +603,12 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
 		/* We have to do this with page locked to prevent races */
 		if (TestSetPageLocked(page))
 			goto wait_retry;
+		if (PageWriteback(page)) {
+			spin_unlock(&info->lock);
+			wait_on_page_writeback(page);
+			unlock_page(page);
+			goto repeat;
+		}
 		error = move_from_swap_cache(page, idx, mapping);
 		if (error < 0) {
 			unlock_page(page);
@@ -651,7 +663,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
 	return ERR_PTR(-ENOSPC);
 
 wait_retry:
-	spin_unlock (&info->lock);
+	spin_unlock(&info->lock);
 	wait_on_page_locked(page);
 	page_cache_release(page);
 	goto repeat;
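
Both shmem hunks above follow the same discipline: take the page lock first
(under the new protocol, writeout can only be started against a locked page,
so holding the lock stops new writeback), then drain any writeout already in
flight before moving the page out of the swapcache. As a standalone, hedged
sketch of that pattern (hypothetical helper name; the real code also juggles
info->lock and retries from the top):

static int shmem_stabilize_sketch(struct page *page)
{
	if (TestSetPageLocked(page))
		return -EAGAIN;			/* locked elsewhere: wait and retry */
	if (PageWriteback(page))
		wait_on_page_writeback(page);	/* lock held, so no new writeout can begin */
	return 0;				/* locked, no I/O in flight: safe to move */
}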
@@ -131,10 +131,9 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry)
  */
 void __delete_from_swap_cache(struct page *page)
 {
-	if (!PageLocked(page))
-		BUG();
-	if (!PageSwapCache(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
+	BUG_ON(!PageSwapCache(page));
+	BUG_ON(PageWriteback(page));
 	ClearPageDirty(page);
 	__remove_inode_page(page);
 	INC_CACHE_INFO(del_total);
@@ -298,6 +298,8 @@ int remove_exclusive_swap_page(struct page *page)
 		BUG();
 	if (!PageSwapCache(page))
 		return 0;
+	if (PageWriteback(page))
+		return 0;
 	if (page_count(page) - !!PagePrivate(page) != 2) /* 2: us + cache */
 		return 0;
@@ -311,7 +313,8 @@ int remove_exclusive_swap_page(struct page *page)
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the pagecache lock held.. */
 		write_lock(&swapper_space.page_lock);
-		if (page_count(page) - !!PagePrivate(page) == 2) {
+		if ((page_count(page) - !!page_has_buffers(page) == 2) &&
+				!PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			/*
 			 * NOTE: if/when swap gets buffer/page coherency
@@ -326,7 +329,6 @@ int remove_exclusive_swap_page(struct page *page)
 	swap_info_put(p);
 
 	if (retval) {
-		BUG_ON(PageWriteback(page));
 		if (page_has_buffers(page) && !try_to_free_buffers(page))
 			BUG();
 		swap_free(entry);
@@ -352,9 +354,12 @@ void free_swap_and_cache(swp_entry_t entry)
 		swap_info_put(p);
 	}
 	if (page) {
+		int one_user;
+
 		page_cache_get(page);
+		one_user = (page_count(page) - !!page_has_buffers(page) == 2);
 		/* Only cache user (+us), or swap space full? Free it! */
-		if (page_count(page) - !!PagePrivate(page) == 2 || vm_swap_full()) {
+		if (!PageWriteback(page) && (one_user || vm_swap_full())) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
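
The one_user calculation above is plain reference arithmetic: a swapcache page
with no other users is pinned once by the swapcache and once by the reference
free_swap_and_cache() has just taken, and buffers, when present, pin one more
that must be discounted. A hedged sketch of the same test (hypothetical helper
name; it mirrors the "2: us + cache" comment in remove_exclusive_swap_page()
above):

static inline int swap_page_only_cached(struct page *page)
{
	int refs = page_count(page);

	if (page_has_buffers(page))
		refs--;			/* the buffer ring holds one reference */
	return refs == 2;		/* one for us, one for the swapcache */
}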
@@ -606,6 +611,7 @@ static int try_to_unuse(unsigned int type)
 		wait_on_page_locked(page);
 		wait_on_page_writeback(page);
 		lock_page(page);
+		wait_on_page_writeback(page);
 
 		/*
 		 * Remove all references to entry, without blocking.
@@ -688,8 +694,10 @@ static int try_to_unuse(unsigned int type)
 			rw_swap_page(WRITE, page);
 			lock_page(page);
 		}
-		if (PageSwapCache(page))
+		if (PageSwapCache(page)) {
+			wait_on_page_writeback(page);
 			delete_from_swap_cache(page);
+		}
 
 		/*
 		 * So we could skip searching mms once swap count went
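
try_to_unuse() now waits for writeback twice: once opportunistically before
taking the page lock, and again after lock_page(), since writeout may begin in
the window between the unlocked wait and lock acquisition. With the lock held
no new writeout can start, so only the second wait is authoritative. Sketched
with a hypothetical helper:

static void settle_swap_page_sketch(struct page *page)
{
	wait_on_page_locked(page);	/* racy: only shortens the locked wait below */
	wait_on_page_writeback(page);	/* ditto, purely an optimisation */
	lock_page(page);		/* from here on, no new writeout can begin */
	wait_on_page_writeback(page);	/* drain any I/O that started meanwhile */
}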