Commit d0d96328 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] SwapMig: Switch error handling in migrate_pages to use -Exx

Use -Exxx values instead of numeric return codes, and clean up the code in
migrate_pages() to use -Exx error codes.

Consolidate successful migration handling
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d4984711
...@@ -606,10 +606,6 @@ int putback_lru_pages(struct list_head *l) ...@@ -606,10 +606,6 @@ int putback_lru_pages(struct list_head *l)
/* /*
* swapout a single page * swapout a single page
* page is locked upon entry, unlocked on exit * page is locked upon entry, unlocked on exit
*
* return codes:
* 0 = complete
* 1 = retry
*/ */
static int swap_page(struct page *page) static int swap_page(struct page *page)
{ {
...@@ -650,7 +646,7 @@ static int swap_page(struct page *page) ...@@ -650,7 +646,7 @@ static int swap_page(struct page *page)
unlock_page(page); unlock_page(page);
retry: retry:
return 1; return -EAGAIN;
} }
/* /*
* migrate_pages * migrate_pages
...@@ -669,6 +665,8 @@ static int swap_page(struct page *page) ...@@ -669,6 +665,8 @@ static int swap_page(struct page *page)
* is only swapping out pages and never touches the second * is only swapping out pages and never touches the second
* list. The direct migration patchset * list. The direct migration patchset
* extends this function to avoid the use of swap. * extends this function to avoid the use of swap.
*
* Return: Number of pages not migrated when "to" ran empty.
*/ */
int migrate_pages(struct list_head *from, struct list_head *to, int migrate_pages(struct list_head *from, struct list_head *to,
struct list_head *moved, struct list_head *failed) struct list_head *moved, struct list_head *failed)
...@@ -679,6 +677,7 @@ int migrate_pages(struct list_head *from, struct list_head *to, ...@@ -679,6 +677,7 @@ int migrate_pages(struct list_head *from, struct list_head *to,
struct page *page; struct page *page;
struct page *page2; struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE; int swapwrite = current->flags & PF_SWAPWRITE;
int rc;
if (!swapwrite) if (!swapwrite)
current->flags |= PF_SWAPWRITE; current->flags |= PF_SWAPWRITE;
...@@ -689,22 +688,23 @@ int migrate_pages(struct list_head *from, struct list_head *to, ...@@ -689,22 +688,23 @@ int migrate_pages(struct list_head *from, struct list_head *to,
list_for_each_entry_safe(page, page2, from, lru) { list_for_each_entry_safe(page, page2, from, lru) {
cond_resched(); cond_resched();
if (page_count(page) == 1) { rc = 0;
if (page_count(page) == 1)
/* page was freed from under us. So we are done. */ /* page was freed from under us. So we are done. */
list_move(&page->lru, moved); goto next;
continue;
}
/* /*
* Skip locked pages during the first two passes to give the * Skip locked pages during the first two passes to give the
* functions holding the lock time to release the page. Later we * functions holding the lock time to release the page. Later we
* use lock_page() to have a higher chance of acquiring the * use lock_page() to have a higher chance of acquiring the
* lock. * lock.
*/ */
rc = -EAGAIN;
if (pass > 2) if (pass > 2)
lock_page(page); lock_page(page);
else else
if (TestSetPageLocked(page)) if (TestSetPageLocked(page))
goto retry_later; goto next;
/* /*
* Only wait on writeback if we have already done a pass where * Only wait on writeback if we have already done a pass where
...@@ -713,18 +713,19 @@ int migrate_pages(struct list_head *from, struct list_head *to, ...@@ -713,18 +713,19 @@ int migrate_pages(struct list_head *from, struct list_head *to,
if (pass > 0) { if (pass > 0) {
wait_on_page_writeback(page); wait_on_page_writeback(page);
} else { } else {
if (PageWriteback(page)) { if (PageWriteback(page))
unlock_page(page); goto unlock_page;
goto retry_later;
}
} }
/*
* Anonymous pages must have swap cache references otherwise
* the information contained in the page maps cannot be
* preserved.
*/
if (PageAnon(page) && !PageSwapCache(page)) { if (PageAnon(page) && !PageSwapCache(page)) {
if (!add_to_swap(page, GFP_KERNEL)) { if (!add_to_swap(page, GFP_KERNEL)) {
unlock_page(page); rc = -ENOMEM;
list_move(&page->lru, failed); goto unlock_page;
nr_failed++;
continue;
} }
} }
...@@ -732,12 +733,23 @@ int migrate_pages(struct list_head *from, struct list_head *to, ...@@ -732,12 +733,23 @@ int migrate_pages(struct list_head *from, struct list_head *to,
* Page is properly locked and writeback is complete. * Page is properly locked and writeback is complete.
* Try to migrate the page. * Try to migrate the page.
*/ */
if (!swap_page(page)) { rc = swap_page(page);
goto next;
unlock_page:
unlock_page(page);
next:
if (rc == -EAGAIN) {
retry++;
} else if (rc) {
/* Permanent failure */
list_move(&page->lru, failed);
nr_failed++;
} else {
/* Success */
list_move(&page->lru, moved); list_move(&page->lru, moved);
continue;
} }
retry_later:
retry++;
} }
if (retry && pass++ < 10) if (retry && pass++ < 10)
goto redo; goto redo;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment