Commit a2c43eed authored by Hugh Dickins, committed by Linus Torvalds

mm: try_to_free_swap replaces remove_exclusive_swap_page

remove_exclusive_swap_page(): its problem is in living up to its name.

It doesn't matter if someone else has a reference to the page (raised
page_count); it doesn't matter if the page is mapped into userspace
(raised page_mapcount - though that hints it may be worth keeping the
swap): all that matters is that there be no more references to the swap
(and no writeback in progress).

swapoff (try_to_unuse) has been removing pages from swapcache for years,
with no concern for page count or page mapcount, and we used to have a
comment in lookup_swap_cache() recognizing that: if you go for a page of
swapcache, you'll get the right page, but it could have been removed from
swapcache by the time you get page lock.

So, give up asking for exclusivity: get rid of
remove_exclusive_swap_page(), and remove_exclusive_swap_page_ref() and
remove_exclusive_swap_page_count() which were spawned for the recent LRU
work: replace them by the simpler try_to_free_swap() which just checks
page_swapcount().
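
To make the change in policy concrete, here is a small userspace model (not kernel code: struct page_model and its fields are illustrative stand-ins for page_count(), page_mapcount(), page_swapcount(), PageWriteback() and PageSwapCache()) contrasting the old exclusivity test with the new swap-count test:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the bits of page state that matter here. */
struct page_model {
        int count;       /* page_count(): all references to the page       */
        int mapcount;    /* page_mapcount(): user mappings of the page     */
        int swapcount;   /* page_swapcount(): other references to the swap */
        bool writeback;  /* PageWriteback()                                */
        bool swapcache;  /* PageSwapCache()                                */
};

/* Old policy: insist on exclusivity -- the page count must be exactly
 * "us + cache (+ ptes)", so any stray reference blocks freeing the swap. */
static bool old_remove_exclusive(const struct page_model *p, int expected_count)
{
        if (!p->swapcache || p->writeback)
                return false;
        return p->count == expected_count;
}

/* New policy: only the swap slot matters -- free it when nothing else
 * references the swap entry, regardless of who holds the page. */
static bool new_try_to_free_swap(const struct page_model *p)
{
        if (!p->swapcache || p->writeback)
                return false;
        return p->swapcount == 0;
}

int main(void)
{
        /* A mapped swapcache page that someone else has also pinned:
         * the old test refuses; the new test still frees the swap. */
        struct page_model p = { .count = 4, .mapcount = 1, .swapcount = 0,
                                .writeback = false, .swapcache = true };
        printf("old: %d  new: %d\n",
               old_remove_exclusive(&p, 2 + p.mapcount),
               new_try_to_free_swap(&p));
        return 0;
}

With the extra pin on the page, the model prints "old: 0  new: 1": only the new test is willing to reclaim the swap slot.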

Similarly, remove the page_count limitation from free_swap_and_cache(),
but assume that it's worth holding on to the swap if the page is mapped and
swap is nowhere near full.  Add a vm_swap_full() test in free_swap_cache()?
It would be consistent, but I think we probably have enough for now.
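
Concretely, instead of demanding a bare page count of two, free_swap_and_cache() now applies the test below (condensed from the free_swap_and_cache() hunk later in this diff):

        if (PageSwapCache(page) && !PageWriteback(page) &&
                        (!page_mapped(page) || vm_swap_full())) {
                delete_from_swap_cache(page);
                SetPageDirty(page);
        }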
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7b1fe597
@@ -305,8 +305,7 @@ extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
 extern sector_t swapdev_block(int, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int reuse_swap_page(struct page *);
-extern int remove_exclusive_swap_page(struct page *);
-extern int remove_exclusive_swap_page_ref(struct page *);
+extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 /* linux/mm/thrash.c */
@@ -388,12 +387,7 @@ static inline void delete_from_swap_cache(struct page *page)
 #define reuse_swap_page(page)	(page_mapcount(page) == 1)
-static inline int remove_exclusive_swap_page(struct page *p)
-{
-	return 0;
-}
-static inline int remove_exclusive_swap_page_ref(struct page *page)
+static inline int try_to_free_swap(struct page *page)
 {
 	return 0;
 }
@@ -2403,7 +2403,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	swap_free(entry);
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
-		remove_exclusive_swap_page(page);
+		try_to_free_swap(page);
 	unlock_page(page);
 	if (write_access) {
@@ -98,7 +98,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 	struct bio *bio;
 	int ret = 0, rw = WRITE;
-	if (remove_exclusive_swap_page(page)) {
+	if (try_to_free_swap(page)) {
 		unlock_page(page);
 		goto out;
 	}
@@ -454,8 +454,7 @@ void pagevec_swap_free(struct pagevec *pvec)
 		struct page *page = pvec->pages[i];
 		if (PageSwapCache(page) && trylock_page(page)) {
-			if (PageSwapCache(page))
-				remove_exclusive_swap_page_ref(page);
+			try_to_free_swap(page);
 			unlock_page(page);
 		}
 	}
@@ -196,13 +196,13 @@ void delete_from_swap_cache(struct page *page)
  *
  * Its ok to check for PageSwapCache without the page lock
  * here because we are going to recheck again inside
- * exclusive_swap_page() _with_ the lock.
+ * try_to_free_swap() _with_ the lock.
  * 	- Marcelo
  */
 static inline void free_swap_cache(struct page *page)
 {
-	if (PageSwapCache(page) && trylock_page(page)) {
-		remove_exclusive_swap_page(page);
+	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
+		try_to_free_swap(page);
 		unlock_page(page);
 	}
 }
@@ -348,68 +348,23 @@ int reuse_swap_page(struct page *page)
 }
 /*
- * Work out if there are any other processes sharing this
- * swap cache page. Free it if you can. Return success.
+ * If swap is getting full, or if there are no more mappings of this page,
+ * then try_to_free_swap is called to free its swap space.
  */
-static int remove_exclusive_swap_page_count(struct page *page, int count)
+int try_to_free_swap(struct page *page)
 {
-	int retval;
-	struct swap_info_struct * p;
-	swp_entry_t entry;
 	VM_BUG_ON(!PageLocked(page));
 	if (!PageSwapCache(page))
 		return 0;
 	if (PageWriteback(page))
 		return 0;
-	if (page_count(page) != count) /* us + cache + ptes */
-		return 0;
-	entry.val = page_private(page);
-	p = swap_info_get(entry);
-	if (!p)
+	if (page_swapcount(page))
 		return 0;
-	/* Is the only swap cache user the cache itself? */
-	retval = 0;
-	if (p->swap_map[swp_offset(entry)] == 1) {
-		/* Recheck the page count with the swapcache lock held.. */
-		spin_lock_irq(&swapper_space.tree_lock);
-		if ((page_count(page) == count) && !PageWriteback(page)) {
-			__delete_from_swap_cache(page);
-			SetPageDirty(page);
-			retval = 1;
-		}
-		spin_unlock_irq(&swapper_space.tree_lock);
-	}
-	spin_unlock(&swap_lock);
-	if (retval) {
-		swap_free(entry);
-		page_cache_release(page);
-	}
-	return retval;
-}
-/*
- * Most of the time the page should have two references: one for the
- * process and one for the swap cache.
- */
-int remove_exclusive_swap_page(struct page *page)
-{
-	return remove_exclusive_swap_page_count(page, 2);
-}
-/*
- * The pageout code holds an extra reference to the page.  That raises
- * the reference count to test for to 2 for a page that is only in the
- * swap cache plus 1 for each process that maps the page.
- */
-int remove_exclusive_swap_page_ref(struct page *page)
-{
-	return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
-}
+	delete_from_swap_cache(page);
+	SetPageDirty(page);
+	return 1;
 }
 /*
@@ -436,13 +391,12 @@ void free_swap_and_cache(swp_entry_t entry)
 		spin_unlock(&swap_lock);
 	}
 	if (page) {
-		int one_user;
-		one_user = (page_count(page) == 2);
-		/* Only cache user (+us), or swap space full? Free it! */
-		/* Also recheck PageSwapCache after page is locked (above) */
+		/*
+		 * Not mapped elsewhere, or swap space full? Free it!
+		 * Also recheck PageSwapCache now page is locked (above).
+		 */
 		if (PageSwapCache(page) && !PageWriteback(page) &&
-				(one_user || vm_swap_full())) {
+				(!page_mapped(page) || vm_swap_full())) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
@@ -759,7 +759,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
 		if (PageSwapCache(page) && vm_swap_full())
-			remove_exclusive_swap_page_ref(page);
+			try_to_free_swap(page);
 		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
 		pgactivate++;