Commit 67d13fe8 authored by Weijie Yang, committed by Linus Torvalds

mm/zswap: bugfix: memory leak when invalidate and reclaim occur concurrently

Consider the following scenario:

thread 0: reclaims entry x (gets a refcount, but has not yet called zswap_get_swap_cache_page)
thread 1: calls zswap_frontswap_invalidate_page to invalidate entry x.
	when it finishes, entry x and its zbud are not freed, because its refcount != 0
	now swap_map[x] = 0
thread 0: now calls zswap_get_swap_cache_page
	swapcache_prepare returns -ENOENT because entry x is no longer in use
	zswap_get_swap_cache_page returns ZSWAP_SWAPCACHE_NOMEM
	zswap_writeback_entry does nothing except put the refcount

At this point, the memory of zswap_entry x and its zpage is leaked.
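
For reference, the pre-patch fail path in zswap_writeback_entry (the removed side of the last hunk in the diff below) looks roughly like this; the comments map it onto the scenario above:

    fail:
            spin_lock(&tree->lock);
            /* thread 0 drops its reference here, but nothing frees the entry:
             * thread 1's invalidate already removed it from the tree without
             * freeing it (a reference was still held), so the zswap_entry and
             * its zbud allocation are orphaned */
            zswap_entry_put(entry);
            spin_unlock(&tree->lock);
            return ret;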

Modify:
 - check the refcount in the fail path and free the memory if it is no longer referenced (a minimal userspace sketch of this refcount rule follows this list).

 - use ZSWAP_SWAPCACHE_FAIL instead of ZSWAP_SWAPCACHE_NOMEM, as the fail path
   can be caused not only by nomem but also by invalidate.
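
As a minimal, self-contained illustration of the refcount rule the fix relies on (whoever drops the last reference frees the entry), the sketch below replays the interleaving above in userspace; entry_put is a hypothetical stand-in for zswap_entry_put, not the kernel code:

    /* userspace sketch only: replays the refcount interleaving above */
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
            int refcount;
    };

    /* stand-in for zswap_entry_put(): drop one reference, return what remains */
    static int entry_put(struct entry *e)
    {
            return --e->refcount;
    }

    int main(void)
    {
            struct entry *x = malloc(sizeof(*x));

            x->refcount = 1;        /* initial reference held by the tree */
            x->refcount++;          /* thread 0: reclaim takes a reference */

            /* thread 1: invalidate drops the tree's reference; the count is
             * still non-zero, so it must not free the entry */
            entry_put(x);

            /* thread 0: writeback fails and drops its reference in the fail
             * path; with the fix the last holder frees the entry here,
             * without it nobody does and the memory leaks */
            if (entry_put(x) <= 0) {
                    free(x);
                    printf("entry freed by the writeback fail path\n");
            }
            return 0;
    }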
Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
Reviewed-by: Bob Liu <bob.liu@oracle.com>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Acked-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7a67d7ab
mm/zswap.c
@@ -387,7 +387,7 @@ static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
 enum zswap_get_swap_ret {
         ZSWAP_SWAPCACHE_NEW,
         ZSWAP_SWAPCACHE_EXIST,
-        ZSWAP_SWAPCACHE_NOMEM
+        ZSWAP_SWAPCACHE_FAIL,
 };
 
 /*
@@ -401,9 +401,10 @@ enum zswap_get_swap_ret {
  * added to the swap cache, and returned in retpage.
  *
  * If success, the swap cache page is returned in retpage
- * Returns 0 if page was already in the swap cache, page is not locked
- * Returns 1 if the new page needs to be populated, page is locked
- * Returns <0 on error
+ * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
+ * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
+ *     the new page is added to swapcache and locked
+ * Returns ZSWAP_SWAPCACHE_FAIL on error
  */
 static int zswap_get_swap_cache_page(swp_entry_t entry,
                                 struct page **retpage)
@@ -475,7 +476,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
         if (new_page)
                 page_cache_release(new_page);
         if (!found_page)
-                return ZSWAP_SWAPCACHE_NOMEM;
+                return ZSWAP_SWAPCACHE_FAIL;
         *retpage = found_page;
         return ZSWAP_SWAPCACHE_EXIST;
 }
@@ -529,11 +530,11 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
 
         /* try to allocate swap cache page */
         switch (zswap_get_swap_cache_page(swpentry, &page)) {
-        case ZSWAP_SWAPCACHE_NOMEM: /* no memory */
+        case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
                 ret = -ENOMEM;
                 goto fail;
 
-        case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */
+        case ZSWAP_SWAPCACHE_EXIST:
                 /* page is already in the swap cache, ignore for now */
                 page_cache_release(page);
                 ret = -EEXIST;
@@ -594,7 +595,12 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
 
 fail:
         spin_lock(&tree->lock);
-        zswap_entry_put(entry);
+        refcount = zswap_entry_put(entry);
+        if (refcount <= 0) {
+                /* invalidate happened, consider writeback as success */
+                zswap_free_entry(tree, entry);
+                ret = 0;
+        }
         spin_unlock(&tree->lock);
         return ret;
 }