Commit ff9d5ba2 authored by Domenico Cerasuolo, committed by Andrew Morton

mm: zswap: simplify writeback function

zswap_writeback_entry() used to be a callback for the backends, which
don't know about struct zswap_entry.

Now that the only user is the generic zswap LRU reclaimer, it can be
simplified: pass the pinned zswap_entry directly, and consolidate the
refcount management in the shrink function.

Link: https://lkml.kernel.org/r/20230612093815.133504-7-cerasuolodomenico@gmail.com
Signed-off-by: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Tested-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 35499e2b
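
For orientation, here is a condensed userspace sketch of the refcount flow that this patch consolidates into the shrink-side reclaim function. It is an approximation, not kernel code: struct entry, entry_get()/entry_put(), writeback_entry() and reclaim_entry() are simplified stand-ins for zswap_entry and its helpers, and the tree/LRU locking is omitted.

/* Standalone model of the refcount flow in zswap_reclaim_entry() after this
 * patch. All names and types are simplified stand-ins, not the kernel code. */
#include <stdio.h>
#include <stdbool.h>

struct entry {
	int refcount;	/* base (tree) reference + temporary local references */
	bool on_tree;	/* models presence in the per-swap-type rbtree */
};

static void entry_get(struct entry *e)
{
	e->refcount++;
}

/* Drop one reference; free the entry when the last reference goes away. */
static void entry_put(struct entry *e)
{
	if (--e->refcount == 0) {
		e->on_tree = false;
		printf("entry freed\n");
	}
}

/* Models zswap_writeback_entry(): it now receives the pinned entry directly
 * and does no refcounting or tree lookups of its own. Assume success here. */
static int writeback_entry(struct entry *e)
{
	(void)e;
	return 0;
}

/* Models zswap_reclaim_entry(): every get/put now happens in this function. */
static int reclaim_entry(struct entry *e, bool invalidated_during_writeback)
{
	int ret;

	entry_get(e);			/* local ref pins the entry across writeback */
	ret = writeback_entry(e);

	if (ret)
		goto put_unlock;	/* writeback failed: entry stays reclaimable,
					 * keep the tree's base reference */
	if (invalidated_during_writeback)
		goto put_unlock;	/* invalidate() already dropped the base ref */

	entry_put(e);			/* writeback succeeded: drop the base reference */
put_unlock:
	entry_put(e);			/* drop the local reference taken above */
	return ret;
}

int main(void)
{
	struct entry e = { .refcount = 1, .on_tree = true };	/* base ref only */

	return reclaim_entry(&e, false);
}

The shape mirrors the hunks below: the local reference taken before writeback is always dropped at put_unlock, and the base reference held by the tree is dropped exactly once, only when the entry is still in the tree after a successful writeback.
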
@@ -254,7 +254,8 @@ static bool zswap_has_pool;
 	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,	\
 		 zpool_get_type((p)->zpool))
-static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
+static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr,
+				 struct zswap_tree *tree);
 static int zswap_pool_get(struct zswap_pool *pool);
 static void zswap_pool_put(struct zswap_pool *pool);
@@ -635,7 +636,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool)
 	zswap_entry_get(entry);
 	spin_unlock(&tree->lock);
-	ret = zswap_writeback_entry(pool->zpool, entry->handle);
+	ret = zswap_writeback_entry(entry, zhdr, tree);
 	spin_lock(&tree->lock);
 	if (ret) {
@@ -643,8 +644,17 @@ static int zswap_reclaim_entry(struct zswap_pool *pool)
 		spin_lock(&pool->lru_lock);
 		list_move(&entry->lru, &pool->lru);
 		spin_unlock(&pool->lru_lock);
+		goto put_unlock;
 	}
+	/* Check for invalidate() race */
+	if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
+		goto put_unlock;
+
+	/* Drop base reference */
+	zswap_entry_put(tree, entry);
+
+put_unlock:
 	/* Drop local reference */
 	zswap_entry_put(tree, entry);
 unlock:
@@ -1045,16 +1055,14 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
  * the swap cache, the compressed version stored by zswap can be
  * freed.
  */
-static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
+static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr,
+				 struct zswap_tree *tree)
 {
-	struct zswap_header *zhdr;
-	swp_entry_t swpentry;
-	struct zswap_tree *tree;
-	pgoff_t offset;
-	struct zswap_entry *entry;
+	swp_entry_t swpentry = zhdr->swpentry;
 	struct page *page;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
+	struct zpool *pool = entry->pool->zpool;
 	u8 *src, *tmp = NULL;
 	unsigned int dlen;
@@ -1069,25 +1077,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 			return -ENOMEM;
 	}
-	/* extract swpentry from data */
-	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
-	swpentry = zhdr->swpentry; /* here */
-	tree = zswap_trees[swp_type(swpentry)];
-	offset = swp_offset(swpentry);
-	zpool_unmap_handle(pool, handle);
-
-	/* find and ref zswap entry */
-	spin_lock(&tree->lock);
-	entry = zswap_entry_find_get(&tree->rbroot, offset);
-	if (!entry) {
-		/* entry was invalidated */
-		spin_unlock(&tree->lock);
-		kfree(tmp);
-		return 0;
-	}
-	spin_unlock(&tree->lock);
-	BUG_ON(offset != entry->offset);
 	/* try to allocate swap cache page */
 	switch (zswap_get_swap_cache_page(swpentry, &page)) {
 	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
@@ -1121,12 +1110,12 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 		acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
 		dlen = PAGE_SIZE;
-		zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
+		zhdr = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
 		src = (u8 *)zhdr + sizeof(struct zswap_header);
 		if (!zpool_can_sleep_mapped(pool)) {
 			memcpy(tmp, src, entry->length);
 			src = tmp;
-			zpool_unmap_handle(pool, handle);
+			zpool_unmap_handle(pool, entry->handle);
 		}
 		mutex_lock(acomp_ctx->mutex);
@@ -1141,7 +1130,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 		if (!zpool_can_sleep_mapped(pool))
 			kfree(tmp);
 		else
-			zpool_unmap_handle(pool, handle);
+			zpool_unmap_handle(pool, entry->handle);
 		BUG_ON(ret);
 		BUG_ON(dlen != PAGE_SIZE);
@@ -1158,23 +1147,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	put_page(page);
 	zswap_written_back_pages++;
-	spin_lock(&tree->lock);
-	/* drop local reference */
-	zswap_entry_put(tree, entry);
-
-	/*
-	* There are two possible situations for entry here:
-	* (1) refcount is 1(normal case), entry is valid and on the tree
-	* (2) refcount is 0, entry is freed and not on the tree
-	*     because invalidate happened during writeback
-	*  search the tree and free the entry if find entry
-	*/
-	if (entry == zswap_rb_search(&tree->rbroot, offset))
-		zswap_entry_put(tree, entry);
-	spin_unlock(&tree->lock);
 	return ret;
 fail:
 	if (!zpool_can_sleep_mapped(pool))
 		kfree(tmp);
@@ -1183,13 +1156,8 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	* if we get here due to ZSWAP_SWAPCACHE_EXIST
 	* a load may be happening concurrently.
 	* it is safe and okay to not free the entry.
-	* if we free the entry in the following put
 	* it is also okay to return !0
 	*/
-	spin_lock(&tree->lock);
-	zswap_entry_put(tree, entry);
-	spin_unlock(&tree->lock);
 	return ret;
 }