Commit 66447fd0 authored by Chengming Zhou, committed by Andrew Morton

mm/zswap: cleanup zswap_load()

After the common decompress part goes to __zswap_load(), we can clean up
zswap_load() a little.

Link: https://lkml.kernel.org/r/20231213-zswap-dstmem-v5-3-9382162bbf05@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Chris Li <chrisl@kernel.org> (Google)
Cc: Barry Song <21cnbao@gmail.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 32acba4c
@@ -1746,7 +1746,6 @@ bool zswap_load(struct folio *folio)
 	struct zswap_tree *tree = zswap_trees[type];
 	struct zswap_entry *entry;
 	u8 *dst;
-	bool ret;

 	VM_WARN_ON_ONCE(!folio_test_locked(folio));

@@ -1759,23 +1758,20 @@ bool zswap_load(struct folio *folio)
 	}
 	spin_unlock(&tree->lock);

-	if (!entry->length) {
+	if (entry->length)
+		__zswap_load(entry, page);
+	else {
 		dst = kmap_local_page(page);
 		zswap_fill_page(dst, entry->value);
 		kunmap_local(dst);
-		ret = true;
-		goto stats;
 	}

-	__zswap_load(entry, page);
-	ret = true;
-stats:
 	count_vm_event(ZSWPIN);
 	if (entry->objcg)
 		count_objcg_event(entry->objcg, ZSWPIN);

 	spin_lock(&tree->lock);
-	if (ret && zswap_exclusive_loads_enabled) {
+	if (zswap_exclusive_loads_enabled) {
 		zswap_invalidate_entry(tree, entry);
 		folio_mark_dirty(folio);
 	} else if (entry->length) {
@@ -1785,7 +1781,7 @@ bool zswap_load(struct folio *folio)
 	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);

-	return ret;
+	return true;
 }

 void zswap_invalidate(int type, pgoff_t offset)
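
For reference, here is a sketch of how the tail of zswap_load() reads once this
patch is applied, pieced together from the hunks above. The body of the
"else if (entry->length)" branch is not shown in the diff, so it is elided
here as well:

	/*
	 * After the cleanup: the "ret" local and the "stats:" label are
	 * gone, both the same-filled and the compressed paths fall through
	 * to the common accounting code, and the function returns true
	 * unconditionally once an entry was found in the tree.
	 */
	if (entry->length)
		__zswap_load(entry, page);
	else {
		/* Same-filled page: nothing to decompress, just refill it. */
		dst = kmap_local_page(page);
		zswap_fill_page(dst, entry->value);
		kunmap_local(dst);
	}

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

	spin_lock(&tree->lock);
	if (zswap_exclusive_loads_enabled) {
		zswap_invalidate_entry(tree, entry);
		folio_mark_dirty(folio);
	} else if (entry->length) {
		/* LRU handling, unchanged and elided in the diff above. */
	}
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return true;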