Commit e35606e4 authored by Chengming Zhou, committed by Andrew Morton

mm/zswap: global lru and shrinker shared by all zswap_pools fix

Commit bf9b7df2 ("mm/zswap: global lru and shrinker shared by all
zswap_pools") introduced a new lock to protect zswap_next_shrink, instead
of reusing zswap_pools_lock.

But the problem is that the lock is initialized only when zswap is enabled,
which causes a bug if zswap_memcg_offline_cleanup() is called while zswap is
disabled.
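
For context, the broken ordering looks roughly like this (a simplified
sketch of the pre-fix code, with unrelated details elided):

	/* zswap_setup() runs lazily, on first enablement only. */
	static int zswap_setup(void)
	{
		...
		spin_lock_init(&zswap.shrink_lock);
		...
	}

	/* Called on memcg offlining whether or not zswap was ever
	 * enabled; if zswap_setup() never ran, this takes an
	 * uninitialized spinlock.
	 */
	void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
	{
		spin_lock(&zswap.shrink_lock);
		...
	}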

Fix it by using DEFINE_SPINLOCK() to initialize the lock statically, and
split the anonymous struct into multiple static variables to stay
consistent with the existing global variables in zswap.
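
DEFINE_SPINLOCK() expands to a definition with a compile-time initializer,
so the lock is valid before any initcall runs. A minimal sketch of the two
patterns (illustrative names, not the zswap code itself):

	#include <linux/spinlock.h>

	/* Runtime init: invalid until spin_lock_init() has run. */
	static spinlock_t runtime_lock;

	/* Static init: usable at any point after boot. */
	static DEFINE_SPINLOCK(static_lock);

Splitting the anonymous struct into plain static variables also matches how
the other zswap globals are already defined.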

Link: https://lkml.kernel.org/r/20240305075345.1493214-1-chengming.zhou@linux.dev
Fixes: bf9b7df2 ("mm/zswap: global lru and shrinker shared by all zswap_pools")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202403051008.a8cf8a94-lkp@intel.com
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5aa598a7
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -180,15 +180,16 @@ struct zswap_pool {
 	char tfm_name[CRYPTO_MAX_ALG_NAME];
 };
 
-static struct {
-	struct list_lru list_lru;
-	atomic_t nr_stored;
-	struct shrinker *shrinker;
-	struct work_struct shrink_work;
-	struct mem_cgroup *next_shrink;
-	/* The lock protects next_shrink. */
-	spinlock_t shrink_lock;
-} zswap;
+/* Global LRU lists shared by all zswap pools. */
+static struct list_lru zswap_list_lru;
+/* counter of pages stored in all zswap pools. */
+static atomic_t zswap_nr_stored = ATOMIC_INIT(0);
+
+/* The lock protects zswap_next_shrink updates. */
+static DEFINE_SPINLOCK(zswap_shrink_lock);
+static struct mem_cgroup *zswap_next_shrink;
+static struct work_struct zswap_shrink_work;
+static struct shrinker *zswap_shrinker;
 
 /*
  * struct zswap_entry
@@ -798,10 +799,10 @@ void zswap_folio_swapin(struct folio *folio)
 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
 {
 	/* lock out zswap shrinker walking memcg tree */
-	spin_lock(&zswap.shrink_lock);
-	if (zswap.next_shrink == memcg)
-		zswap.next_shrink = mem_cgroup_iter(NULL, zswap.next_shrink, NULL);
-	spin_unlock(&zswap.shrink_lock);
+	spin_lock(&zswap_shrink_lock);
+	if (zswap_next_shrink == memcg)
+		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+	spin_unlock(&zswap_shrink_lock);
 }
 
 /*********************************
@@ -900,9 +901,9 @@ static void zswap_entry_free(struct zswap_entry *entry)
 	if (!entry->length)
 		atomic_dec(&zswap_same_filled_pages);
 	else {
-		zswap_lru_del(&zswap.list_lru, entry);
+		zswap_lru_del(&zswap_list_lru, entry);
 		zpool_free(zswap_find_zpool(entry), entry->handle);
-		atomic_dec(&zswap.nr_stored);
+		atomic_dec(&zswap_nr_stored);
 		zswap_pool_put(entry->pool);
 	}
 	if (entry->objcg) {
@@ -1274,7 +1275,7 @@ static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
 	nr_protected =
 		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-	lru_size = list_lru_shrink_count(&zswap.list_lru, sc);
+	lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
 
 	/*
 	 * Abort if we are shrinking into the protected region.
@@ -1291,7 +1292,7 @@ static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
 		return SHRINK_STOP;
 	}
 
-	shrink_ret = list_lru_shrink_walk(&zswap.list_lru, sc, &shrink_memcg_cb,
+	shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
 					  &encountered_page_in_swapcache);
 
 	if (encountered_page_in_swapcache)
@@ -1317,7 +1318,7 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 #else
 	/* use pool stats instead of memcg stats */
 	nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
-	nr_stored = atomic_read(&zswap.nr_stored);
+	nr_stored = atomic_read(&zswap_nr_stored);
 #endif
 
 	if (!nr_stored)
@@ -1325,7 +1326,7 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 	nr_protected =
 		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
-	nr_freeable = list_lru_shrink_count(&zswap.list_lru, sc);
+	nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
 	/*
 	 * Subtract the lru size by an estimate of the number of pages
 	 * that should be protected.
@@ -1374,7 +1375,7 @@ static int shrink_memcg(struct mem_cgroup *memcg)
 	for_each_node_state(nid, N_NORMAL_MEMORY) {
 		unsigned long nr_to_walk = 1;
 
-		shrunk += list_lru_walk_one(&zswap.list_lru, nid, memcg,
+		shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
 					    &shrink_memcg_cb, NULL, &nr_to_walk);
 	}
 	return shrunk ? 0 : -EAGAIN;
@@ -1387,9 +1388,9 @@ static void shrink_worker(struct work_struct *w)
 	/* global reclaim will select cgroup in a round-robin fashion. */
 	do {
-		spin_lock(&zswap.shrink_lock);
-		zswap.next_shrink = mem_cgroup_iter(NULL, zswap.next_shrink, NULL);
-		memcg = zswap.next_shrink;
+		spin_lock(&zswap_shrink_lock);
+		zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+		memcg = zswap_next_shrink;
 
 		/*
 		 * We need to retry if we have gone through a full round trip, or if we
@@ -1403,7 +1404,7 @@ static void shrink_worker(struct work_struct *w)
 		 * memcg is not killed when we are reclaiming.
 		 */
 		if (!memcg) {
-			spin_unlock(&zswap.shrink_lock);
+			spin_unlock(&zswap_shrink_lock);
 			if (++failures == MAX_RECLAIM_RETRIES)
 				break;
@@ -1413,15 +1414,15 @@ static void shrink_worker(struct work_struct *w)
 		if (!mem_cgroup_tryget_online(memcg)) {
 			/* drop the reference from mem_cgroup_iter() */
 			mem_cgroup_iter_break(NULL, memcg);
-			zswap.next_shrink = NULL;
-			spin_unlock(&zswap.shrink_lock);
+			zswap_next_shrink = NULL;
+			spin_unlock(&zswap_shrink_lock);
 
 			if (++failures == MAX_RECLAIM_RETRIES)
 				break;
 
 			goto resched;
 		}
-		spin_unlock(&zswap.shrink_lock);
+		spin_unlock(&zswap_shrink_lock);
 
 		ret = shrink_memcg(memcg);
 		/* drop the extra reference */
@@ -1542,7 +1543,7 @@ bool zswap_store(struct folio *folio)
 	if (objcg) {
 		memcg = get_mem_cgroup_from_objcg(objcg);
-		if (memcg_list_lru_alloc(memcg, &zswap.list_lru, GFP_KERNEL)) {
+		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
 			mem_cgroup_put(memcg);
 			goto put_pool;
 		}
@@ -1573,8 +1574,8 @@ bool zswap_store(struct folio *folio)
 	}
 	if (entry->length) {
 		INIT_LIST_HEAD(&entry->lru);
-		zswap_lru_add(&zswap.list_lru, entry);
-		atomic_inc(&zswap.nr_stored);
+		zswap_lru_add(&zswap_list_lru, entry);
+		atomic_inc(&zswap_nr_stored);
 	}
 	spin_unlock(&tree->lock);
@@ -1606,7 +1607,7 @@ bool zswap_store(struct folio *folio)
 	return false;
 
 shrink:
-	queue_work(shrink_wq, &zswap.shrink_work);
+	queue_work(shrink_wq, &zswap_shrink_work);
 	goto reject;
 }
@@ -1773,16 +1774,14 @@ static int zswap_setup(void)
 	if (!shrink_wq)
 		goto shrink_wq_fail;
 
-	zswap.shrinker = zswap_alloc_shrinker();
-	if (!zswap.shrinker)
+	zswap_shrinker = zswap_alloc_shrinker();
+	if (!zswap_shrinker)
 		goto shrinker_fail;
-	if (list_lru_init_memcg(&zswap.list_lru, zswap.shrinker))
+	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
 		goto lru_fail;
-	shrinker_register(zswap.shrinker);
+	shrinker_register(zswap_shrinker);
 
-	INIT_WORK(&zswap.shrink_work, shrink_worker);
-	atomic_set(&zswap.nr_stored, 0);
-	spin_lock_init(&zswap.shrink_lock);
+	INIT_WORK(&zswap_shrink_work, shrink_worker);
 
 	pool = __zswap_pool_create_fallback();
 	if (pool) {
@@ -1801,7 +1800,7 @@ static int zswap_setup(void)
 	return 0;
 
 lru_fail:
-	shrinker_free(zswap.shrinker);
+	shrinker_free(zswap_shrinker);
 shrinker_fail:
 	destroy_workqueue(shrink_wq);
 shrink_wq_fail:
...