Commit 714e5bde authored by Qi Zheng, committed by Andrew Morton

mbcache: dynamically allocate the mbcache shrinker

In preparation for implementing lockless slab shrink, use the new APIs to
dynamically allocate the mbcache shrinker, so that it can be freed
asynchronously via RCU. Then it no longer needs to wait for an RCU read-side
critical section when releasing the struct mb_cache.
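For reference, a minimal sketch of the allocation/registration pattern this patch switches to. The cache type, field, and callback names below (my_cache, nr_entries, my_cache_count, etc.) are placeholders for illustration, not the mbcache code; only shrinker_alloc(), shrinker_register(), shrinker_free(), private_data, count_objects and scan_objects come from the new shrinker API itself.

#include <linux/shrinker.h>
#include <linux/slab.h>

struct my_cache {
	unsigned long	nr_entries;
	struct shrinker	*shrink;	/* now a pointer, allocated at runtime */
};

static unsigned long my_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	/* private_data replaces the old container_of(shrink, ...) pattern */
	struct my_cache *cache = shrink->private_data;

	return cache->nr_entries;
}

static unsigned long my_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	/* reclaim up to sc->nr_to_scan entries; stubbed out in this sketch */
	return 0;
}

static int my_cache_init(struct my_cache *cache)
{
	cache->shrink = shrinker_alloc(0, "my-cache-shrinker");
	if (!cache->shrink)
		return -ENOMEM;

	cache->shrink->count_objects = my_cache_count;
	cache->shrink->scan_objects = my_cache_scan;
	cache->shrink->private_data = cache;

	/* make the shrinker visible to reclaim only once it is fully set up */
	shrinker_register(cache->shrink);
	return 0;
}

static void my_cache_exit(struct my_cache *cache)
{
	/* unregister and free the shrinker; the actual free happens via RCU */
	shrinker_free(cache->shrink);
}

The design point is that shrinker_free() allows the shrinker object to be torn down asynchronously, so callers such as mb_cache_destroy() do not have to synchronize with RCU readers before releasing the containing structure.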

Link: https://lkml.kernel.org/r/20230911094444.68966-30-zhengqi.arch@bytedance.com
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Abhinav Kumar <quic_abhinavk@quicinc.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Anna Schumaker <anna@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Chandan Babu R <chandan.babu@oracle.com>
Cc: Chao Yu <chao@kernel.org>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Chuck Lever <cel@kernel.org>
Cc: Coly Li <colyli@suse.de>
Cc: Dai Ngo <Dai.Ngo@oracle.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Airlie <airlied@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Cc: Gao Xiang <hsiangkao@linux.alibaba.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Jeffle Xu <jefflexu@linux.alibaba.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Kirill Tkhai <tkhai@ya.ru>
Cc: Marijn Suijten <marijn.suijten@somainline.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Mike Snitzer <snitzer@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nadav Amit <namit@vmware.com>
Cc: Neil Brown <neilb@suse.de>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Olga Kornievskaia <kolga@netapp.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sean Paul <sean@poorly.run>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Song Liu <song@kernel.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Tom Talpey <tom@talpey.com>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: Yue Hu <huyue2@coolpad.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0fbb9969
@@ -37,7 +37,7 @@ struct mb_cache {
 	struct list_head	c_list;
 	/* Number of entries in cache */
 	unsigned long		c_entry_count;
-	struct shrinker		c_shrink;
+	struct shrinker		*c_shrink;
 	/* Work for shrinking when the cache has too many entries */
 	struct work_struct	c_shrink_work;
 };
@@ -293,8 +293,7 @@ EXPORT_SYMBOL(mb_cache_entry_touch);
 static unsigned long mb_cache_count(struct shrinker *shrink,
 				    struct shrink_control *sc)
 {
-	struct mb_cache *cache = container_of(shrink, struct mb_cache,
-					      c_shrink);
+	struct mb_cache *cache = shrink->private_data;
 
 	return cache->c_entry_count;
 }
@@ -333,8 +332,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 static unsigned long mb_cache_scan(struct shrinker *shrink,
 				   struct shrink_control *sc)
 {
-	struct mb_cache *cache = container_of(shrink, struct mb_cache,
-					      c_shrink);
+	struct mb_cache *cache = shrink->private_data;
 
 	return mb_cache_shrink(cache, sc->nr_to_scan);
 }
@@ -377,15 +375,19 @@ struct mb_cache *mb_cache_create(int bucket_bits)
 	for (i = 0; i < bucket_count; i++)
 		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
 
-	cache->c_shrink.count_objects = mb_cache_count;
-	cache->c_shrink.scan_objects = mb_cache_scan;
-	cache->c_shrink.seeks = DEFAULT_SEEKS;
-	if (register_shrinker(&cache->c_shrink, "mbcache-shrinker")) {
+	cache->c_shrink = shrinker_alloc(0, "mbcache-shrinker");
+	if (!cache->c_shrink) {
 		kfree(cache->c_hash);
 		kfree(cache);
 		goto err_out;
 	}
 
+	cache->c_shrink->count_objects = mb_cache_count;
+	cache->c_shrink->scan_objects = mb_cache_scan;
+	cache->c_shrink->private_data = cache;
+
+	shrinker_register(cache->c_shrink);
+
 	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
 
 	return cache;
@@ -406,7 +408,7 @@ void mb_cache_destroy(struct mb_cache *cache)
 {
 	struct mb_cache_entry *entry, *next;
 
-	unregister_shrinker(&cache->c_shrink);
+	shrinker_free(cache->c_shrink);
 
 	/*
 	 * We don't bother with any locking. Cache must not be used at this