Commit 568b567f authored by Chengming Zhou, committed by Andrew Morton

mm/zsmalloc: fix migrate_write_lock() when !CONFIG_COMPACTION

Patch series "mm/zsmalloc: fix and optimize objects/page migration".

This series is to fix and optimize the zsmalloc objects/page migration.


This patch (of 3):

migrate_write_lock() is an empty function when !CONFIG_COMPACTION, in which
case zs_compact() can still be triggered from shrinker reclaim context.  (Maybe
it's better to rename it to zs_shrink()?)

And users that map zspage objects rely on migrate_read_lock() so the object
won't be migrated elsewhere while it is mapped.

Fix it by always implementing the migrate_write_lock() related functions,
even when CONFIG_COMPACTION is disabled.
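
For illustration, a minimal userspace sketch of the locking scheme in
question, with pthread_rwlock_t assumed as a stand-in for the kernel rwlock
in struct zspage; map_object(), compact_zspage() and BROKEN_NO_COMPACTION are
hypothetical names for this sketch, not the zsmalloc API:

/*
 * Minimal userspace sketch of the zspage migrate lock scheme (assumed model:
 * pthread_rwlock_t stands in for the kernel rwlock_t in struct zspage;
 * map_object()/compact_zspage() are hypothetical helpers, not zsmalloc API).
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t zspage_lock = PTHREAD_RWLOCK_INITIALIZER;
static int object_location = 0;		/* "where the object currently lives" */

static void migrate_read_lock(void)    { pthread_rwlock_rdlock(&zspage_lock); }
static void migrate_read_unlock(void)  { pthread_rwlock_unlock(&zspage_lock); }

#ifdef BROKEN_NO_COMPACTION
/* Pre-patch !CONFIG_COMPACTION stubs: the write side never excludes readers. */
static void migrate_write_lock(void)   { }
static void migrate_write_unlock(void) { }
#else
/* Post-patch: the write lock is always real, even without CONFIG_COMPACTION. */
static void migrate_write_lock(void)   { pthread_rwlock_wrlock(&zspage_lock); }
static void migrate_write_unlock(void) { pthread_rwlock_unlock(&zspage_lock); }
#endif

static void *map_object(void *arg)
{
	(void)arg;
	migrate_read_lock();
	int seen = object_location;	/* pin and use the object's location */
	usleep(100000);			/* ... caller works on the mapping ... */
	if (seen != object_location)
		puts("object moved while mapped!");
	migrate_read_unlock();
	return NULL;
}

static void *compact_zspage(void *arg)
{
	(void)arg;
	/* compaction can run from shrinker reclaim even without CONFIG_COMPACTION */
	migrate_write_lock();
	object_location++;		/* "migrate" the object */
	migrate_write_unlock();
	return NULL;
}

int main(void)
{
	pthread_t mapper, compactor;

	pthread_create(&mapper, NULL, map_object, NULL);
	usleep(10000);			/* let the mapper take the read lock first */
	pthread_create(&compactor, NULL, compact_zspage, NULL);
	pthread_join(mapper, NULL);
	pthread_join(compactor, NULL);
	return 0;
}

Building with -pthread and -DBROKEN_NO_COMPACTION shows the pre-patch
behaviour: the compactor moves the object while it is still mapped.  With the
real write lock, the compactor has to wait for migrate_read_unlock().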

Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com
Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-1-34cd49c6545b@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 75c40c25
mm/zsmalloc.c

@@ -278,18 +278,15 @@ static bool ZsHugePage(struct zspage *zspage)
 static void migrate_lock_init(struct zspage *zspage);
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
-
-#ifdef CONFIG_COMPACTION
 static void migrate_write_lock(struct zspage *zspage);
 static void migrate_write_lock_nested(struct zspage *zspage);
 static void migrate_write_unlock(struct zspage *zspage);
+
+#ifdef CONFIG_COMPACTION
 static void kick_deferred_free(struct zs_pool *pool);
 static void init_deferred_free(struct zs_pool *pool);
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
 #else
-static void migrate_write_lock(struct zspage *zspage) {}
-static void migrate_write_lock_nested(struct zspage *zspage) {}
-static void migrate_write_unlock(struct zspage *zspage) {}
 static void kick_deferred_free(struct zs_pool *pool) {}
 static void init_deferred_free(struct zs_pool *pool) {}
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
@@ -1725,7 +1722,6 @@ static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
 	read_unlock(&zspage->lock);
 }
 
-#ifdef CONFIG_COMPACTION
 static void migrate_write_lock(struct zspage *zspage)
 {
 	write_lock(&zspage->lock);
@@ -1741,6 +1737,7 @@ static void migrate_write_unlock(struct zspage *zspage)
 	write_unlock(&zspage->lock);
 }
 
+#ifdef CONFIG_COMPACTION
 /* Number of isolated subpage for *page migration* in this zspage */
 static void inc_zspage_isolation(struct zspage *zspage)
 {