Commit 61dd3f24 authored by Kinsey Ho, committed by Andrew Morton

mm/mglru: add CONFIG_LRU_GEN_WALKS_MMU

Add CONFIG_LRU_GEN_WALKS_MMU such that if disabled, the code that
walks page tables to promote pages into the youngest generation will
not be built.
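
As the hunks below show, when the option is off the mm-list hooks such
as lru_gen_add_mm() compile down to empty inline stubs, so call sites
like kernel_clone() need no #ifdef guards; the IS_ENABLED() check lets
the compiler discard the dead branch entirely. The shape of the
pattern, condensed from the header changes below:

	#ifdef CONFIG_LRU_GEN_WALKS_MMU
	void lru_gen_add_mm(struct mm_struct *mm);
	void lru_gen_del_mm(struct mm_struct *mm);
	#else /* !CONFIG_LRU_GEN_WALKS_MMU */
	static inline void lru_gen_add_mm(struct mm_struct *mm)
	{
	}
	static inline void lru_gen_del_mm(struct mm_struct *mm)
	{
	}
	#endif /* CONFIG_LRU_GEN_WALKS_MMU */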

Also improve code readability by adding two helper functions,
get_mm_state() and get_next_mm().
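
As a sketch of how these helpers keep callers free of config checks
(the exact bodies live in mm/vmscan.c and may differ slightly):
get_mm_state() returns the per-lruvec walk state when the walks are
built, and NULL otherwise, so callers can simply test the pointer:

	#ifdef CONFIG_LRU_GEN_WALKS_MMU
	static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
	{
		return &lruvec->mm_state;
	}
	#else
	static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
	{
		return NULL;
	}
	#endif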

Link: https://lkml.kernel.org/r/20231227141205.2200125-3-kinseyho@google.com
Signed-off-by: Kinsey Ho <kinseyho@google.com>
Co-developed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tested-by: Donet Tom <donettom@linux.vnet.ibm.com>
Acked-by: Yu Zhao <yuzhao@google.com>
Cc: kernel test robot <lkp@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 71ce1ab5
@@ -330,7 +330,7 @@ struct mem_cgroup {
 	struct deferred_split deferred_split_queue;
 #endif
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
 	/* per-memcg mm_struct list */
 	struct lru_gen_mm_list mm_list;
 #endif
@@ -958,7 +958,7 @@ struct mm_struct {
 	 */
 	unsigned long ksm_zero_pages;
 #endif /* CONFIG_KSM */
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
 	struct {
 		/* this mm_struct is on lru_gen_mm_list */
 		struct list_head list;
@@ -973,7 +973,7 @@ struct mm_struct {
 		struct mem_cgroup *memcg;
 #endif
 	} lru_gen;
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
 } __randomize_layout;

 /*
@@ -1011,6 +1011,10 @@ struct lru_gen_mm_list {
 	spinlock_t lock;
 };
+#endif /* CONFIG_LRU_GEN */
+
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
 void lru_gen_add_mm(struct mm_struct *mm);
 void lru_gen_del_mm(struct mm_struct *mm);
 #ifdef CONFIG_MEMCG
@@ -1036,7 +1040,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
 	WRITE_ONCE(mm->lru_gen.bitmap, -1);
 }
-#else /* !CONFIG_LRU_GEN */
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
 static inline void lru_gen_add_mm(struct mm_struct *mm)
 {
@@ -1060,7 +1064,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
 {
 }
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */

 struct vma_iterator {
 	struct ma_state mas;
@@ -640,9 +640,11 @@ struct lruvec {
 #ifdef CONFIG_LRU_GEN
 	/* evictable pages divided into generations */
 	struct lru_gen_folio lrugen;
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
 	/* to concurrently iterate lru_gen_mm_list */
 	struct lru_gen_mm_state mm_state;
 #endif
+#endif /* CONFIG_LRU_GEN */
 #ifdef CONFIG_MEMCG
 	struct pglist_data *pgdat;
 #endif
@@ -2946,7 +2946,7 @@ pid_t kernel_clone(struct kernel_clone_args *args)
 		get_task_struct(p);
 	}
-	if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
+	if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) {
 		/* lock the task to synchronize with memcg migration */
 		task_lock(p);
 		lru_gen_add_mm(p->mm);
@@ -1274,6 +1274,10 @@ config LRU_GEN_STATS
 	  from evicted generations for debugging purpose.
 	  This option has a per-memcg and per-node memory overhead.
+
+config LRU_GEN_WALKS_MMU
+	def_bool y
+	depends on LRU_GEN && ARCH_HAS_HW_PTE_YOUNG
 # }

 config ARCH_SUPPORTS_PER_VMA_LOCK
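Note that because LRU_GEN_WALKS_MMU is a def_bool with no prompt, it is
not user-selectable: it is enabled automatically whenever both LRU_GEN
and ARCH_HAS_HW_PTE_YOUNG are set, and compiled out otherwise.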