Commit 7d7602b4 authored by Shakeel Butt, committed by Andrew Morton

memcg: move mem_cgroup_charge_statistics to v1 code

There are no callers of mem_cgroup_charge_statistics() in the v2 code
base, so move it to the v1-only code and rename it to
memcg1_charge_statistics().

Link: https://lkml.kernel.org/r/20240815050453.1298138-4-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: T.J. Mercier <tjmercier@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 41213dd0
@@ -853,9 +853,9 @@ static int mem_cgroup_move_account(struct folio *folio,
 	nid = folio_nid(folio);
 
 	local_irq_disable();
-	mem_cgroup_charge_statistics(to, nr_pages);
+	memcg1_charge_statistics(to, nr_pages);
 	memcg1_check_events(to, nid);
-	mem_cgroup_charge_statistics(from, -nr_pages);
+	memcg1_charge_statistics(from, -nr_pages);
 	memcg1_check_events(from, nid);
 	local_irq_enable();
 out:
@@ -1439,6 +1439,19 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
 	}
 }
 
+void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
+{
+	/* pagein of a big page is an event. So, ignore page size */
+	if (nr_pages > 0)
+		__count_memcg_events(memcg, PGPGIN, 1);
+	else {
+		__count_memcg_events(memcg, PGPGOUT, 1);
+		nr_pages = -nr_pages; /* for event */
+	}
+
+	__this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
+}
+
 #define THRESHOLDS_EVENTS_TARGET 128
 #define SOFTLIMIT_EVENTS_TARGET 1024
 
...
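A note on the function just moved above: it counts exactly one PGPGIN or PGPGOUT event per charge or uncharge, no matter how many pages the folio spans, while nr_page_events accumulates the raw page count. A standalone userspace model of that logic (an illustrative sketch only, with plain globals standing in for the kernel's per-CPU counters):

#include <stdio.h>

static long pgpgin, pgpgout, nr_page_events;

/* Simplified stand-in for memcg1_charge_statistics(): one event per
 * charge/uncharge, page count accumulated separately. */
static void charge_statistics(int nr_pages)
{
	if (nr_pages > 0)
		pgpgin++;		/* a charge is a single PGPGIN */
	else {
		pgpgout++;		/* an uncharge is a single PGPGOUT */
		nr_pages = -nr_pages;	/* accumulate the magnitude */
	}
	nr_page_events += nr_pages;
}

int main(void)
{
	charge_statistics(512);		/* charge a 512-page (2MB) THP */
	charge_statistics(-512);	/* uncharge it */
	/* prints: pgpgin=1 pgpgout=1 nr_page_events=1024 */
	printf("pgpgin=%ld pgpgout=%ld nr_page_events=%ld\n",
	       pgpgin, pgpgout, nr_page_events);
	return 0;
}

Charging and then uncharging a 512-page THP thus yields one PGPGIN, one PGPGOUT, and 1024 accumulated page events, which is what the "ignore page size" comment is getting at.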
@@ -7,7 +7,6 @@
 
 /* Cgroup v1 and v2 common declarations */
 
-void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		     unsigned int nr_pages);
 
@@ -116,6 +115,7 @@ bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
 void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
 void memcg1_oom_recover(struct mem_cgroup *memcg);
 
+void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
 void memcg1_check_events(struct mem_cgroup *memcg, int nid);
 void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
@@ -147,6 +147,7 @@ static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) {
 static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
 static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
 
+static inline void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages) {}
 static inline void memcg1_check_events(struct mem_cgroup *memcg, int nid) {}
 static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}
...
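The header hunks follow the usual config-gated pattern: shared call sites in memcontrol.c always see a definition of memcg1_charge_statistics(), but only v1 builds get the real one. Condensed from the two hunks above (the v1 header gates the two halves on CONFIG_MEMCG_V1):

#ifdef CONFIG_MEMCG_V1
/* v1 enabled: the real implementation lives in the v1-only source. */
void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
#else
/* v1 compiled out: a no-op stub keeps shared call sites building. */
static inline void memcg1_charge_statistics(struct mem_cgroup *memcg,
					    int nr_pages) {}
#endif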
@@ -840,19 +840,6 @@ unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 	return READ_ONCE(memcg->vmstats->events_local[i]);
 }
 
-void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
-{
-	/* pagein of a big page is an event. So, ignore page size */
-	if (nr_pages > 0)
-		__count_memcg_events(memcg, PGPGIN, 1);
-	else {
-		__count_memcg_events(memcg, PGPGOUT, 1);
-		nr_pages = -nr_pages; /* for event */
-	}
-
-	__this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages);
-}
-
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
 	/*
@@ -2366,7 +2353,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 	commit_charge(folio, memcg);
 
 	local_irq_disable();
-	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
+	memcg1_charge_statistics(memcg, folio_nr_pages(folio));
 	memcg1_check_events(memcg, folio_nid(folio));
 	local_irq_enable();
 }
@@ -4742,7 +4729,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 	commit_charge(new, memcg);
 
 	local_irq_save(flags);
-	mem_cgroup_charge_statistics(memcg, nr_pages);
+	memcg1_charge_statistics(memcg, nr_pages);
 	memcg1_check_events(memcg, folio_nid(new));
 	local_irq_restore(flags);
 }
@@ -4987,7 +4974,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
 	memcg_stats_lock();
-	mem_cgroup_charge_statistics(memcg, -nr_entries);
+	memcg1_charge_statistics(memcg, -nr_entries);
 	memcg_stats_unlock();
 	memcg1_check_events(memcg, folio_nid(folio));
...
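Two things worth noting about the call sites above. First, each one updates the statistics with interrupts excluded, via local_irq_disable()/local_irq_save() or, in mem_cgroup_swapout(), the interrupts-off context its comment describes plus memcg_stats_lock(), because __count_memcg_events() and __this_cpu_add() are the non-atomic per-CPU variants. Second, the accumulated nr_page_events drives memcg1_check_events(), which ratelimits threshold and soft-limit processing against targets like THRESHOLDS_EVENTS_TARGET from the first file. A toy model of that batching (illustrative only: the helper name and plain globals are hypothetical, and the kernel tracks moving targets per CPU):

#include <stdio.h>

#define THRESHOLDS_EVENTS_TARGET 128

static long nr_page_events;
static long threshold_target = THRESHOLDS_EVENTS_TARGET;

/* Fire once per TARGET accumulated page events, then advance the
 * target; everything in between is skipped cheaply. */
static int event_ratelimit(void)
{
	if (nr_page_events < threshold_target)
		return 0;
	threshold_target = nr_page_events + THRESHOLDS_EVENTS_TARGET;
	return 1;
}

int main(void)
{
	int fired = 0;

	for (int i = 0; i < 1000; i++) {	/* 1000 single-page charges */
		nr_page_events += 1;
		fired += event_ratelimit();
	}
	printf("checks fired: %d\n", fired);	/* 7, i.e. ~1000/128 */
	return 0;
}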