Commit ffc3c8a6 authored by Kefeng Wang, committed by Andrew Morton

mm: memcontrol: remove page_memcg()

page_memcg() is only called by mod_memcg_page_state(), so squash it into
its caller and remove page_memcg().
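
For reference, the helper ends up roughly as below after the squash. This is
only a sketch: the parameter types and the mem_cgroup_disabled() early return
are assumed from the surrounding header and are not part of this patch; the
patch itself only changes the folio_memcg(page_folio()) line.

    /* Sketch only: parameter types and the early return are assumptions. */
    static inline void mod_memcg_page_state(struct page *page,
                                            int idx, int val)
    {
            struct mem_cgroup *memcg;

            if (mem_cgroup_disabled())
                    return;

            rcu_read_lock();
            memcg = folio_memcg(page_folio(page));  /* was page_memcg(page) */
            if (memcg)
                    mod_memcg_state(memcg, idx, val);
            rcu_read_unlock();
    }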

Link: https://lkml.kernel.org/r/20240524014950.187805-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1e25501d
@@ -443,11 +443,6 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio)
 	return __folio_memcg(folio);
 }
 
-static inline struct mem_cgroup *page_memcg(struct page *page)
-{
-	return folio_memcg(page_folio(page));
-}
-
 /**
  * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
  * @folio: Pointer to the folio.
@@ -1014,7 +1009,7 @@ static inline void mod_memcg_page_state(struct page *page,
 		return;
 
 	rcu_read_lock();
-	memcg = page_memcg(page);
+	memcg = folio_memcg(page_folio(page));
 	if (memcg)
 		mod_memcg_state(memcg, idx, val);
 	rcu_read_unlock();
@@ -1133,11 +1128,6 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio)
 	return NULL;
 }
 
-static inline struct mem_cgroup *page_memcg(struct page *page)
-{
-	return NULL;
-}
-
 static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
@@ -1636,7 +1626,7 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 }
 
-/* Test requires a stable page->memcg binding, see page_memcg() */
+/* Test requires a stable folio->memcg binding, see folio_memcg() */
 static inline bool folio_matches_lruvec(struct folio *folio,
 					struct lruvec *lruvec)
 {
...
@@ -3807,7 +3807,7 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 #endif /* CONFIG_MEMCG_KMEM */
 
 /*
- * Because page_memcg(head) is not set on tails, set it now.
+ * Because folio_memcg(head) is not set on tails, set it now.
  */
 void split_page_memcg(struct page *head, int old_order, int new_order)
 {
...
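
A usage note on the lockless variant whose CONFIG_MEMCG=n stub appears above:
folio_memcg_rcu() must be called inside an RCU read-side critical section, as
the WARN_ON_ONCE(!rcu_read_lock_held()) in that stub enforces. A minimal,
hypothetical example (the helper name folio_is_charged() is not in the tree):

    #include <linux/memcontrol.h>
    #include <linux/rcupdate.h>

    /* Hypothetical helper: report whether a folio is charged to any memcg. */
    static bool folio_is_charged(struct folio *folio)
    {
            bool charged;

            rcu_read_lock();
            charged = folio_memcg_rcu(folio) != NULL;
            rcu_read_unlock();

            return charged;
    }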