Commit be6c8982 authored by Zhou Guanghui's avatar Zhou Guanghui Committed by Linus Torvalds

mm/memcg: rename mem_cgroup_split_huge_fixup to split_page_memcg and add nr_pages argument

Rename mem_cgroup_split_huge_fixup to split_page_memcg and explicitly pass
in page number argument.

In this way, the interface name is more common and can be used by
potential users.  In addition, the complete info(memcg and flag) of the
memcg needs to be set to the tail pages.

Link: https://lkml.kernel.org/r/20210304074053.65527-2-zhouguanghui1@huawei.com
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Hanjun Guo <guohanjun@huawei.com>
Cc: Tianhong Ding <dingtianhong@huawei.com>
Cc: Weilong Chen <chenweilong@huawei.com>
Cc: Rui Xiang <rui.xiang@huawei.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 61bf318e
include/linux/memcontrol.h
@@ -1061,9 +1061,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 	rcu_read_unlock();
 }

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head);
-#endif
+void split_page_memcg(struct page *head, unsigned int nr);

 #else /* CONFIG_MEMCG */
@@ -1400,7 +1398,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 	return 0;
 }

-static inline void mem_cgroup_split_huge_fixup(struct page *head)
+static inline void split_page_memcg(struct page *head, unsigned int nr)
 {
 }
mm/huge_memory.c
@@ -2467,7 +2467,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	int i;

 	/* complete memcg works before add pages to LRU */
-	mem_cgroup_split_huge_fixup(head);
+	split_page_memcg(head, nr);

 	if (PageAnon(head) && PageSwapCache(head)) {
 		swp_entry_t entry = { .val = page_private(head) };
mm/memcontrol.c
@@ -3287,24 +3287,21 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
 #endif /* CONFIG_MEMCG_KMEM */

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
- * Because page_memcg(head) is not set on compound tails, set it now.
+ * Because page_memcg(head) is not set on tails, set it now.
  */
-void mem_cgroup_split_huge_fixup(struct page *head)
+void split_page_memcg(struct page *head, unsigned int nr)
 {
 	struct mem_cgroup *memcg = page_memcg(head);
 	int i;

-	if (mem_cgroup_disabled())
+	if (mem_cgroup_disabled() || !memcg)
 		return;

-	for (i = 1; i < HPAGE_PMD_NR; i++) {
-		css_get(&memcg->css);
-		head[i].memcg_data = (unsigned long)memcg;
-	}
+	for (i = 1; i < nr; i++)
+		head[i].memcg_data = head->memcg_data;
+	css_get_many(&memcg->css, nr - 1);
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 #ifdef CONFIG_MEMCG_SWAP
 /**
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment