Commit b8791381 authored by Zi Yan, committed by Andrew Morton

mm: memcg: make memcg huge page split support any order split

It sets memcg information for the pages after the split.  A new parameter,
new_order, is added to tell the order of the subpages in the new page; it is
always 0 for now.  This prepares for upcoming changes to support splitting a
huge page to any lower order.
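
As a rough illustration of the accounting this new interface prepares for (a
userspace sketch with made-up helper names, not kernel code): splitting an
order old_order folio into order new_order pieces yields old_nr / new_nr
folios, so split_page_memcg() must take old_nr / new_nr - 1 extra memcg (or
objcg) references on top of the one the original folio already holds.

#include <assert.h>
#include <stdio.h>

/*
 * Illustration only (hypothetical helpers, not kernel API): the counting
 * behind the new (old_order, new_order) interface of split_page_memcg().
 */
static unsigned int folios_after_split(int old_order, int new_order)
{
	return 1u << (old_order - new_order);
}

static unsigned int extra_refs_needed(int old_order, int new_order)
{
	unsigned int old_nr = 1u << old_order;
	unsigned int new_nr = 1u << new_order;

	return old_nr / new_nr - 1;	/* one reference is already held by the head */
}

int main(void)
{
	/* Today's only case: a 512-page (order-9) THP split to base pages. */
	assert(folios_after_split(9, 0) == 512);
	assert(extra_refs_needed(9, 0) == 511);

	/* A future any-order split, e.g. order 9 down to order 2. */
	assert(folios_after_split(9, 2) == 128);
	assert(extra_refs_needed(9, 2) == 127);

	printf("split accounting checks passed\n");
	return 0;
}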

Link: https://lkml.kernel.org/r/20240226205534.1603748-6-zi.yan@sent.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michal Koutny <mkoutny@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9a581c12
include/linux/memcontrol.h

@@ -1163,7 +1163,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 	rcu_read_unlock();
 }

-void split_page_memcg(struct page *head, int order);
+void split_page_memcg(struct page *head, int old_order, int new_order);

 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 						gfp_t gfp_mask,
@@ -1621,7 +1621,7 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
 {
 }

-static inline void split_page_memcg(struct page *head, int order)
+static inline void split_page_memcg(struct page *head, int old_order, int new_order)
 {
 }
mm/huge_memory.c

@@ -2894,7 +2894,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	unsigned int nr = 1 << order;

 	/* complete memcg works before add pages to LRU */
-	split_page_memcg(head, order);
+	split_page_memcg(head, order, 0);

 	if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
 		offset = swp_offset(folio->swap);
mm/memcontrol.c

@@ -3606,23 +3606,24 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
 /*
  * Because page_memcg(head) is not set on tails, set it now.
  */
-void split_page_memcg(struct page *head, int order)
+void split_page_memcg(struct page *head, int old_order, int new_order)
 {
 	struct folio *folio = page_folio(head);
 	struct mem_cgroup *memcg = folio_memcg(folio);
 	int i;
-	unsigned int nr = 1 << order;
+	unsigned int old_nr = 1 << old_order;
+	unsigned int new_nr = 1 << new_order;

 	if (mem_cgroup_disabled() || !memcg)
 		return;

-	for (i = 1; i < nr; i++)
+	for (i = new_nr; i < old_nr; i += new_nr)
 		folio_page(folio, i)->memcg_data = folio->memcg_data;

 	if (folio_memcg_kmem(folio))
-		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
+		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
 	else
-		css_get_many(&memcg->css, nr - 1);
+		css_get_many(&memcg->css, old_nr / new_nr - 1);
 }

 #ifdef CONFIG_SWAP
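
Read in isolation, the reworked loop walks the old folio in strides of new_nr
pages and stamps the shared memcg_data on the first page of every resulting
folio except the first, which already carries it. A minimal userspace sketch
of that indexing (assumed array and function names, not kernel code):

#include <assert.h>
#include <stddef.h>

#define MAX_PAGES 512

/* Illustration only: memcg_data stamped on the head page of each new folio. */
static unsigned long memcg_data[MAX_PAGES];

/* Mimics the loop structure of the reworked split_page_memcg(). */
static void split_memcg_sketch(unsigned long head_data, int old_order, int new_order)
{
	unsigned int old_nr = 1u << old_order;
	unsigned int new_nr = 1u << new_order;
	unsigned int i;

	memcg_data[0] = head_data;		/* the original head already has it */
	for (i = new_nr; i < old_nr; i += new_nr)
		memcg_data[i] = head_data;	/* head page of each new folio */
}

int main(void)
{
	split_memcg_sketch(0xabcd, 9, 2);

	/* Heads of the 128 resulting order-2 folios sit at indexes 0, 4, 8, ... */
	assert(memcg_data[0] == 0xabcd);
	assert(memcg_data[4] == 0xabcd);
	assert(memcg_data[508] == 0xabcd);
	/* Tail pages within each new folio are left untouched, as in the kernel. */
	assert(memcg_data[1] == 0);
	return 0;
}

The reference counts taken afterwards match the number of new folios beyond
the original one, i.e. old_nr / new_nr - 1.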
mm/page_alloc.c

@@ -2617,7 +2617,7 @@ void split_page(struct page *page, unsigned int order)
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 	split_page_owner(page, order);
-	split_page_memcg(page, order);
+	split_page_memcg(page, order, 0);
 }
 EXPORT_SYMBOL_GPL(split_page);

@@ -4802,7 +4802,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		struct page *last = page + nr;

 		split_page_owner(page, order);
-		split_page_memcg(page, order);
+		split_page_memcg(page, order, 0);

 		while (page < --last)
 			set_page_refcounted(last);
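
The callers updated by this commit (__split_huge_page(), split_page(), and
make_alloc_exact()) all pass new_order = 0, so the new loop visits exactly the
pages the old for (i = 1; i < nr; i++) loop did, and old_nr / new_nr - 1 equals
the old nr - 1; behaviour is unchanged until a later patch passes a non-zero
new_order. A quick equivalence check (illustrative userspace C, not kernel
code):

#include <assert.h>
#include <string.h>

#define MAX_PAGES 512

static char touched_old[MAX_PAGES], touched_new[MAX_PAGES];

int main(void)
{
	int order = 9;			/* e.g. a 512-page folio */
	unsigned int nr = 1u << order;
	unsigned int new_nr = 1u << 0;	/* the callers above pass new_order = 0 */
	unsigned int i;

	/* Old loop: every tail page. */
	for (i = 1; i < nr; i++)
		touched_old[i] = 1;

	/* New loop with new_order = 0: the same set of pages. */
	for (i = new_nr; i < nr; i += new_nr)
		touched_new[i] = 1;

	assert(memcmp(touched_old, touched_new, sizeof(touched_old)) == 0);
	return 0;
}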