Commit 29f39430, authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb_cgroup: convert hugetlb_cgroup_migrate to folios

Cleans up intermediate page to folio conversion code in
hugetlb_cgroup_migrate() by changing its arguments from pages to folios.

Link: https://lkml.kernel.org/r/20221101223059.460937-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent de656ed3
...@@ -177,8 +177,8 @@ extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, ...@@ -177,8 +177,8 @@ extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
bool region_del); bool region_del);
extern void hugetlb_cgroup_file_init(void) __init; extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage, extern void hugetlb_cgroup_migrate(struct folio *old_folio,
struct page *newhpage); struct folio *new_folio);
#else #else
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
...@@ -286,8 +286,8 @@ static inline void hugetlb_cgroup_file_init(void) ...@@ -286,8 +286,8 @@ static inline void hugetlb_cgroup_file_init(void)
{ {
} }
static inline void hugetlb_cgroup_migrate(struct page *oldhpage, static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
struct page *newhpage) struct folio *new_folio)
{ {
} }
......
...@@ -7325,7 +7325,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) ...@@ -7325,7 +7325,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
{ {
struct hstate *h = page_hstate(oldpage); struct hstate *h = page_hstate(oldpage);
hugetlb_cgroup_migrate(oldpage, newpage); hugetlb_cgroup_migrate(page_folio(oldpage), page_folio(newpage));
set_page_owner_migrate_reason(newpage, reason); set_page_owner_migrate_reason(newpage, reason);
/* /*
......
...@@ -885,13 +885,11 @@ void __init hugetlb_cgroup_file_init(void) ...@@ -885,13 +885,11 @@ void __init hugetlb_cgroup_file_init(void)
* hugetlb_lock will make sure a parallel cgroup rmdir won't happen * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
* when we migrate hugepages * when we migrate hugepages
*/ */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
{ {
struct hugetlb_cgroup *h_cg; struct hugetlb_cgroup *h_cg;
struct hugetlb_cgroup *h_cg_rsvd; struct hugetlb_cgroup *h_cg_rsvd;
struct hstate *h = page_hstate(oldhpage); struct hstate *h = folio_hstate(old_folio);
struct folio *old_folio = page_folio(oldhpage);
struct folio *new_folio = page_folio(newhpage);
if (hugetlb_cgroup_disabled()) if (hugetlb_cgroup_disabled())
return; return;
...@@ -905,7 +903,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage) ...@@ -905,7 +903,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
/* move the h_cg details to new cgroup */ /* move the h_cg details to new cgroup */
set_hugetlb_cgroup(new_folio, h_cg); set_hugetlb_cgroup(new_folio, h_cg);
set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd); set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
list_move(&newhpage->lru, &h->hugepage_activelist); list_move(&new_folio->lru, &h->hugepage_activelist);
spin_unlock_irq(&hugetlb_lock); spin_unlock_irq(&hugetlb_lock);
return; return;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment