Commit d85aecf2 authored by Miaohe Lin's avatar Miaohe Lin Committed by Linus Torvalds

hugetlb_cgroup: fix imbalanced css_get and css_put pair for shared mappings

The current implementation of hugetlb_cgroup for shared mappings could
have different behavior.  Consider the following two scenarios:

 1.Assume initial css reference count of hugetlb_cgroup is 1:
  1.1 Call hugetlb_reserve_pages with from = 1, to = 2. So css reference
      count is 2 associated with 1 file_region.
  1.2 Call hugetlb_reserve_pages with from = 2, to = 3. So css reference
      count is 3 associated with 2 file_region.
  1.3 coalesce_file_region will coalesce these two file_regions into
      one. So css reference count is 3 associated with 1 file_region
      now.

 2.Assume initial css reference count of hugetlb_cgroup is 1 again:
  2.1 Call hugetlb_reserve_pages with from = 1, to = 3. So css reference
      count is 2 associated with 1 file_region.

Therefore, we might have one file_region while holding one or more css
reference counts. This inconsistency could lead to imbalanced css_get()
and css_put() pair. If we do css_put one by one (e.g. the hole punch
case), scenario 2 would put one more css reference. If we do css_put all
together (e.g. the truncate case), scenario 1 will leak one css reference.

The imbalanced css_get() and css_put() pair would result in a non-zero
reference when we try to destroy the hugetlb cgroup. The hugetlb cgroup
directory is removed __but__ associated resource is not freed. This
might ultimately result in OOM, or in being unable to create a new
hugetlb cgroup in a busy workload.

In order to fix this, we have to make sure that one file_region must
hold exactly one css reference. So in coalesce_file_region case, we
should release one css reference before coalescence. Also only put css
reference when the entire file_region is removed.

The last thing to note is that the caller of region_add() will only hold
one reference to h_cg->css for the whole contiguous reservation region.
But this area might be scattered when there are already some
file_regions residing in it. As a result, many file_regions may share only
one h_cg->css reference. In order to ensure that one file_region must
hold exactly one css reference, we should do css_get() for each
file_region and release the reference held by caller when they are done.

[linmiaohe@huawei.com: fix imbalanced css_get and css_put pair for shared mappings]
  Link: https://lkml.kernel.org/r/20210316023002.53921-1-linmiaohe@huawei.com

Link: https://lkml.kernel.org/r/20210301120540.37076-1-linmiaohe@huawei.com
Fixes: 075a61d0 ("hugetlb_cgroup: add accounting for shared mappings")
Reported-by: kernel test robot <lkp@intel.com> (auto build test ERROR)
Signed-off-by: default avatarMiaohe Lin <linmiaohe@huawei.com>
Reviewed-by: default avatarMike Kravetz <mike.kravetz@oracle.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Wanpeng Li <liwp.linux@gmail.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 7acac4b3
...@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void) ...@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
} }
/*
 * Drop one reference on the hugetlb cgroup's css for the reservation
 * (rsvd) accounting. Counterpart to the css_get() taken when a
 * file_region records its uncharge info; callers use it to release the
 * reference held across hugetlb_reserve_pages() once the file_regions
 * hold their own references.
 */
static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
css_put(&h_cg->css);
}
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr); struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
...@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, ...@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg, struct file_region *rg,
unsigned long nr_pages); unsigned long nr_pages,
bool region_del);
extern void hugetlb_cgroup_file_init(void) __init; extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage, extern void hugetlb_cgroup_migrate(struct page *oldhpage,
...@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage, ...@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
#else #else
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg, struct file_region *rg,
unsigned long nr_pages) unsigned long nr_pages,
bool region_del)
{ {
} }
...@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void) ...@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
return true; return true;
} }
/* No-op stub used when CONFIG_CGROUP_HUGETLB is not enabled. */
static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr) struct hugetlb_cgroup **ptr)
{ {
......
...@@ -280,6 +280,17 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, ...@@ -280,6 +280,17 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
nrg->reservation_counter = nrg->reservation_counter =
&h_cg->rsvd_hugepage[hstate_index(h)]; &h_cg->rsvd_hugepage[hstate_index(h)];
nrg->css = &h_cg->css; nrg->css = &h_cg->css;
/*
* The caller will hold exactly one h_cg->css reference for the
* whole contiguous reservation region. But this area might be
* scattered when there are already some file_regions residing in
* it. As a result, many file_regions may share only one css
* reference. In order to ensure that one file_region must hold
* exactly one h_cg->css reference, we should do css_get for
* each file_region and leave the reference held by caller
* untouched.
*/
css_get(&h_cg->css);
if (!resv->pages_per_hpage) if (!resv->pages_per_hpage)
resv->pages_per_hpage = pages_per_huge_page(h); resv->pages_per_hpage = pages_per_huge_page(h);
/* pages_per_hpage should be the same for all entries in /* pages_per_hpage should be the same for all entries in
...@@ -293,6 +304,14 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, ...@@ -293,6 +304,14 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
#endif #endif
} }
/*
 * Release the css reference held by @rg, if any. The invariant this
 * patch establishes is that each file_region holds exactly one css
 * reference, so this must be called exactly once per file_region that
 * is coalesced away or fully deleted.
 */
static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
if (rg->css)
css_put(rg->css);
#endif
}
static bool has_same_uncharge_info(struct file_region *rg, static bool has_same_uncharge_info(struct file_region *rg,
struct file_region *org) struct file_region *org)
{ {
...@@ -316,6 +335,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) ...@@ -316,6 +335,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
prg->to = rg->to; prg->to = rg->to;
list_del(&rg->link); list_del(&rg->link);
put_uncharge_info(rg);
kfree(rg); kfree(rg);
rg = prg; rg = prg;
...@@ -327,6 +347,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) ...@@ -327,6 +347,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
nrg->from = rg->from; nrg->from = rg->from;
list_del(&rg->link); list_del(&rg->link);
put_uncharge_info(rg);
kfree(rg); kfree(rg);
} }
} }
...@@ -662,7 +683,7 @@ static long region_del(struct resv_map *resv, long f, long t) ...@@ -662,7 +683,7 @@ static long region_del(struct resv_map *resv, long f, long t)
del += t - f; del += t - f;
hugetlb_cgroup_uncharge_file_region( hugetlb_cgroup_uncharge_file_region(
resv, rg, t - f); resv, rg, t - f, false);
/* New entry for end of split region */ /* New entry for end of split region */
nrg->from = t; nrg->from = t;
...@@ -683,7 +704,7 @@ static long region_del(struct resv_map *resv, long f, long t) ...@@ -683,7 +704,7 @@ static long region_del(struct resv_map *resv, long f, long t)
if (f <= rg->from && t >= rg->to) { /* Remove entire region */ if (f <= rg->from && t >= rg->to) { /* Remove entire region */
del += rg->to - rg->from; del += rg->to - rg->from;
hugetlb_cgroup_uncharge_file_region(resv, rg, hugetlb_cgroup_uncharge_file_region(resv, rg,
rg->to - rg->from); rg->to - rg->from, true);
list_del(&rg->link); list_del(&rg->link);
kfree(rg); kfree(rg);
continue; continue;
...@@ -691,13 +712,13 @@ static long region_del(struct resv_map *resv, long f, long t) ...@@ -691,13 +712,13 @@ static long region_del(struct resv_map *resv, long f, long t)
if (f <= rg->from) { /* Trim beginning of region */ if (f <= rg->from) { /* Trim beginning of region */
hugetlb_cgroup_uncharge_file_region(resv, rg, hugetlb_cgroup_uncharge_file_region(resv, rg,
t - rg->from); t - rg->from, false);
del += t - rg->from; del += t - rg->from;
rg->from = t; rg->from = t;
} else { /* Trim end of region */ } else { /* Trim end of region */
hugetlb_cgroup_uncharge_file_region(resv, rg, hugetlb_cgroup_uncharge_file_region(resv, rg,
rg->to - f); rg->to - f, false);
del += rg->to - f; del += rg->to - f;
rg->to = f; rg->to = f;
...@@ -5187,6 +5208,10 @@ bool hugetlb_reserve_pages(struct inode *inode, ...@@ -5187,6 +5208,10 @@ bool hugetlb_reserve_pages(struct inode *inode,
*/ */
long rsv_adjust; long rsv_adjust;
/*
* hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
* reference to h_cg->css. See comment below for detail.
*/
hugetlb_cgroup_uncharge_cgroup_rsvd( hugetlb_cgroup_uncharge_cgroup_rsvd(
hstate_index(h), hstate_index(h),
(chg - add) * pages_per_huge_page(h), h_cg); (chg - add) * pages_per_huge_page(h), h_cg);
...@@ -5194,6 +5219,14 @@ bool hugetlb_reserve_pages(struct inode *inode, ...@@ -5194,6 +5219,14 @@ bool hugetlb_reserve_pages(struct inode *inode,
rsv_adjust = hugepage_subpool_put_pages(spool, rsv_adjust = hugepage_subpool_put_pages(spool,
chg - add); chg - add);
hugetlb_acct_memory(h, -rsv_adjust); hugetlb_acct_memory(h, -rsv_adjust);
} else if (h_cg) {
/*
* The file_regions will hold their own reference to
* h_cg->css. So we should release the reference held
* via hugetlb_cgroup_charge_cgroup_rsvd() when we are
* done.
*/
hugetlb_cgroup_put_rsvd_cgroup(h_cg);
} }
} }
return true; return true;
......
...@@ -391,7 +391,8 @@ void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start, ...@@ -391,7 +391,8 @@ void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg, struct file_region *rg,
unsigned long nr_pages) unsigned long nr_pages,
bool region_del)
{ {
if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages) if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
return; return;
...@@ -400,6 +401,11 @@ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, ...@@ -400,6 +401,11 @@ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
!resv->reservation_counter) { !resv->reservation_counter) {
page_counter_uncharge(rg->reservation_counter, page_counter_uncharge(rg->reservation_counter,
nr_pages * resv->pages_per_hpage); nr_pages * resv->pages_per_hpage);
/*
* Only do css_put(rg->css) when we delete the entire region
* because one file_region must hold exactly one css reference.
*/
if (region_del)
css_put(rg->css); css_put(rg->css);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment