Commit 91b71e78 authored by Yosry Ahmed, committed by Andrew Morton

mm: memcg: add NULL check to obj_cgroup_put()

9 out of 16 callers perform a NULL check before calling obj_cgroup_put().
Move the NULL check into the function itself, similar to mem_cgroup_put().  The
unlikely() NULL check in current_objcg_update() was left as is to avoid
dropping the unlikely() annotation, as this is a fast path.
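
For comparison, mem_cgroup_put() already follows this pattern; a sketch of its
definition in include/linux/memcontrol.h (quoted from memory, so the exact body
may differ slightly):

	/* The NULL check lives inside the put helper, so callers need not check. */
	static inline void mem_cgroup_put(struct mem_cgroup *memcg)
	{
		if (memcg)
			css_put(&memcg->css);
	}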

Link: https://lkml.kernel.org/r/20240316015803.2777252-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5b0a6700
@@ -818,6 +818,7 @@ static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
+if (objcg)
percpu_ref_put(&objcg->refcnt);
}
@@ -759,7 +759,6 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
}
-if (ma->objcg)
obj_cgroup_put(ma->objcg);
destroy_mem_alloc(ma, rcu_in_progress);
}
@@ -776,7 +775,6 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
}
}
-if (ma->objcg)
obj_cgroup_put(ma->objcg);
destroy_mem_alloc(ma, rcu_in_progress);
}
@@ -2369,7 +2369,6 @@ static void drain_local_stock(struct work_struct *dummy)
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
-if (old)
obj_cgroup_put(old);
}
@@ -3145,7 +3144,6 @@ static struct obj_cgroup *current_objcg_update(void)
if (old) {
old = (struct obj_cgroup *)
((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
-if (old)
obj_cgroup_put(old);
old = NULL;
@@ -3418,7 +3416,6 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
mod_objcg_mlstate(objcg, pgdat, idx, nr);
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
-if (old)
obj_cgroup_put(old);
}
@@ -3546,7 +3543,6 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
}
local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
-if (old)
obj_cgroup_put(old);
if (nr_pages)
@@ -5468,7 +5464,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
-if (memcg->orig_objcg)
obj_cgroup_put(memcg->orig_objcg);
for_each_node(node)
@@ -6620,7 +6615,6 @@ static void mem_cgroup_exit(struct task_struct *task)
objcg = (struct obj_cgroup *)
((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
-if (objcg)
obj_cgroup_put(objcg);
/*
@@ -1618,7 +1618,6 @@ bool zswap_store(struct folio *folio)
freepage:
zswap_entry_cache_free(entry);
reject:
-if (objcg)
obj_cgroup_put(objcg);
check_old:
/*