Commit 91b71e78 authored by Yosry Ahmed, committed by Andrew Morton

mm: memcg: add NULL check to obj_cgroup_put()

9 out of 16 callers perform a NULL check before calling obj_cgroup_put().
Move the NULL check into the function, similar to mem_cgroup_put().  The
unlikely() NULL check in current_objcg_update() was left alone to avoid
dropping the unlikely() annotation, as this is a fast path.
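
For illustration only (not part of the commit): a minimal user-space sketch
of the same idiom. The names toy_obj, toy_new and toy_put are hypothetical
stand-ins for obj_cgroup and its helpers, not kernel APIs.

	/* Illustration only; toy_obj/toy_new/toy_put are hypothetical
	 * stand-ins for obj_cgroup and its helpers, not kernel APIs. */
	#include <stdlib.h>

	struct toy_obj {
		int refcnt;
	};

	static struct toy_obj *toy_new(void)
	{
		struct toy_obj *o = malloc(sizeof(*o));

		if (o)
			o->refcnt = 1;
		return o;
	}

	/* The pattern this commit applies: the put helper tolerates
	 * NULL, so callers can drop their own "if (o)" guards. */
	static void toy_put(struct toy_obj *o)
	{
		if (!o)
			return;
		if (--o->refcnt == 0)
			free(o);
	}

	int main(void)
	{
		struct toy_obj *o = toy_new();

		toy_put(o);	/* drops the last reference and frees */
		toy_put(NULL);	/* safe: the NULL check lives in the helper */
		return 0;
	}

Centralizing the check matches mem_cgroup_put() and lets the nine guarded
call sites below shed a branch in the caller.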

Link: https://lkml.kernel.org/r/20240316015803.2777252-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5b0a6700
@@ -818,7 +818,8 @@ static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
 
 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
 {
-	percpu_ref_put(&objcg->refcnt);
+	if (objcg)
+		percpu_ref_put(&objcg->refcnt);
 }
 
 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
@@ -759,8 +759,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
 			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
 		}
-		if (ma->objcg)
-			obj_cgroup_put(ma->objcg);
+		obj_cgroup_put(ma->objcg);
 		destroy_mem_alloc(ma, rcu_in_progress);
 	}
 	if (ma->caches) {
@@ -776,8 +775,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
 			}
 		}
-		if (ma->objcg)
-			obj_cgroup_put(ma->objcg);
+		obj_cgroup_put(ma->objcg);
 		destroy_mem_alloc(ma, rcu_in_progress);
 	}
 }
@@ -2369,8 +2369,7 @@ static void drain_local_stock(struct work_struct *dummy)
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
-	if (old)
-		obj_cgroup_put(old);
+	obj_cgroup_put(old);
 }
 
 /*
@@ -3145,8 +3144,7 @@ static struct obj_cgroup *current_objcg_update(void)
 		if (old) {
 			old = (struct obj_cgroup *)
 				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
-			if (old)
-				obj_cgroup_put(old);
+			obj_cgroup_put(old);
 
 			old = NULL;
 		}
@@ -3418,8 +3416,7 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 		mod_objcg_mlstate(objcg, pgdat, idx, nr);
 
 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
-	if (old)
-		obj_cgroup_put(old);
+	obj_cgroup_put(old);
 }
 
 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
@@ -3546,8 +3543,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	}
 
 	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
-	if (old)
-		obj_cgroup_put(old);
+	obj_cgroup_put(old);
 
 	if (nr_pages)
 		obj_cgroup_uncharge_pages(objcg, nr_pages);
@@ -5468,8 +5464,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
 	int node;
 
-	if (memcg->orig_objcg)
-		obj_cgroup_put(memcg->orig_objcg);
+	obj_cgroup_put(memcg->orig_objcg);
 
 	for_each_node(node)
 		free_mem_cgroup_per_node_info(memcg, node);
@@ -6620,8 +6615,7 @@ static void mem_cgroup_exit(struct task_struct *task)
 
 	objcg = (struct obj_cgroup *)
 		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
-	if (objcg)
-		obj_cgroup_put(objcg);
+	obj_cgroup_put(objcg);
 
 	/*
 	 * Some kernel allocations can happen after this point,
@@ -1618,8 +1618,7 @@ bool zswap_store(struct folio *folio)
 freepage:
 	zswap_entry_cache_free(entry);
 reject:
-	if (objcg)
-		obj_cgroup_put(objcg);
+	obj_cgroup_put(objcg);
 check_old:
 	/*
 	 * If the zswap store fails or zswap is disabled, we must invalidate the