Commit 63e2da3b authored by Arnd Bergmann, committed by Alexei Starovoitov

bpf: work around -Wuninitialized warning

Splitting these out into separate helper functions means that we
actually pass an uninitialized variable into another function call
if dec_active() happens not to be inlined and CONFIG_PREEMPT_RT
is disabled:

kernel/bpf/memalloc.c: In function 'add_obj_to_free_list':
kernel/bpf/memalloc.c:200:9: error: 'flags' is used uninitialized [-Werror=uninitialized]
  200 |         dec_active(c, flags);
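
For illustration, a minimal stand-alone sketch of the same pattern
(hypothetical names, not the memalloc.c code): the save helper only
writes *flags on one path, yet the caller hands the possibly
uninitialized value by value to a callee that is not inlined, which
is the kind of use that -Wuninitialized flags under gcc -O2 -Wall:

	#include <stdio.h>

	#define RT_ENABLED 0	/* stands in for CONFIG_PREEMPT_RT=n */

	static void save_state(unsigned long *flags)
	{
		if (RT_ENABLED)
			*flags = 1;	/* only initialized on this path */
	}

	static __attribute__((noinline)) void restore_state(unsigned long flags)
	{
		if (RT_ENABLED)
			printf("restoring %lu\n", flags);
	}

	int main(void)
	{
		unsigned long flags;

		save_state(&flags);
		restore_state(flags);	/* possibly uninitialized value passed by value */
		return 0;
	}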

Avoid this by passing the flags by reference, so they either get
initialized and dereferenced through a pointer, or the pointer never
gets accessed at all.
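
Applied to the sketch above (again with hypothetical names), the fix
changes the restore helper to take the flags by reference, so no
uninitialized value is ever passed and the pointee is only
dereferenced on the path that actually wrote it:

	static __attribute__((noinline)) void restore_state(unsigned long *flags)
	{
		if (RT_ENABLED)
			printf("restoring %lu\n", *flags);
	}

	/* caller side:
	 *	save_state(&flags);
	 *	restore_state(&flags);
	 */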

Fixes: 18e027b1 ("bpf: Factor out inc/dec of active flag into helpers.")
Suggested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20230725202653.2905259-1-arnd@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 13fd5e14
@@ -183,11 +183,11 @@ static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
 	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
 }
 
-static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
+static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
 {
 	local_dec(&c->active);
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_restore(flags);
+		local_irq_restore(*flags);
 }
 
 static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
@@ -197,7 +197,7 @@ static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
 	inc_active(c, &flags);
 	__llist_add(obj, &c->free_llist);
 	c->free_cnt++;
-	dec_active(c, flags);
+	dec_active(c, &flags);
 }
 
 /* Mostly runs from irq_work except __init phase. */
@@ -344,7 +344,7 @@ static void free_bulk(struct bpf_mem_cache *c)
 			cnt = --c->free_cnt;
 		else
 			cnt = 0;
-		dec_active(c, flags);
+		dec_active(c, &flags);
 		if (llnode)
 			enque_to_free(tgt, llnode);
 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
@@ -384,7 +384,7 @@ static void check_free_by_rcu(struct bpf_mem_cache *c)
 		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
 			if (__llist_add(llnode, &c->free_by_rcu))
 				c->free_by_rcu_tail = llnode;
-		dec_active(c, flags);
+		dec_active(c, &flags);
 	}
 
 	if (llist_empty(&c->free_by_rcu))
@@ -408,7 +408,7 @@ static void check_free_by_rcu(struct bpf_mem_cache *c)
 	inc_active(c, &flags);
 	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
 	c->waiting_for_gp_tail = c->free_by_rcu_tail;
-	dec_active(c, flags);
+	dec_active(c, &flags);
 
 	if (unlikely(READ_ONCE(c->draining))) {
 		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);