Commit 968a3b92 authored by Daniel Borkmann

Merge branch 'bpf-mem-cache-free-rcu'

Alexei Starovoitov says:

====================
v3->v4:
- extra patch 14 from Hou to check for object leaks.
- fixed the race/leak in free_by_rcu_ttrace. Extra hunk in patch 8.
- added Acks and fixed typos.

v2->v3:
- dropped _tail optimization for free_by_rcu_ttrace
- new patch 5 to refactor inc/dec of c->active
- changed the 'draining' logic in patch 7
- added rcu_barrier in patch 12
- switched __llist_add -> llist_add(waiting_for_gp_ttrace) in patch 9 to fix a race
- David's Ack in patch 13 and explanation that migrate_disable cannot be removed just yet.

v1->v2:
- Fixed race condition spotted by Hou. Patch 7.

v1:

Introduce bpf_mem_cache_free_rcu(), which is similar to kfree_rcu() except that
the objects go through an additional RCU tasks trace grace period before being
freed into slab.
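
A minimal usage sketch (illustration only, not part of the series; it assumes
'ma' is a bpf_mem_alloc that was set up elsewhere with bpf_mem_alloc_init(),
the way htab and bpf_cpumask already do):

  void *obj;

  obj = bpf_mem_cache_alloc(&ma);	/* allocation side is unchanged */
  ...
  /* Instead of bpf_mem_cache_free(&ma, obj): the object is not reused or
   * returned to slab until both a regular RCU GP and an RCU tasks trace GP
   * have elapsed, so non-sleepable and sleepable progs may keep reading it.
   */
  bpf_mem_cache_free_rcu(&ma, obj);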

Patches 1-9 - a bunch of prep work
Patch 10 - a patch from Paul that exports rcu_request_urgent_qs_task().
Patch 12 - the main bpf_mem_cache_free_rcu patch.
Patch 13 - use it in bpf_cpumask.

bpf_local_storage, bpf_obj_drop, qp-trie will be other users eventually.

With an additional hack patch to htab that replaces bpf_mem_cache_free() with
bpf_mem_cache_free_rcu(), the benchmark results are as follows:
- map_perf_test 4 8 16348 1000000
Performance drops from 800k to 600k. Waiting for the RCU GP makes objects cache cold.

- bench htab-mem -a -p 8
A 20% drop in performance and a big increase in memory, from 3 Mbyte to 50 Mbyte. As expected.

- bench htab-mem -a -p 16 --use-case add_del_on_diff_cpu
Same performance and better memory consumption.
Before these patches this benchmark would OOM (with or without 'reuse after GP');
patch 8 addresses the issue.

In the end, the performance drop and the additional memory consumption due to _rcu()
were expected and came out within a reasonable margin.
Without Paul's patch 10 the memory consumption in 'bench htab-mem' is in Gbytes,
which wouldn't be acceptable.

Patch 8 is a heuristic that addresses the 'alloc on one cpu, free on another' issue.
It works well in practice. One can probably construct an artificial benchmark
that makes the heuristic ineffective, but we have to trade off performance,
code complexity, and memory consumption.
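
The hint itself is tiny; condensed from the unit_alloc()/unit_free() hunks
below (locking and irq handling elided):

  /* unit_alloc() on the producing CPU: the llist_node header is unused
   * once the object is handed out, so stash the owning cache in it.
   */
  *(struct bpf_mem_cache **)llnode = c;

  /* unit_free() on whichever CPU frees it: read the stashed owner back.
   * free_bulk() then targets c->tgt, so memory flows back to the cache
   * that allocated it instead of piling up on the freeing CPU.
   */
  c->tgt = *(struct bpf_mem_cache **)llnode;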

The life cycle of objects (a callback-level sketch follows this list):
alloc: dequeue free_llist
free: enqueue free_llist
free_llist above high watermark -> free_by_rcu_ttrace
free_rcu: enqueue free_by_rcu -> waiting_for_gp
after RCU GP: waiting_for_gp -> free_by_rcu_ttrace
free_by_rcu_ttrace -> waiting_for_gp_ttrace -> slab
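
The same flow in terms of the callbacks added in patch 12 (condensed; the
draining and watermark handling is in the memalloc.c hunks below):

  /* unit_free_rcu() queues the object on free_by_rcu; the per-cpu irq_work
   * (check_free_by_rcu) moves the batch to waiting_for_gp and then does:
   */
  call_rcu_hurry(&c->rcu, __free_by_rcu);
  /* __free_by_rcu(): one RCU GP later, splice the batch onto the target
   * cache's free_by_rcu_ttrace and start the second grace period:
   */
  call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
  /* __free_rcu(): objects on waiting_for_gp_ttrace finally go back to slab */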
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents c21de5fc 4ed8b5bc
@@ -27,10 +27,12 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
 /* kmalloc/kfree equivalent: */
 void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
 void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
 
 /* kmem_cache_alloc/free equivalent: */
 void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
 void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
 void bpf_mem_cache_raw_free(void *ptr);
 void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags);
@@ -138,6 +138,8 @@ static inline int rcu_needs_cpu(void)
 	return 0;
 }
 
+static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
+
 /*
  * Take advantage of the fact that there is only one CPU, which
  * allows us to ignore virtualization-based context switches.
@@ -21,6 +21,7 @@ void rcu_softirq_qs(void);
 void rcu_note_context_switch(bool preempt);
 int rcu_needs_cpu(void);
 void rcu_cpu_stall_reset(void);
+void rcu_request_urgent_qs_task(struct task_struct *t);
 
 /*
  * Note a virtualization-based context switch. This is simply a
@@ -9,7 +9,6 @@
 /**
  * struct bpf_cpumask - refcounted BPF cpumask wrapper structure
  * @cpumask: The actual cpumask embedded in the struct.
- * @rcu: The RCU head used to free the cpumask with RCU safety.
  * @usage: Object reference counter. When the refcount goes to 0, the
  *	   memory is released back to the BPF allocator, which provides
  *	   RCU safety.
@@ -25,7 +24,6 @@
  */
 struct bpf_cpumask {
 	cpumask_t cpumask;
-	struct rcu_head rcu;
 	refcount_t usage;
 };
 
@@ -82,16 +80,6 @@ __bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
 	return cpumask;
 }
 
-static void cpumask_free_cb(struct rcu_head *head)
-{
-	struct bpf_cpumask *cpumask;
-
-	cpumask = container_of(head, struct bpf_cpumask, rcu);
-	migrate_disable();
-	bpf_mem_cache_free(&bpf_cpumask_ma, cpumask);
-	migrate_enable();
-}
-
 /**
  * bpf_cpumask_release() - Release a previously acquired BPF cpumask.
  * @cpumask: The cpumask being released.
@@ -102,8 +90,12 @@ static void cpumask_free_cb(struct rcu_head *head)
  */
 __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
 {
-	if (refcount_dec_and_test(&cpumask->usage))
-		call_rcu(&cpumask->rcu, cpumask_free_cb);
+	if (!refcount_dec_and_test(&cpumask->usage))
+		return;
+
+	migrate_disable();
+	bpf_mem_cache_free_rcu(&bpf_cpumask_ma, cpumask);
+	migrate_enable();
 }
 
 /**
@@ -98,11 +98,23 @@ struct bpf_mem_cache {
 	int free_cnt;
 	int low_watermark, high_watermark, batch;
 	int percpu_size;
+	bool draining;
+	struct bpf_mem_cache *tgt;
 
-	struct rcu_head rcu;
+	/* list of objects to be freed after RCU GP */
 	struct llist_head free_by_rcu;
+	struct llist_node *free_by_rcu_tail;
 	struct llist_head waiting_for_gp;
+	struct llist_node *waiting_for_gp_tail;
+	struct rcu_head rcu;
 	atomic_t call_rcu_in_progress;
+	struct llist_head free_llist_extra_rcu;
+
+	/* list of objects to be freed after RCU tasks trace GP */
+	struct llist_head free_by_rcu_ttrace;
+	struct llist_head waiting_for_gp_ttrace;
+	struct rcu_head rcu_ttrace;
+	atomic_t call_rcu_ttrace_in_progress;
 };
 
 struct bpf_mem_caches {
@@ -153,31 +165,74 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
+static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		/* In RT irq_work runs in per-cpu kthread, so disable
+		 * interrupts to avoid preemption and interrupts and
+		 * reduce the chance of bpf prog executing on this cpu
+		 * when active counter is busy.
+		 */
+		local_irq_save(*flags);
+	/* alloc_bulk runs from irq_work which will not preempt a bpf
+	 * program that does unit_alloc/unit_free since IRQs are
+	 * disabled there. There is no race to increment 'active'
+	 * counter. It protects free_llist from corruption in case NMI
+	 * bpf prog preempted this loop.
+	 */
+	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+}
+
+static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
+{
+	local_dec(&c->active);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_restore(flags);
+}
+
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	inc_active(c, &flags);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	dec_active(c, flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
-	unsigned long flags;
 	void *obj;
 	int i;
 
-	memcg = get_memcg(c);
-	old_memcg = set_active_memcg(memcg);
 	for (i = 0; i < cnt; i++) {
 		/*
-		 * free_by_rcu is only manipulated by irq work refill_work().
-		 * IRQ works on the same CPU are called sequentially, so it is
-		 * safe to use __llist_del_first() here. If alloc_bulk() is
-		 * invoked by the initial prefill, there will be no running
-		 * refill_work(), so __llist_del_first() is fine as well.
-		 *
-		 * In most cases, objects on free_by_rcu are from the same CPU.
-		 * If some objects come from other CPUs, it doesn't incur any
-		 * harm because NUMA_NO_NODE means the preference for current
-		 * numa node and it is not a guarantee.
+		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
+		 * done only by one CPU == current CPU. Other CPUs might
+		 * llist_add() and llist_del_all() in parallel.
 		 */
-		obj = __llist_del_first(&c->free_by_rcu);
-		if (!obj) {
+		obj = llist_del_first(&c->free_by_rcu_ttrace);
+		if (!obj)
+			break;
+		add_obj_to_free_list(c, obj);
+	}
+	if (i >= cnt)
+		return;
+
+	for (; i < cnt; i++) {
+		obj = llist_del_first(&c->waiting_for_gp_ttrace);
+		if (!obj)
+			break;
+		add_obj_to_free_list(c, obj);
+	}
+	if (i >= cnt)
+		return;
+
+	memcg = get_memcg(c);
+	old_memcg = set_active_memcg(memcg);
+	for (; i < cnt; i++) {
 		/* Allocate, but don't deplete atomic reserves that typical
 		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
 		 * will allocate from the current numa node which is what we
@@ -186,26 +241,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
 		if (!obj)
 			break;
-		}
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			/* In RT irq_work runs in per-cpu kthread, so disable
-			 * interrupts to avoid preemption and interrupts and
-			 * reduce the chance of bpf prog executing on this cpu
-			 * when active counter is busy.
-			 */
-			local_irq_save(flags);
-		/* alloc_bulk runs from irq_work which will not preempt a bpf
-		 * program that does unit_alloc/unit_free since IRQs are
-		 * disabled there. There is no race to increment 'active'
-		 * counter. It protects free_llist from corruption in case NMI
-		 * bpf prog preempted this loop.
-		 */
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-		__llist_add(obj, &c->free_llist);
-		c->free_cnt++;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		add_obj_to_free_list(c, obj);
 	}
 	set_active_memcg(old_memcg);
 	mem_cgroup_put(memcg);
@@ -222,20 +258,24 @@ static void free_one(void *obj, bool percpu)
 	kfree(obj);
 }
 
-static void free_all(struct llist_node *llnode, bool percpu)
+static int free_all(struct llist_node *llnode, bool percpu)
 {
 	struct llist_node *pos, *t;
+	int cnt = 0;
 
-	llist_for_each_safe(pos, t, llnode)
+	llist_for_each_safe(pos, t, llnode) {
 		free_one(pos, percpu);
+		cnt++;
+	}
+	return cnt;
 }
 
 static void __free_rcu(struct rcu_head *head)
 {
-	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
+	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);
 
-	free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
-	atomic_set(&c->call_rcu_in_progress, 0);
+	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
+	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
 }
 
 static void __free_rcu_tasks_trace(struct rcu_head *head)
@@ -254,60 +294,128 @@ static void enque_to_free(struct bpf_mem_cache *c, void *obj)
 	struct llist_node *llnode = obj;
 
 	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
-	 * Nothing races to add to free_by_rcu list.
+	 * Nothing races to add to free_by_rcu_ttrace list.
 	 */
-	__llist_add(llnode, &c->free_by_rcu);
+	llist_add(llnode, &c->free_by_rcu_ttrace);
 }
 
-static void do_call_rcu(struct bpf_mem_cache *c)
+static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 {
 	struct llist_node *llnode, *t;
 
-	if (atomic_xchg(&c->call_rcu_in_progress, 1))
+	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
+		if (unlikely(READ_ONCE(c->draining))) {
+			llnode = llist_del_all(&c->free_by_rcu_ttrace);
+			free_all(llnode, !!c->percpu_size);
+		}
 		return;
+	}
 
-	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
-	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
-		/* There is no concurrent __llist_add(waiting_for_gp) access.
-		 * It doesn't race with llist_del_all either.
-		 * But there could be two concurrent llist_del_all(waiting_for_gp):
-		 * from __free_rcu() and from drain_mem_cache().
-		 */
-		__llist_add(llnode, &c->waiting_for_gp);
+	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
+	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
+		llist_add(llnode, &c->waiting_for_gp_ttrace);
+
+	if (unlikely(READ_ONCE(c->draining))) {
+		__free_rcu(&c->rcu_ttrace);
+		return;
+	}
+
 	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
 	 * If RCU Tasks Trace grace period implies RCU grace period, free
 	 * these elements directly, else use call_rcu() to wait for normal
 	 * progs to finish and finally do free_one() on each element.
 	 */
-	call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
+	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
 }
 
 static void free_bulk(struct bpf_mem_cache *c)
 {
+	struct bpf_mem_cache *tgt = c->tgt;
 	struct llist_node *llnode, *t;
 	unsigned long flags;
 	int cnt;
 
+	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
+
 	do {
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_save(flags);
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+		inc_active(c, &flags);
 		llnode = __llist_del_first(&c->free_llist);
 		if (llnode)
 			cnt = --c->free_cnt;
 		else
 			cnt = 0;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		dec_active(c, flags);
 		if (llnode)
-			enque_to_free(c, llnode);
+			enque_to_free(tgt, llnode);
 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
 
 	/* and drain free_llist_extra */
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
-		enque_to_free(c, llnode);
-	do_call_rcu(c);
+		enque_to_free(tgt, llnode);
+	do_call_rcu_ttrace(tgt);
+}
+
+static void __free_by_rcu(struct rcu_head *head)
+{
+	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
+	struct bpf_mem_cache *tgt = c->tgt;
+	struct llist_node *llnode;
+
+	llnode = llist_del_all(&c->waiting_for_gp);
+	if (!llnode)
+		goto out;
+
+	llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);
+
+	/* Objects went through regular RCU GP. Send them to RCU tasks trace */
+	do_call_rcu_ttrace(tgt);
+out:
+	atomic_set(&c->call_rcu_in_progress, 0);
+}
+
+static void check_free_by_rcu(struct bpf_mem_cache *c)
+{
+	struct llist_node *llnode, *t;
+	unsigned long flags;
+
+	/* drain free_llist_extra_rcu */
+	if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
+		inc_active(c, &flags);
+		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
+			if (__llist_add(llnode, &c->free_by_rcu))
+				c->free_by_rcu_tail = llnode;
+		dec_active(c, flags);
+	}
+
+	if (llist_empty(&c->free_by_rcu))
+		return;
+
+	if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
+		/*
+		 * Instead of kmalloc-ing new rcu_head and triggering 10k
+		 * call_rcu() to hit rcutree.qhimark and force RCU to notice
+		 * the overload just ask RCU to hurry up. There could be many
+		 * objects in free_by_rcu list.
+		 * This hint reduces memory consumption for an artificial
+		 * benchmark from 2 Gbyte to 150 Mbyte.
		 */
+		rcu_request_urgent_qs_task(current);
+		return;
+	}
+
+	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
+
+	inc_active(c, &flags);
+	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
+	c->waiting_for_gp_tail = c->free_by_rcu_tail;
+	dec_active(c, flags);
+
+	if (unlikely(READ_ONCE(c->draining))) {
+		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
+		atomic_set(&c->call_rcu_in_progress, 0);
+	} else {
+		call_rcu_hurry(&c->rcu, __free_by_rcu);
+	}
 }
static void bpf_mem_refill(struct irq_work *work) static void bpf_mem_refill(struct irq_work *work)
...@@ -324,6 +432,8 @@ static void bpf_mem_refill(struct irq_work *work) ...@@ -324,6 +432,8 @@ static void bpf_mem_refill(struct irq_work *work)
alloc_bulk(c, c->batch, NUMA_NO_NODE); alloc_bulk(c, c->batch, NUMA_NO_NODE);
else if (cnt > c->high_watermark) else if (cnt > c->high_watermark)
free_bulk(c); free_bulk(c);
check_free_by_rcu(c);
} }
static void notrace irq_work_raise(struct bpf_mem_cache *c) static void notrace irq_work_raise(struct bpf_mem_cache *c)
@@ -406,6 +516,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c->unit_size = unit_size;
 			c->objcg = objcg;
 			c->percpu_size = percpu_size;
+			c->tgt = c;
 			prefill_mem_cache(c, cpu);
 		}
 		ma->cache = pc;
@@ -428,6 +539,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c = &cc->cache[i];
 			c->unit_size = sizes[i];
 			c->objcg = objcg;
+			c->tgt = c;
 			prefill_mem_cache(c, cpu);
 		}
 	}
@@ -441,19 +553,57 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
 
 	/* No progs are using this bpf_mem_cache, but htab_map_free() called
 	 * bpf_mem_cache_free() for all remaining elements and they can be in
-	 * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
+	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
 	 *
-	 * Except for waiting_for_gp list, there are no concurrent operations
+	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
 	 * on these lists, so it is safe to use __llist_del_all().
 	 */
-	free_all(__llist_del_all(&c->free_by_rcu), percpu);
-	free_all(llist_del_all(&c->waiting_for_gp), percpu);
+	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
+	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
 	free_all(__llist_del_all(&c->free_llist), percpu);
 	free_all(__llist_del_all(&c->free_llist_extra), percpu);
+	free_all(__llist_del_all(&c->free_by_rcu), percpu);
+	free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
+	free_all(llist_del_all(&c->waiting_for_gp), percpu);
 }
 
+static void check_mem_cache(struct bpf_mem_cache *c)
+{
+	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
+	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
+	WARN_ON_ONCE(!llist_empty(&c->free_llist));
+	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
+	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
+	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
+	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
+}
+
+static void check_leaked_objs(struct bpf_mem_alloc *ma)
+{
+	struct bpf_mem_caches *cc;
+	struct bpf_mem_cache *c;
+	int cpu, i;
+
+	if (ma->cache) {
+		for_each_possible_cpu(cpu) {
+			c = per_cpu_ptr(ma->cache, cpu);
+			check_mem_cache(c);
+		}
+	}
+	if (ma->caches) {
+		for_each_possible_cpu(cpu) {
+			cc = per_cpu_ptr(ma->caches, cpu);
+			for (i = 0; i < NUM_CACHES; i++) {
+				c = &cc->cache[i];
+				check_mem_cache(c);
+			}
+		}
+	}
+}
+
 static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
 {
+	check_leaked_objs(ma);
 	free_percpu(ma->cache);
 	free_percpu(ma->caches);
 	ma->cache = NULL;
@@ -462,8 +612,8 @@ static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
 
 static void free_mem_alloc(struct bpf_mem_alloc *ma)
 {
-	/* waiting_for_gp lists was drained, but __free_rcu might
-	 * still execute. Wait for it now before we freeing percpu caches.
+	/* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
+	 * might still execute. Wait for them.
 	 *
 	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
 	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
@@ -472,7 +622,8 @@ static void free_mem_alloc(struct bpf_mem_alloc *ma)
 	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
 	 * using rcu_trace_implies_rcu_gp() as well.
 	 */
-	rcu_barrier_tasks_trace();
+	rcu_barrier(); /* wait for __free_by_rcu */
+	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
 	if (!rcu_trace_implies_rcu_gp())
 		rcu_barrier();
 	free_mem_alloc_no_barrier(ma);
@@ -498,7 +649,7 @@ static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
 		return;
 	}
 
-	copy = kmalloc(sizeof(*ma), GFP_KERNEL);
+	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
 	if (!copy) {
 		/* Slow path with inline barrier-s */
 		free_mem_alloc(ma);
@@ -506,10 +657,7 @@
 	}
 
 	/* Defer barriers into worker to let the rest of map memory to be freed */
-	copy->cache = ma->cache;
-	ma->cache = NULL;
-	copy->caches = ma->caches;
-	ma->caches = NULL;
+	memset(ma, 0, sizeof(*ma));
 	INIT_WORK(&copy->work, free_mem_alloc_deferred);
 	queue_work(system_unbound_wq, &copy->work);
 }
@@ -524,17 +672,10 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 		rcu_in_progress = 0;
 		for_each_possible_cpu(cpu) {
 			c = per_cpu_ptr(ma->cache, cpu);
-			/*
-			 * refill_work may be unfinished for PREEMPT_RT kernel
-			 * in which irq work is invoked in a per-CPU RT thread.
-			 * It is also possible for kernel with
-			 * arch_irq_work_has_interrupt() being false and irq
-			 * work is invoked in timer interrupt. So waiting for
-			 * the completion of irq work to ease the handling of
-			 * concurrency.
-			 */
+			WRITE_ONCE(c->draining, true);
 			irq_work_sync(&c->refill_work);
 			drain_mem_cache(c);
+			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
 			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
 		}
 		/* objcg is the same across cpus */
@@ -548,8 +689,10 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 			cc = per_cpu_ptr(ma->caches, cpu);
 			for (i = 0; i < NUM_CACHES; i++) {
 				c = &cc->cache[i];
+				WRITE_ONCE(c->draining, true);
 				irq_work_sync(&c->refill_work);
 				drain_mem_cache(c);
+				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
 				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
 			}
 		}
@@ -581,8 +724,10 @@ static void notrace *unit_alloc(struct bpf_mem_cache *c)
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		llnode = __llist_del_first(&c->free_llist);
-		if (llnode)
+		if (llnode) {
 			cnt = --c->free_cnt;
+			*(struct bpf_mem_cache **)llnode = c;
+		}
 	}
 	local_dec(&c->active);
 	local_irq_restore(flags);
@@ -606,6 +751,12 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 
 	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
 
+	/*
+	 * Remember bpf_mem_cache that allocated this object.
+	 * The hint is not accurate.
+	 */
+	c->tgt = *(struct bpf_mem_cache **)llnode;
+
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		__llist_add(llnode, &c->free_llist);
@@ -627,6 +778,27 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 		irq_work_raise(c);
 }
 
+static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
+{
+	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
+	unsigned long flags;
+
+	c->tgt = *(struct bpf_mem_cache **)llnode;
+
+	local_irq_save(flags);
+	if (local_inc_return(&c->active) == 1) {
+		if (__llist_add(llnode, &c->free_by_rcu))
+			c->free_by_rcu_tail = llnode;
+	} else {
+		llist_add(llnode, &c->free_llist_extra_rcu);
+	}
+	local_dec(&c->active);
+	local_irq_restore(flags);
+
+	if (!atomic_read(&c->call_rcu_in_progress))
+		irq_work_raise(c);
+}
+
 /* Called from BPF program or from sys_bpf syscall.
  * In both cases migration is disabled.
  */
@@ -660,6 +832,20 @@ void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
 	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
 }
 
+void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
+{
+	int idx;
+
+	if (!ptr)
+		return;
+
+	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
+	if (idx < 0)
+		return;
+
+	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
+}
+
 void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
 {
 	void *ret;
@@ -676,6 +862,14 @@ void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
 	unit_free(this_cpu_ptr(ma->cache), ptr);
 }
 
+void notrace bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
+{
+	if (!ptr)
+		return;
+
+	unit_free_rcu(this_cpu_ptr(ma->cache), ptr);
+}
+
 /* Directly does a kfree() without putting 'ptr' back to the free_llist
  * for reuse and without waiting for a rcu_tasks_trace gp.
  * The caller must first go through the rcu_tasks_trace gp for 'ptr'
@@ -493,7 +493,6 @@ static inline void rcu_expedite_gp(void) { }
 static inline void rcu_unexpedite_gp(void) { }
 static inline void rcu_async_hurry(void) { }
 static inline void rcu_async_relax(void) { }
-static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
 #else /* #ifdef CONFIG_TINY_RCU */
 bool rcu_gp_is_normal(void);     /* Internal RCU use. */
 bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
@@ -508,7 +507,6 @@ void show_rcu_tasks_gp_kthreads(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
 static inline void show_rcu_tasks_gp_kthreads(void) {}
 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
-void rcu_request_urgent_qs_task(struct task_struct *t);
 #endif /* #else #ifdef CONFIG_TINY_RCU */
 
 #define RCU_SCHEDULER_INACTIVE	0
@@ -96,7 +96,7 @@ static __always_inline
 int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
 {
 	struct bpf_list_node *n;
-	struct foo *f[8], *pf;
+	struct foo *f[200], *pf;
 	int i;
 
 	/* Loop following this check adds nodes 2-at-a-time in order to