Commit 366cbf2f authored by Daniel Borkmann, committed by David S. Miller

bpf, xdp: drop rcu_read_lock from bpf_prog_run_xdp and move to caller

After 326fe02d ("net/mlx4_en: protect ring->xdp_prog with rcu_read_lock"),
the rcu_read_lock() in bpf_prog_run_xdp() is superfluous, since callers
already need to hold rcu_read_lock() to make sure the BPF program doesn't
get released in the background.

Thus, drop it from bpf_prog_run_xdp(), as it can otherwise be misleading.
Still keeping the bpf_prog_run_xdp() helper is useful, as it allows for
grepping in XDP-supporting drivers and keeps the typecheck on the context
intact. For mlx4, this means we don't take a double rcu_read_lock() anymore.
nfp can just make use of bpf_prog_run_xdp(), too. For qede, just move
rcu_read_lock() out of the helper into the driver. When the driver gets
atomic replace support, this will eventually move to the call-sites.
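
The calling convention on a driver's RX path then looks roughly like the
following minimal sketch (not taken verbatim from any of the drivers above;
ring->xdp_prog stands in for wherever a driver actually stores its program):

	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(ring->xdp_prog);	/* hypothetical ring layout */
	if (xdp_prog) {
		xdp.data = data;
		xdp.data_end = data + len;
		/* Program and map elements stay alive for the duration
		 * of the read-side section.
		 */
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		/* ... handle XDP_PASS / XDP_TX / XDP_DROP ... */
	}
	rcu_read_unlock();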

mlx5 needs an actual fix, as it has the same issue already described in
326fe02d ("net/mlx4_en: protect ring->xdp_prog with rcu_read_lock"):
we're under RCU bh at this point, BPF programs are released via
call_rcu(), and call_rcu() != call_rcu_bh(), so we need to properly mark
the read side, since programs can get xchg()'ed in mlx5e_xdp_set() without
a queue reset.
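
For illustration, the update side boils down to something like the
following sketch (reduced from mlx5e_xdp_set(); locking and setup omitted),
which is what makes the explicit read-side marking necessary:

	old_prog = xchg(&rq->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);	/* final release goes via call_rcu() */

Since the old program can be put while packets are still in flight, only a
full rcu_read_lock() section on the RX side guarantees it is not freed
underneath a running program.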

Fixes: 86994156 ("net/mlx5e: XDP fast RX drop bpf programs support")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 83a1a1a7
@@ -737,10 +737,10 @@ static inline
 struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 			     u16 wqe_counter, u32 cqe_bcnt)
 {
-	struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
 	struct mlx5e_dma_info *di;
 	struct sk_buff *skb;
 	void *va, *data;
+	bool consumed;

 	di = &rq->dma_info[wqe_counter];
 	va = page_address(di->page);
@@ -759,7 +759,11 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 		return NULL;
 	}

-	if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
+	rcu_read_lock();
+	consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
+				    cqe_bcnt);
+	rcu_read_unlock();
+	if (consumed)
 		return NULL; /* page/packet was consumed by XDP */

 	skb = build_skb(va, RQ_PAGE_SIZE(rq));
@@ -1518,7 +1518,7 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
 	xdp.data = data;
 	xdp.data_end = data + len;

-	return BPF_PROG_RUN(prog, &xdp);
+	return bpf_prog_run_xdp(prog, &xdp);
 }

 /**
@@ -1497,7 +1497,14 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	xdp.data = page_address(bd->data) + cqe->placement_offset;
 	xdp.data_end = xdp.data + len;

+	/* Queues always have a full reset currently, so for the time
+	 * being until there's atomic program replace just mark read
+	 * side for map helpers.
+	 */
+	rcu_read_lock();
 	act = bpf_prog_run_xdp(prog, &xdp);
+	rcu_read_unlock();

 	if (act == XDP_PASS)
 		return true;
@@ -498,16 +498,16 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 	return BPF_PROG_RUN(prog, skb);
 }

-static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
-				   struct xdp_buff *xdp)
+static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+					    struct xdp_buff *xdp)
 {
-	u32 ret;
-
-	rcu_read_lock();
-	ret = BPF_PROG_RUN(prog, xdp);
-	rcu_read_unlock();
-
-	return ret;
+	/* Caller needs to hold rcu_read_lock() (!), otherwise program
+	 * can be released while still running, or map elements could be
+	 * freed early while still having concurrent users. XDP fastpath
+	 * already takes rcu_read_lock() when fetching the program, so
+	 * it's not necessary here anymore.
+	 */
+	return BPF_PROG_RUN(prog, xdp);
 }

 static inline unsigned int bpf_prog_size(unsigned int proglen)