Commit 20192d9c authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Andrii Nakryiko says:

====================
pull-request: bpf 2021-07-15

The following pull-request contains BPF updates for your *net* tree.

We've added 9 non-merge commits during the last 5 day(s) which contain
a total of 9 files changed, 37 insertions(+), 15 deletions(-).

The main changes are:

1) Fix NULL pointer dereference in BPF_TEST_RUN for BPF_XDP_DEVMAP and
   BPF_XDP_CPUMAP programs, from Xuan Zhuo.

2) Fix use-after-free of net_device in XDP bpf_link, from Xuan Zhuo.

3) Follow-up fix to subprog poke descriptor use-after-free problem, from
   Daniel Borkmann and John Fastabend.

4) Fix out-of-range array access in s390 BPF JIT backend, from Colin Ian King.

5) Fix memory leak in BPF sockmap, from John Fastabend.

6) Fix for sockmap to prevent a proc stats reporting bug, from John Fastabend
   and Jakub Sitnicki.

7) Fix NULL pointer dereference in bpftool, from Tobias Klauser.

8) AF_XDP documentation fixes, from Baruch Siach.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a6ecfb39 d444b06e
@@ -243,8 +243,8 @@ Configuration Flags and Socket Options
 These are the various configuration flags that can be used to control
 and monitor the behavior of AF_XDP sockets.
 
-XDP_COPY and XDP_ZERO_COPY bind flags
--------------------------------------
+XDP_COPY and XDP_ZEROCOPY bind flags
+------------------------------------
 
 When you bind to a socket, the kernel will first try to use zero-copy
 copy. If zero-copy is not supported, it will fall back on using copy
@@ -252,7 +252,7 @@ mode, i.e. copying all packets out to user space. But if you would
 like to force a certain mode, you can use the following flags. If you
 pass the XDP_COPY flag to the bind call, the kernel will force the
 socket into copy mode. If it cannot use copy mode, the bind call will
-fail with an error. Conversely, the XDP_ZERO_COPY flag will force the
+fail with an error. Conversely, the XDP_ZEROCOPY flag will force the
 socket into zero-copy mode or fail.
 
 XDP_SHARED_UMEM bind flag
......
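For reference, a minimal user-space sketch of the bind-flag behavior the documentation hunk above describes: request zero-copy first, then fall back to copy mode if the driver cannot do it. This is illustrative only; the interface name and queue id are placeholders, and the UMEM registration and ring setup that must precede bind() are omitted.

/* Minimal sketch of the XDP_ZEROCOPY/XDP_COPY fallback described above.
 * Error handling and UMEM/ring setup are omitted; ifname and queue_id
 * are placeholders.
 */
#include <linux/if_xdp.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

int bind_xsk(int xsk_fd, const char *ifname, unsigned int queue_id)
{
        struct sockaddr_xdp sxdp;

        memset(&sxdp, 0, sizeof(sxdp));
        sxdp.sxdp_family = AF_XDP;
        sxdp.sxdp_ifindex = if_nametoindex(ifname);
        sxdp.sxdp_queue_id = queue_id;
        sxdp.sxdp_flags = XDP_ZEROCOPY;         /* force zero-copy or fail */

        if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0)
                return 0;

        /* Driver has no zero-copy support: retry in copy mode. */
        sxdp.sxdp_flags = XDP_COPY;
        return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}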
@@ -112,7 +112,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 {
         u32 r1 = reg2hex[b1];
 
-        if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
+        if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
                 jit->seen_reg[r1] = 1;
 }
......
@@ -3677,6 +3677,8 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
         if (tail_call_reachable)
                 for (j = 0; j < frame; j++)
                         subprog[ret_prog[j]].tail_call_reachable = true;
+        if (subprog[0].tail_call_reachable)
+                env->prog->aux->tail_call_reachable = true;
 
         /* end of for() loop means the last insn of the 'subprog'
          * was reached. Doesn't matter whether it was JA or EXIT
......
@@ -701,6 +701,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
         void *data;
         int ret;
 
+        if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
+            prog->expected_attach_type == BPF_XDP_CPUMAP)
+                return -EINVAL;
         if (kattr->test.ctx_in || kattr->test.ctx_out)
                 return -EINVAL;
......
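For context, a hedged user-space sketch of the path this new check guards, assuming a libbpf recent enough to provide bpf_prog_test_run_opts(): BPF_PROG_TEST_RUN feeds a synthetic packet to an XDP program, but programs loaded with expected_attach_type BPF_XDP_DEVMAP or BPF_XDP_CPUMAP expect state that test_run does not set up, so they are now rejected with -EINVAL instead of triggering a NULL pointer dereference.

/* Hypothetical sketch: running an ordinary XDP program under
 * BPF_PROG_TEST_RUN via libbpf. With the fix above, a devmap/cpumap
 * program fd fails here with -EINVAL rather than crashing the kernel.
 */
#include <bpf/bpf.h>
#include <stdio.h>

int test_run_xdp(int prog_fd)
{
        char pkt[64] = {0};     /* dummy Ethernet frame */
        LIBBPF_OPTS(bpf_test_run_opts, opts,
                .data_in = pkt,
                .data_size_in = sizeof(pkt),
                .repeat = 1,
        );
        int err;

        err = bpf_prog_test_run_opts(prog_fd, &opts);
        if (err)
                return err;     /* -EINVAL for devmap/cpumap programs */

        printf("retval=%u duration=%uns\n", opts.retval, opts.duration);
        return 0;
}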
@@ -9712,14 +9712,17 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
         struct net_device *dev;
         int err, fd;
 
+        rtnl_lock();
         dev = dev_get_by_index(net, attr->link_create.target_ifindex);
-        if (!dev)
+        if (!dev) {
+                rtnl_unlock();
                 return -EINVAL;
+        }
 
         link = kzalloc(sizeof(*link), GFP_USER);
         if (!link) {
                 err = -ENOMEM;
-                goto out_put_dev;
+                goto unlock;
         }
 
         bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
@@ -9729,14 +9732,14 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
         err = bpf_link_prime(&link->link, &link_primer);
         if (err) {
                 kfree(link);
-                goto out_put_dev;
+                goto unlock;
         }
 
-        rtnl_lock();
         err = dev_xdp_attach_link(dev, NULL, link);
         rtnl_unlock();
 
         if (err) {
+                link->dev = NULL;
                 bpf_link_cleanup(&link_primer);
                 goto out_put_dev;
         }
@@ -9746,6 +9749,9 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
         dev_put(dev);
         return fd;
 
+unlock:
+        rtnl_unlock();
+
 out_put_dev:
         dev_put(dev);
         return err;
......
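The hunk above takes rtnl_lock() around the whole attach and clears link->dev in the error path before bpf_link_cleanup(), so the deferred link release cannot touch a net_device it never attached to. For context, a hedged sketch of the user-space call that reaches bpf_xdp_link_attach(), using libbpf's bpf_link_create(); prog_fd and ifindex are placeholders.

/* Hypothetical sketch: creating an XDP bpf_link from user space. */
#include <bpf/bpf.h>

int attach_xdp_link(int prog_fd, int ifindex)
{
        /* For BPF_XDP links, target_fd carries the target ifindex. */
        int link_fd = bpf_link_create(prog_fd, ifindex, BPF_XDP, NULL);

        if (link_fd < 0)
                return link_fd;         /* -errno from libbpf */

        /* The program stays attached until link_fd is closed or pinned. */
        return link_fd;
}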
@@ -508,10 +508,8 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
         if (skb_linearize(skb))
                 return -EAGAIN;
         num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
-        if (unlikely(num_sge < 0)) {
-                kfree(msg);
+        if (unlikely(num_sge < 0))
                 return num_sge;
-        }
 
         copied = skb->len;
         msg->sg.start = 0;
@@ -530,6 +528,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 {
         struct sock *sk = psock->sk;
         struct sk_msg *msg;
+        int err;
 
         /* If we are receiving on the same sock skb->sk is already assigned,
          * skip memory accounting and owner transition seeing it already set
@@ -548,7 +547,10 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
          * into user buffers.
          */
         skb_set_owner_r(skb, sk);
-        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+        err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+        if (err < 0)
+                kfree(msg);
+        return err;
 }
 
 /* Puts an skb on the ingress queue of the socket already assigned to the
@@ -559,12 +561,16 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
 {
         struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
         struct sock *sk = psock->sk;
+        int err;
 
         if (unlikely(!msg))
                 return -EAGAIN;
         sk_msg_init(msg);
         skb_set_owner_r(skb, sk);
-        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+        err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+        if (err < 0)
+                kfree(msg);
+        return err;
 }
 
 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
......
@@ -503,7 +503,7 @@ static int __init tcp_bpf_v4_build_proto(void)
         tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
         return 0;
 }
-core_initcall(tcp_bpf_v4_build_proto);
+late_initcall(tcp_bpf_v4_build_proto);
 
 static int tcp_bpf_assert_proto_ops(struct proto *ops)
 {
......
@@ -134,7 +134,7 @@ static int __init udp_bpf_v4_build_proto(void)
         udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot);
         return 0;
 }
-core_initcall(udp_bpf_v4_build_proto);
+late_initcall(udp_bpf_v4_build_proto);
 
 int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
 {
......
@@ -222,6 +222,11 @@ int mount_bpffs_for_pin(const char *name)
         int err = 0;
 
         file = malloc(strlen(name) + 1);
+        if (!file) {
+                p_err("mem alloc failed");
+                return -1;
+        }
+
         strcpy(file, name);
         dir = dirname(file);
......