Commit f16d411c authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2023-09-06

We've added 9 non-merge commits during the last 6 day(s) which contain
a total of 12 files changed, 189 insertions(+), 44 deletions(-).

The main changes are:

1) Two bpf_sk_storage fixes: one for an invalid wait context lockdep
   report and one for a missing sk_omem_alloc uncharge,
   from Martin KaFai Lau.

2) Two BPF recursion detection related fixes,
   from Sebastian Andrzej Siewior.

3) Fix tailcall limit enforcement in trampolines for s390 JIT,
   from Ilya Leoshkevich.

4) Fix a sockmap refcount race where skbs in sk_psock_backlog can
   be referenced after the user space side has already consumed them,
   from John Fastabend.

5) Fix a BPF CI flake/race in the sockmap vsock write test, where a
   write could run before the transport endpoint is connected, from Xu Kuohai.

6) Follow-up doc fix to address cross-link warnings,
   from Eduard Zingerman.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Check bpf_sk_storage has uncharged sk_omem_alloc
  bpf: bpf_sk_storage: Fix the missing uncharge in sk_omem_alloc
  bpf: bpf_sk_storage: Fix invalid wait context lockdep report
  s390/bpf: Pass through tail call counter in trampolines
  bpf: Assign bpf_tramp_run_ctx::saved_run_ctx before recursion check.
  bpf: Invoke __bpf_prog_exit_sleepable_recur() on recursion in kern_sys_bpf().
  bpf, sockmap: Fix skb refcnt race after locking changes
  docs/bpf: Fix "file doesn't exist" warnings in {llvm_reloc,btf}.rst
  selftests/bpf: Fix a CI failure caused by vsock write
====================

Link: https://lore.kernel.org/r/20230906095117.16941-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 1a961e74 a96d1cfb
@@ -803,7 +803,7 @@ structure when .BTF.ext is generated. All ``bpf_core_relo`` structures
within a single ``btf_ext_info_sec`` describe relocations applied to
section named by ``btf_ext_info_sec->sec_name_off``.
-See :ref:`Documentation/bpf/llvm_reloc <btf-co-re-relocations>`
+See :ref:`Documentation/bpf/llvm_reloc.rst <btf-co-re-relocations>`
for more information on CO-RE relocations.
4.2 .BTF_ids section
......
@@ -250,7 +250,7 @@ CO-RE Relocations
From object file point of view CO-RE mechanism is implemented as a set
of CO-RE specific relocation records. These relocation records are not
related to ELF relocations and are encoded in .BTF.ext section.
-See :ref:`Documentation/bpf/btf <BTF_Ext_Section>` for more
+See :ref:`Documentation/bpf/btf.rst <BTF_Ext_Section>` for more
information on .BTF.ext structure.
CO-RE relocations are applied to BPF instructions to update immediate
......
@@ -2088,6 +2088,7 @@ struct bpf_tramp_jit {
*/
int r14_off; /* Offset of saved %r14 */
int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */
+int tccnt_off; /* Offset of saved tailcall counter */
int do_fexit; /* do_fexit: label */
};
@@ -2258,12 +2259,16 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
tjit->r14_off = alloc_stack(tjit, sizeof(u64));
tjit->run_ctx_off = alloc_stack(tjit,
sizeof(struct bpf_tramp_run_ctx));
+tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
/* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
tjit->stack_size -= STACK_FRAME_OVERHEAD;
tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
/* aghi %r15,-stack_size */
EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
+/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
+_EMIT6(0xd203f000 | tjit->tccnt_off,
+0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
if (nr_reg_args)
EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
@@ -2400,6 +2405,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
(nr_stack_args * sizeof(u64) - 1) << 16 |
tjit->stack_args_off,
0xf000 | tjit->orig_stack_args_off);
+/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
+_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
/* lgr %r1,%r8 */
EMIT4(0xb9040000, REG_1, REG_8);
/* %r1() */
@@ -2456,6 +2463,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
tjit->retval_off);
+/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
+_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
+0xf000 | tjit->tccnt_off);
/* aghi %r15,stack_size */
EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
/* Emit an expoline for the following indirect jump. */
......
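The three mvc instructions added in the hunks above exist because BPF programs on s390 keep their tail call counter in the caller's stack frame: the trampoline's prologue copies the counter into its own tccnt_off slot, the pre-call copy places it where the original program will look for it, and the epilogue writes it back before the frame is released. A minimal C model of that pass-through (the frame layout, word index, and names are illustrative stand-ins, not the JIT's actual offsets):

```c
#include <stdint.h>
#include <stdio.h>

#define STK_OFF_TCCNT 9                 /* hypothetical word index, not the real s390 offset */

struct frame { uint32_t words[64]; };   /* stand-in for a stack frame */

int main(void)
{
	struct frame caller = { { 0 } }, tramp = { { 0 } };
	uint32_t saved_tccnt;           /* models the trampoline's tccnt_off slot */

	caller.words[STK_OFF_TCCNT] = 3;        /* three tail calls so far */

	/* prologue: mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
	saved_tccnt = caller.words[STK_OFF_TCCNT];

	/* before calling the original program:
	 * mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15)
	 * The callee reads the counter from its caller's frame, which is
	 * now the trampoline's, so the count must be staged there. */
	tramp.words[STK_OFF_TCCNT] = saved_tccnt;
	/* ... the original BPF program runs here ... */

	/* epilogue: mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15)
	 * Hand the counter back to the real caller's frame. */
	caller.words[STK_OFF_TCCNT] = saved_tccnt;

	printf("counter after trampoline: %u\n", caller.words[STK_OFF_TCCNT]);
	return 0;
}
```

Without the pass-through, the traced program would find whatever happened to be in the trampoline's fresh frame, so the tail call limit was not reliably enforced.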
@@ -553,7 +553,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
void *value, u64 map_flags, gfp_t gfp_flags)
{
struct bpf_local_storage_data *old_sdata = NULL;
-struct bpf_local_storage_elem *selem = NULL;
+struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
struct bpf_local_storage *local_storage;
unsigned long flags;
int err;
@@ -607,11 +607,12 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
}
}
-if (gfp_flags == GFP_KERNEL) {
-selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
-if (!selem)
-return ERR_PTR(-ENOMEM);
-}
+/* A lookup has just been done before and concluded a new selem is
+* needed. The chance of an unnecessary alloc is unlikely.
+*/
+alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
+if (!alloc_selem)
+return ERR_PTR(-ENOMEM);
raw_spin_lock_irqsave(&local_storage->lock, flags);
@@ -623,13 +624,13 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
* simple.
*/
err = -EAGAIN;
-goto unlock_err;
+goto unlock;
}
old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
err = check_flags(old_sdata, map_flags);
if (err)
-goto unlock_err;
+goto unlock;
if (old_sdata && (map_flags & BPF_F_LOCK)) {
copy_map_value_locked(&smap->map, old_sdata->data, value,
@@ -638,23 +639,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
goto unlock;
}
-if (gfp_flags != GFP_KERNEL) {
-/* local_storage->lock is held. Hence, we are sure
-* we can unlink and uncharge the old_sdata successfully
-* later. Hence, instead of charging the new selem now
-* and then uncharge the old selem later (which may cause
-* a potential but unnecessary charge failure), avoid taking
-* a charge at all here (the "!old_sdata" check) and the
-* old_sdata will not be uncharged later during
-* bpf_selem_unlink_storage_nolock().
-*/
-selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
-if (!selem) {
-err = -ENOMEM;
-goto unlock_err;
-}
-}
+alloc_selem = NULL;
/* First, link the new selem to the map */
bpf_selem_link_map(smap, selem);
@@ -665,20 +650,16 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
if (old_sdata) {
bpf_selem_unlink_map(SELEM(old_sdata));
bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
-false, false);
+true, false);
}
unlock:
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
-return SDATA(selem);
-unlock_err:
-raw_spin_unlock_irqrestore(&local_storage->lock, flags);
-if (selem) {
+if (alloc_selem) {
mem_uncharge(smap, owner, smap->elem_size);
-bpf_selem_free(selem, smap, true);
+bpf_selem_free(alloc_selem, smap, true);
}
-return ERR_PTR(err);
+return err ? ERR_PTR(err) : SDATA(selem);
}
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
@@ -779,7 +760,7 @@ void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
* of the loop will set the free_cgroup_storage to true.
*/
free_storage = bpf_selem_unlink_storage_nolock(
-local_storage, selem, false, true);
+local_storage, selem, true, true);
}
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
......
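Taken together, the bpf_local_storage_update() hunks above implement a single pattern: allocate and charge the new element up front with no lock held (a GFP_KERNEL allocation may sleep, which is what made allocating under the raw spinlock an invalid wait context), transfer ownership under the lock, and route every exit through one path that uncharges and frees only an element that was never published. The false -> true flag flips serve the same goal: unlink paths now uncharge the element they remove. A compilable sketch of that shape, with a pthread mutex and hypothetical helpers standing in for the kernel's raw spinlock, selem list, and omem accounting:

```c
#include <pthread.h>
#include <stdlib.h>

struct elem { int value; };

struct storage {
	pthread_mutex_t lock;   /* stand-in for local_storage->lock */
	struct elem *slot;      /* stand-in for the linked selem */
	long omem;              /* stand-in for sk_omem_alloc */
};

static struct elem *storage_update(struct storage *s, int value)
{
	struct elem *alloc_e, *e;
	int err = 0;

	/* Charge and allocate before taking the lock. */
	s->omem += sizeof(*e);
	alloc_e = e = malloc(sizeof(*e));
	if (!alloc_e) {
		s->omem -= sizeof(*e);
		return NULL;    /* ERR_PTR(-ENOMEM) in the kernel */
	}
	e->value = value;

	pthread_mutex_lock(&s->lock);
	if (!s->slot) {
		s->slot = e;    /* published: ownership transferred */
		alloc_e = NULL; /* mirrors "alloc_selem = NULL;" */
	} else {
		err = -1;       /* raced with another updater: keep old */
	}
	pthread_mutex_unlock(&s->lock);

	/* One exit path: an element that was never published is both
	 * uncharged and freed, so no charge can leak. */
	if (alloc_e) {
		s->omem -= sizeof(*alloc_e);
		free(alloc_e);
	}
	return err ? NULL : e;
}

int main(void)
{
	struct storage s = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	return storage_update(&s, 0xdeadbeef) ? 0 : 1;
}
```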
@@ -5502,9 +5502,9 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
}
run_ctx.bpf_cookie = 0;
-run_ctx.saved_run_ctx = NULL;
if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
/* recursion detected */
+__bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
bpf_prog_put(prog);
return -EBUSY;
}
......
@@ -926,13 +926,12 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
migrate_disable();
might_fault();
+run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
bpf_prog_inc_misses_counter(prog);
return 0;
}
-run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
return bpf_prog_start_time();
}
......
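The syscall.c and trampoline.c hunks above are two halves of one contract: __bpf_prog_enter_sleepable_recur() now installs saved_run_ctx before the recursion check, so a caller that sees the enter fail, such as kern_sys_bpf(), must invoke the matching exit to restore the previous run context instead of returning directly. A small self-contained model of the pairing (simplified stand-ins, not the kernel's types):

```c
#include <stdio.h>

struct run_ctx { struct run_ctx *saved; };

static __thread struct run_ctx *cur_ctx; /* stand-in for the task's run ctx */
static __thread int active;              /* stand-in for prog->active */

static int prog_enter(struct run_ctx *ctx)
{
	/* The trampoline.c hunk: save/install the context *before* the
	 * recursion check, so even a failed enter leaves ctx->saved valid. */
	ctx->saved = cur_ctx;
	cur_ctx = ctx;
	if (++active != 1)
		return 0;               /* recursion detected */
	return 1;
}

static void prog_exit(struct run_ctx *ctx)
{
	cur_ctx = ctx->saved;           /* undo everything enter did */
	active--;
}

static int run_prog_once(void)
{
	struct run_ctx ctx = { 0 };

	if (!prog_enter(&ctx)) {
		/* The syscall.c hunk: pair the failed enter with an exit
		 * rather than bailing out with the context still installed. */
		prog_exit(&ctx);
		return -16;             /* -EBUSY */
	}
	/* ... the BPF program would run here ... */
	prog_exit(&ctx);
	return 0;
}

int main(void)
{
	printf("%d\n", run_prog_once());
	return 0;
}
```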
@@ -612,12 +612,18 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len, bool ingress)
{
+int err = 0;
if (!ingress) {
if (!sock_writeable(psock->sk))
return -EAGAIN;
return skb_send_sock(psock->sk, skb, off, len);
}
-return sk_psock_skb_ingress(psock, skb, off, len);
+skb_get(skb);
+err = sk_psock_skb_ingress(psock, skb, off, len);
+if (err < 0)
+kfree_skb(skb);
+return err;
}
static void sk_psock_skb_state(struct sk_psock *psock,
@@ -685,9 +691,7 @@ static void sk_psock_backlog(struct work_struct *work)
} while (len);
skb = skb_dequeue(&psock->ingress_skb);
-if (!ingress) {
-kfree_skb(skb);
-}
+kfree_skb(skb);
}
end:
mutex_unlock(&psock->work_mutex);
......
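The skmsg.c hunks above rebalance skb ownership: sk_psock_handle_skb() takes its own reference with skb_get() before handing the skb to the ingress path, where the user space side may consume and free it at any time, and sk_psock_backlog() then drops exactly one reference unconditionally after dequeue. A toy refcount model of the same discipline (stand-in types, not sk_buff):

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { atomic_int users; };       /* stand-in for an skb */

static void buf_get(struct buf *b)      /* skb_get() */
{
	atomic_fetch_add(&b->users, 1);
}

static void buf_put(struct buf *b)      /* kfree_skb() */
{
	if (atomic_fetch_sub(&b->users, 1) == 1)
		free(b);
}

/* On success the consumer owns the reference handed to it and may drop
 * it at any time, like user space consuming a queued skb. On error the
 * reference is not consumed. */
static int ingress_publish(struct buf *b)
{
	buf_put(b);                     /* consumed immediately here */
	return 0;
}

static int handle(struct buf *b)
{
	int err;

	buf_get(b);                     /* extra ref before publishing */
	err = ingress_publish(b);
	if (err < 0)
		buf_put(b);             /* error path drops the extra ref */
	return err;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	atomic_store(&b->users, 1);     /* the backlog's own reference */
	handle(b);
	/* Still safe to touch b: the backlog reference is intact even
	 * though the consumer already dropped the one it was given. */
	printf("users left: %d\n", atomic_load(&b->users));
	buf_put(b);                     /* the unconditional kfree_skb() */
	return 0;
}
```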
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Facebook */
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "sk_storage_omem_uncharge.skel.h"
void test_sk_storage_omem_uncharge(void)
{
struct sk_storage_omem_uncharge *skel;
int sk_fd = -1, map_fd, err, value;
socklen_t optlen;
skel = sk_storage_omem_uncharge__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sk_storage);
/* A standalone socket, not bound to an addr:port,
* so a netns is not needed.
*/
sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
if (!ASSERT_GE(sk_fd, 0, "socket"))
goto done;
optlen = sizeof(skel->bss->cookie);
err = getsockopt(sk_fd, SOL_SOCKET, SO_COOKIE, &skel->bss->cookie, &optlen);
if (!ASSERT_OK(err, "getsockopt(SO_COOKIE)"))
goto done;
value = 0;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
if (!ASSERT_OK(err, "bpf_map_update_elem(value=0)"))
goto done;
value = 0xdeadbeef;
err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
if (!ASSERT_OK(err, "bpf_map_update_elem(value=0xdeadbeef)"))
goto done;
err = sk_storage_omem_uncharge__attach(skel);
if (!ASSERT_OK(err, "attach"))
goto done;
close(sk_fd);
sk_fd = -1;
ASSERT_EQ(skel->bss->cookie_found, 2, "cookie_found");
ASSERT_EQ(skel->bss->omem, 0, "omem");
done:
sk_storage_omem_uncharge__destroy(skel);
if (sk_fd != -1)
close(sk_fd);
}
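In miniature, the assertions above check that every omem charge taken when a storage element is created is matched by an uncharge by the time the socket is destroyed, so the counter sampled from the fexit program is zero. A sketch of that bookkeeping (the element size here is made up; the kernel charges smap->elem_size):

```c
#include <assert.h>

static long omem;                       /* stand-in for sk_omem_alloc */

static void charge(long n)   { omem += n; }
static void uncharge(long n) { omem -= n; }

int main(void)
{
	charge(64);      /* first map update allocates a selem */
	charge(64);      /* second update allocates a replacement */
	uncharge(64);    /* the replaced selem is uncharged on unlink */
	uncharge(64);    /* destroy uncharges the remaining selem */
	assert(omem == 0);  /* what ASSERT_EQ(skel->bss->omem, 0, "omem") checks */
	return 0;
}
```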
@@ -179,6 +179,32 @@
__ret; \
})
+static inline int poll_connect(int fd, unsigned int timeout_sec)
+{
+struct timeval timeout = { .tv_sec = timeout_sec };
+fd_set wfds;
+int r, eval;
+socklen_t esize = sizeof(eval);
+FD_ZERO(&wfds);
+FD_SET(fd, &wfds);
+r = select(fd + 1, NULL, &wfds, NULL, &timeout);
+if (r == 0)
+errno = ETIME;
+if (r != 1)
+return -1;
+if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0)
+return -1;
+if (eval != 0) {
+errno = eval;
+return -1;
+}
+return 0;
+}
static inline int poll_read(int fd, unsigned int timeout_sec)
{
struct timeval timeout = { .tv_sec = timeout_sec };
......
@@ -1452,11 +1452,18 @@ static int vsock_socketpair_connectible(int sotype, int *v0, int *v1)
if (p < 0)
goto close_cli;
+if (poll_connect(c, IO_TIMEOUT_SEC) < 0) {
+FAIL_ERRNO("poll_connect");
+goto close_acc;
+}
*v0 = p;
*v1 = c;
return 0;
+close_acc:
+close(p);
close_cli:
close(c);
close_srv:
......
@@ -88,6 +88,7 @@
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
#define sk_flags __sk_common.skc_flags
#define sk_reuse __sk_common.skc_reuse
+#define sk_cookie __sk_common.skc_cookie
#define s6_addr32 in6_u.u6_addr32
......
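The new define above reuses a trick from the kernel itself: struct sock embeds struct sock_common and exposes its fields through #define aliases (sk_cookie expands to __sk_common.skc_cookie), but vmlinux.h carries only the raw layout, so the test header recreates the alias it needs. A tiny self-contained illustration (the struct layouts are trimmed stand-ins, not the real ones):

```c
#include <stdio.h>

struct sock_common { unsigned long long skc_cookie; };
struct sock { struct sock_common __sk_common; };

#define sk_cookie __sk_common.skc_cookie   /* same alias as above */

int main(void)
{
	struct sock s = { { 42 } };

	/* s.sk_cookie expands to s.__sk_common.skc_cookie */
	printf("%llu\n", s.sk_cookie);
	return 0;
}
```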
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Facebook */
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
void *local_storage_ptr = NULL;
void *sk_ptr = NULL;
int cookie_found = 0;
__u64 cookie = 0;
__u32 omem = 0;
void *bpf_rdonly_cast(void *, __u32) __ksym;
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_storage SEC(".maps");
SEC("fexit/bpf_local_storage_destroy")
int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage)
{
struct sock *sk;
if (local_storage_ptr != local_storage)
return 0;
sk = bpf_rdonly_cast(sk_ptr, bpf_core_type_id_kernel(struct sock));
if (sk->sk_cookie.counter != cookie)
return 0;
cookie_found++;
omem = sk->sk_omem_alloc.counter;
local_storage_ptr = NULL;
return 0;
}
SEC("fentry/inet6_sock_destruct")
int BPF_PROG(inet6_sock_destruct, struct sock *sk)
{
int *value;
if (!cookie || sk->sk_cookie.counter != cookie)
return 0;
value = bpf_sk_storage_get(&sk_storage, sk, 0, 0);
if (value && *value == 0xdeadbeef) {
cookie_found++;
sk_ptr = sk;
local_storage_ptr = sk->sk_bpf_storage;
}
return 0;
}
char _license[] SEC("license") = "GPL";