Commit 9e8e714f authored by Jakub Kicinski

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2021-02-26

1) Fix for bpf atomic insns with src_reg=r0, from Brendan.

2) Fix use after free due to bpf_prog_clone, from Cong.

3) Drop imprecise verifier log message, from Dmitrii.

4) Remove incorrect blank line in bpf helper description, from Hangbin.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: No need to drop the packet when there is no geneve opt
  bpf: Remove blank line in bpf helper description comment
  tools/resolve_btfids: Fix build error with older host toolchains
  selftests/bpf: Fix a compiler warning in global func test
  bpf: Drop imprecise log message
  bpf: Clear percpu pointers in bpf_prog_clone_free()
  bpf: Fix a warning message in mark_ptr_not_null_reg()
  bpf, x86: Fix BPF_FETCH atomic and/or/xor with r0 as src
====================

Link: https://lore.kernel.org/r/20210226193737.57004-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2c87f7a3 557c223b
...@@ -1349,6 +1349,7 @@ st: if (is_imm8(insn->off)) ...@@ -1349,6 +1349,7 @@ st: if (is_imm8(insn->off))
insn->imm == (BPF_XOR | BPF_FETCH)) { insn->imm == (BPF_XOR | BPF_FETCH)) {
u8 *branch_target; u8 *branch_target;
bool is64 = BPF_SIZE(insn->code) == BPF_DW; bool is64 = BPF_SIZE(insn->code) == BPF_DW;
u32 real_src_reg = src_reg;
/* /*
* Can't be implemented with a single x86 insn. * Can't be implemented with a single x86 insn.
...@@ -1357,6 +1358,9 @@ st: if (is_imm8(insn->off)) ...@@ -1357,6 +1358,9 @@ st: if (is_imm8(insn->off))
/* Will need RAX as a CMPXCHG operand so save R0 */ /* Will need RAX as a CMPXCHG operand so save R0 */
emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
if (src_reg == BPF_REG_0)
real_src_reg = BPF_REG_AX;
branch_target = prog; branch_target = prog;
/* Load old value */ /* Load old value */
emit_ldx(&prog, BPF_SIZE(insn->code), emit_ldx(&prog, BPF_SIZE(insn->code),
...@@ -1366,9 +1370,9 @@ st: if (is_imm8(insn->off)) ...@@ -1366,9 +1370,9 @@ st: if (is_imm8(insn->off))
* put the result in the AUX_REG. * put the result in the AUX_REG.
*/ */
emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
maybe_emit_mod(&prog, AUX_REG, src_reg, is64); maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
add_2reg(0xC0, AUX_REG, src_reg)); add_2reg(0xC0, AUX_REG, real_src_reg));
/* Attempt to swap in new value */ /* Attempt to swap in new value */
err = emit_atomic(&prog, BPF_CMPXCHG, err = emit_atomic(&prog, BPF_CMPXCHG,
dst_reg, AUX_REG, insn->off, dst_reg, AUX_REG, insn->off,
...@@ -1381,7 +1385,7 @@ st: if (is_imm8(insn->off)) ...@@ -1381,7 +1385,7 @@ st: if (is_imm8(insn->off))
*/ */
EMIT2(X86_JNE, -(prog - branch_target) - 2); EMIT2(X86_JNE, -(prog - branch_target) - 2);
/* Return the pre-modification value */ /* Return the pre-modification value */
emit_mov_reg(&prog, is64, src_reg, BPF_REG_0); emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
/* Restore R0 after clobbering RAX */ /* Restore R0 after clobbering RAX */
emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
break; break;
......
...@@ -3850,7 +3850,6 @@ union bpf_attr { ...@@ -3850,7 +3850,6 @@ union bpf_attr {
* *
* long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
* Description * Description
* Check ctx packet size against exceeding MTU of net device (based * Check ctx packet size against exceeding MTU of net device (based
* on *ifindex*). This helper will likely be used in combination * on *ifindex*). This helper will likely be used in combination
* with helpers that adjust/change the packet size. * with helpers that adjust/change the packet size.
......
...@@ -4321,8 +4321,6 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf, ...@@ -4321,8 +4321,6 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
* is not supported yet. * is not supported yet.
* BPF_PROG_TYPE_RAW_TRACEPOINT is fine. * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
*/ */
if (log->level & BPF_LOG_LEVEL)
bpf_log(log, "arg#%d type is not a struct\n", arg);
return NULL; return NULL;
} }
tname = btf_name_by_offset(btf, t->name_off); tname = btf_name_by_offset(btf, t->name_off);
......
...@@ -1118,6 +1118,8 @@ static void bpf_prog_clone_free(struct bpf_prog *fp) ...@@ -1118,6 +1118,8 @@ static void bpf_prog_clone_free(struct bpf_prog *fp)
* clone is guaranteed to not be locked. * clone is guaranteed to not be locked.
*/ */
fp->aux = NULL; fp->aux = NULL;
fp->stats = NULL;
fp->active = NULL;
__bpf_prog_free(fp); __bpf_prog_free(fp);
} }
......
...@@ -1120,7 +1120,7 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg) ...@@ -1120,7 +1120,7 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
reg->type = PTR_TO_RDWR_BUF; reg->type = PTR_TO_RDWR_BUF;
break; break;
default: default:
WARN_ON("unknown nullable register type"); WARN_ONCE(1, "unknown nullable register type");
} }
} }
......
...@@ -260,6 +260,11 @@ static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size) ...@@ -260,6 +260,11 @@ static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
return btf_id__add(root, id, false); return btf_id__add(root, id, false);
} }
/* Older libelf.h and glibc elf.h might not yet define the ELF compression types. */
#ifndef SHF_COMPRESSED
#define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */
#endif
/* /*
* The data of compressed section should be aligned to 4 * The data of compressed section should be aligned to 4
* (for 32bit) or 8 (for 64 bit) bytes. The binutils ld * (for 32bit) or 8 (for 64 bit) bytes. The binutils ld
......
...@@ -3850,7 +3850,6 @@ union bpf_attr { ...@@ -3850,7 +3850,6 @@ union bpf_attr {
* *
* long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
* Description * Description
* Check ctx packet size against exceeding MTU of net device (based * Check ctx packet size against exceeding MTU of net device (based
* on *ifindex*). This helper will likely be used in combination * on *ifindex*). This helper will likely be used in combination
* with helpers that adjust/change the packet size. * with helpers that adjust/change the packet size.
......
...@@ -15,5 +15,5 @@ __noinline int foo(const struct S *s) ...@@ -15,5 +15,5 @@ __noinline int foo(const struct S *s)
SEC("cgroup_skb/ingress") SEC("cgroup_skb/ingress")
int test_cls(struct __sk_buff *skb) int test_cls(struct __sk_buff *skb)
{ {
return foo(skb); return foo((const void *)skb);
} }
...@@ -446,10 +446,8 @@ int _geneve_get_tunnel(struct __sk_buff *skb) ...@@ -446,10 +446,8 @@ int _geneve_get_tunnel(struct __sk_buff *skb)
} }
ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt)); ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
if (ret < 0) { if (ret < 0)
ERROR(ret); gopt.opt_class = 0;
return TC_ACT_SHOT;
}
bpf_trace_printk(fmt, sizeof(fmt), bpf_trace_printk(fmt, sizeof(fmt),
key.tunnel_id, key.remote_ipv4, gopt.opt_class); key.tunnel_id, key.remote_ipv4, gopt.opt_class);
......
...@@ -75,3 +75,26 @@ ...@@ -75,3 +75,26 @@
}, },
.result = ACCEPT, .result = ACCEPT,
}, },
{
"BPF_ATOMIC_AND with fetch - r0 as source reg",
.insns = {
/* val = 0x110; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
/* old = atomic_fetch_and(&val, 0x011); */
BPF_MOV64_IMM(BPF_REG_0, 0x011),
BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_0, -8),
/* if (old != 0x110) exit(3); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x110, 2),
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
/* if (val != 0x010) exit(2); */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment