Commit 69b45078 authored by Alexei Starovoitov

Merge branch 'misc-BPF-improvements'

Daniel Borkmann says:

====================
This set adds various patches I still had in my queue. The first
two are test cases to provide coverage for the two recent fixes
that went to the bpf tree, followed by a small improvement to the
error message for GPL-only helpers. Next, we expose the prog and
map id in fdinfo in order to allow inspection of the objects
currently used by applications. The patch after that removes a
retpoline call for the map lookup/update/delete helpers. A new
helper is added in the subsequent patch to look up the cgroup v2
id of the skb's socket, which can be used efficiently for e.g.
lookups on the egress side. The next one is a fix to fully clear
state info in the tunnel/xfrm helpers. Given this requires full
cap_sys_admin from the init ns and has the same privilege
requirements as tracing, bpf-next should be okay. A small bug fix
for bpf_asm follows, then a fix for context access in tracing
which was recently reported. Lastly, a small update to the
MAINTAINERS file adds the patchwork URL and missing file entries.

Thanks!

v2 -> v3:
  - Noticed a merge artefact inside a uapi header comment, sigh;
    fixed now.
v1 -> v2:
  - Minor fix to get context access working on 32 bit for tracing.
  - Added a paragraph to the uapi helper doc to better describe the
    kernel build dependencies for the cgroup helper.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 25c1013e 10a76564
......@@ -2722,6 +2722,7 @@ L: netdev@vger.kernel.org
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
S: Supported
F: arch/x86/net/bpf_jit*
F: Documentation/networking/filter.txt
......@@ -2740,6 +2741,7 @@ F: net/sched/act_bpf.c
F: net/sched/cls_bpf.c
F: samples/bpf/
F: tools/bpf/
F: tools/lib/bpf/
F: tools/testing/selftests/bpf/
BROADCOM B44 10/100 ETHERNET DRIVER
......
......@@ -289,8 +289,21 @@ struct xdp_buff;
.off = OFF, \
.imm = 0 })
/* Relative call */
#define BPF_CALL_REL(TGT) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_CALL, \
.dst_reg = 0, \
.src_reg = BPF_PSEUDO_CALL, \
.off = 0, \
.imm = TGT })
/* Function call */
#define BPF_CAST_CALL(x) \
((u64 (*)(u64, u64, u64, u64, u64))(x))
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_CALL, \
......@@ -626,16 +639,34 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
return prog->type == BPF_PROG_TYPE_UNSPEC;
}
static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
const u32 size_machine = sizeof(unsigned long);
if (size > size_machine && size % size_machine == 0)
size = size_machine;
return size;
}
static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
u32 size_default)
{
bool off_ok;
size_default = bpf_ctx_off_adjust_machine(size_default);
size_access = bpf_ctx_off_adjust_machine(size_access);
#ifdef __LITTLE_ENDIAN
off_ok = (off & (size_default - 1)) == 0;
return (off & (size_default - 1)) == 0;
#else
off_ok = (off & (size_default - 1)) + size == size_default;
return (off & (size_default - 1)) + size_access == size_default;
#endif
return off_ok && size <= size_default && (size & (size - 1)) == 0;
}
static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
return bpf_ctx_narrow_align_ok(off, size, size_default) &&
size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
......
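
For reference, the split above between the alignment check and the power-of-two
size check can be modeled in user space. Below is a minimal little-endian sketch
(the big-endian variant is omitted; function names and the main() driver are
illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirror of bpf_ctx_off_adjust_machine(): fold full multiples of the
 * machine word down to one word
 */
static uint32_t ctx_off_adjust_machine(uint32_t size)
{
	const uint32_t size_machine = sizeof(unsigned long);

	if (size > size_machine && size % size_machine == 0)
		size = size_machine;
	return size;
}

static bool narrow_access_ok(uint32_t off, uint32_t size, uint32_t size_default)
{
	uint32_t def = ctx_off_adjust_machine(size_default);

	/* little-endian: a narrow load must start at the low-order bytes
	 * of the (word-adjusted) field, and its size must be a power of
	 * two no larger than the field
	 */
	return (off & (def - 1)) == 0 &&
	       size <= size_default && (size & (size - 1)) == 0;
}

int main(void)
{
	/* a 1-byte load at offset 0 of a u32 field is ok; at offset 1 it
	 * is not: prints "1 0"
	 */
	printf("%d %d\n", narrow_access_ok(0, 1, 4), narrow_access_ok(1, 1, 4));
	return 0;
}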
......@@ -2054,6 +2054,22 @@ union bpf_attr {
*
* Return
* 0
*
* uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
* Description
* Return the cgroup v2 id of the socket associated with the *skb*.
* This is roughly similar to the **bpf_get_cgroup_classid**\ ()
* helper for cgroup v1 in that it provides a tag or identifier that
* can be matched on or used for map lookups, e.g. to implement
* policy. The cgroup v2 id of a given path in the hierarchy is
* exposed in user space through the f_handle API in order to get
* to the same 64-bit id.
*
* This helper can be used on the TC egress path, but not on ingress,
* and is available only if the kernel was compiled with the
* **CONFIG_SOCK_CGROUP_DATA** configuration option.
* Return
* The id is returned, or 0 if the id could not be retrieved.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -2134,7 +2150,8 @@ union bpf_attr {
FN(lwt_seg6_adjust_srh), \
FN(lwt_seg6_action), \
FN(rc_repeat), \
FN(rc_keydown),
FN(rc_keydown), \
FN(skb_cgroup_id),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
......@@ -2251,7 +2268,7 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
__u16 tunnel_ext;
__u16 tunnel_ext; /* Padding, future use. */
__u32 tunnel_label;
};
......@@ -2262,6 +2279,7 @@ struct bpf_xfrm_state {
__u32 reqid;
__u32 spi; /* Stored in network byte order */
__u16 family;
__u16 ext; /* Padding, future use. */
union {
__u32 remote_ipv4; /* Stored in network byte order */
__u32 remote_ipv6[4]; /* Stored in network byte order */
......
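
A minimal sketch of how the new helper could be used from a tc egress
classifier, assuming clang's BPF target and the samples-style helper
declaration; EXAMPLE_CGRP_ID is a placeholder, and a real id would be
obtained via name_to_handle_at() on the cgroup v2 path:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

#define EXAMPLE_CGRP_ID 12345ULL	/* placeholder, not a real id */

static __u64 (*bpf_skb_cgroup_id)(struct __sk_buff *skb) =
	(void *) BPF_FUNC_skb_cgroup_id;

__attribute__((section("classifier"), used))
int egress_policy(struct __sk_buff *skb)
{
	__u64 id = bpf_skb_cgroup_id(skb);	/* 0 if not retrievable */

	/* drop traffic from one specific cgroup, pass everything else */
	return id == EXAMPLE_CGRP_ID ? TC_ACT_SHOT : TC_ACT_OK;
}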
......@@ -503,7 +503,9 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
struct bpf_insn *insn = insn_buf;
const int ret = BPF_REG_0;
*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
offsetof(struct htab_elem, key) +
......@@ -530,7 +532,9 @@ static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
const int ret = BPF_REG_0;
const int ref_reg = BPF_REG_1;
*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
*insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
offsetof(struct htab_elem, lru_node) +
......@@ -1369,7 +1373,9 @@ static u32 htab_of_map_gen_lookup(struct bpf_map *map,
struct bpf_insn *insn = insn_buf;
const int ret = BPF_REG_0;
*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
*insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
offsetof(struct htab_elem, key) +
......
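
The BPF_CAST_CALL() pattern above centralizes the cast to BPF's uniform
five-u64 helper signature in one place, while the BUILD_BUG_ON() pins the
callee's real prototype at compile time. A user-space model with toy names
(not kernel code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define CAST_CALL(x) ((u64 (*)(u64, u64, u64, u64, u64))(x))

/* stand-in for __htab_map_lookup_elem(): typed args, pointer return */
static void *toy_lookup(void *map, void *key)
{
	(void)map;
	return key;
}

int main(void)
{
	u64 key = 42;
	/* one central cast to the uniform signature; like the kernel,
	 * this relies on the 64-bit calling convention, which is why
	 * the inlining is gated on BITS_PER_LONG == 64
	 */
	u64 ret = CAST_CALL(toy_lookup)(0, (u64)(uintptr_t)&key, 0, 0, 0);

	printf("%llu\n", (unsigned long long)*(u64 *)(uintptr_t)ret);
	return 0;
}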
......@@ -327,13 +327,15 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
"value_size:\t%u\n"
"max_entries:\t%u\n"
"map_flags:\t%#x\n"
"memlock:\t%llu\n",
"memlock:\t%llu\n"
"map_id:\t%u\n",
map->map_type,
map->key_size,
map->value_size,
map->max_entries,
map->map_flags,
map->pages * 1ULL << PAGE_SHIFT);
map->pages * 1ULL << PAGE_SHIFT,
map->id);
if (owner_prog_type) {
seq_printf(m, "owner_prog_type:\t%u\n",
......@@ -1070,11 +1072,13 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
"prog_type:\t%u\n"
"prog_jited:\t%u\n"
"prog_tag:\t%s\n"
"memlock:\t%llu\n",
"memlock:\t%llu\n"
"prog_id:\t%u\n",
prog->type,
prog->jited,
prog_tag,
prog->pages * 1ULL << PAGE_SHIFT);
prog->pages * 1ULL << PAGE_SHIFT,
prog->aux->id);
}
#endif
......
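
The new fields can be read back like any other fdinfo attribute; a minimal
sketch, assuming the argument is the number of a BPF map or program fd held
by the calling process:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	FILE *f;

	if (argc < 2)
		return 1;
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%s", argv[1]);
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "map_id:\t42" or "prog_id:\t7" */
	fclose(f);
	return 0;
}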
......@@ -2421,8 +2421,11 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
if (func_id != BPF_FUNC_tail_call &&
func_id != BPF_FUNC_map_lookup_elem)
func_id != BPF_FUNC_map_lookup_elem &&
func_id != BPF_FUNC_map_update_elem &&
func_id != BPF_FUNC_map_delete_elem)
return 0;
if (meta->map_ptr == NULL) {
verbose(env, "kernel subsystem misconfigured verifier\n");
return -EINVAL;
......@@ -2462,7 +2465,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
/* eBPF programs must be GPL compatible to use GPL-ed functions */
if (!env->prog->gpl_compatible && fn->gpl_only) {
verbose(env, "cannot call GPL only function from proprietary program\n");
verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
return -EINVAL;
}
......@@ -5346,6 +5349,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
*/
is_narrower_load = size < ctx_field_size;
if (is_narrower_load) {
u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
u32 off = insn->off;
u8 size_code;
......@@ -5360,7 +5364,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
else if (ctx_field_size == 8)
size_code = BPF_DW;
insn->off = off & ~(ctx_field_size - 1);
insn->off = off & ~(size_default - 1);
insn->code = BPF_LDX | BPF_MEM | size_code;
}
......@@ -5586,6 +5590,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
struct bpf_insn *insn = prog->insnsi;
const struct bpf_func_proto *fn;
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
......@@ -5715,37 +5720,63 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
}
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* handlers are currently limited to 64 bit only.
* and other inlining handlers are currently limited to 64 bit
* only.
*/
if (prog->jit_requested && BITS_PER_LONG == 64 &&
insn->imm == BPF_FUNC_map_lookup_elem) {
(insn->imm == BPF_FUNC_map_lookup_elem ||
insn->imm == BPF_FUNC_map_update_elem ||
insn->imm == BPF_FUNC_map_delete_elem)) {
aux = &env->insn_aux_data[i + delta];
if (bpf_map_ptr_poisoned(aux))
goto patch_call_imm;
map_ptr = BPF_MAP_PTR(aux->map_state);
if (!map_ptr->ops->map_gen_lookup)
goto patch_call_imm;
cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
ops = map_ptr->ops;
if (insn->imm == BPF_FUNC_map_lookup_elem &&
ops->map_gen_lookup) {
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
cnt);
new_prog = bpf_patch_insn_data(env, i + delta,
insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
(int (*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_update_elem,
(int (*)(struct bpf_map *map, void *key, void *value,
u64 flags))NULL));
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_update_elem:
insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_delete_elem:
insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
__bpf_call_base;
continue;
}
goto patch_call_imm;
}
if (insn->imm == BPF_FUNC_redirect_map) {
/* Note, we cannot use prog directly as imm as subsequent
* rewrites would still change the prog pointer. The only
......
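
The rewrite above works because a helper call's imm field holds the target's
distance from __bpf_call_base, so patching imm to a map op's address minus
the base turns the generic helper dispatch into a direct call and avoids one
retpoline indirection. A user-space model of the round trip (illustrative
names; the kernel guarantees the distance fits in 32 bits, which this sketch
simply assumes):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef u64 (*bpf_func)(u64, u64, u64, u64, u64);

static u64 call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	(void)r1; (void)r2; (void)r3; (void)r4; (void)r5;
	return 0;
}

static u64 toy_map_lookup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	(void)r1; (void)r2; (void)r3; (void)r4; (void)r5;
	return 7;
}

int main(void)
{
	/* patch time: insn->imm = target - base */
	int32_t imm = (int32_t)((intptr_t)toy_map_lookup - (intptr_t)call_base);
	/* call time: the interpreter/JIT computes base + imm and calls it */
	bpf_func fn = (bpf_func)((intptr_t)call_base + imm);

	printf("%llu\n", (unsigned long long)fn(0, 0, 0, 0, 0));
	return 0;
}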
......@@ -880,8 +880,14 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
if (off % size != 0) {
if (sizeof(unsigned long) != 4)
return false;
if (size != 8)
return false;
if (off % size != 4)
return false;
}
switch (off) {
case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
......
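
The relaxed check exists because on a 32-bit kernel the verifier splits
8-byte context fields such as sample_period into machine words, so a
legitimate access to the upper half arrives at off % 8 == 4. A small model
with the word size passed explicitly (names and the offset 12 are
illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* word == sizeof(unsigned long) on the kernel being modeled */
static bool pe_off_ok(uint32_t off, uint32_t size, uint32_t word)
{
	if (off % size == 0)
		return true;
	/* only a 32-bit kernel may see an 8-byte ctx access split so
	 * that its upper half lands at off % 8 == 4
	 */
	return word == 4 && size == 8 && off % size == 4;
}

int main(void)
{
	/* 8-byte access at off 12: allowed on 32-bit, rejected on 64-bit */
	printf("%d %d\n", pe_off_ok(12, 8, 4), pe_off_ok(12, 8, 8));
	return 0;
}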
......@@ -356,6 +356,52 @@ static int bpf_fill_maxinsns11(struct bpf_test *self)
return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}
static int bpf_fill_maxinsns12(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
for (i = 1; i < len - 1; i++)
insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns13(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < len - 3; i++)
insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_ja(struct bpf_test *self)
{
/* Hits exactly 11 passes on x86_64 JIT. */
......@@ -5289,6 +5335,23 @@ static struct bpf_test tests[] = {
.fill_helper = bpf_fill_maxinsns11,
.expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: jump over MSH",
{ },
CLASSIC | FLAG_EXPECTED_FAIL,
{ 0xfa, 0xfb, 0xfc, 0xfd, },
{ { 4, 0xabababab } },
.fill_helper = bpf_fill_maxinsns12,
.expected_errcode = -EINVAL,
},
{
"BPF_MAXINSNS: exec all MSH",
{ },
CLASSIC,
{ 0xfa, 0xfb, 0xfc, 0xfd, },
{ { 4, 0xababab83 } },
.fill_helper = bpf_fill_maxinsns13,
},
{
"BPF_MAXINSNS: ld_abs+get_processor_id",
{ },
......
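
The expected values in the two new tests follow from classic BPF's
LDX_B_MSH semantics, X = 4 * (pkt[k] & 0xf): with the test data starting
at byte 0xfa, X = 4 * 0xa = 0x28, so the "exec all MSH" case returns
A = 0xabababab ^ 0x28 = 0xababab83. A standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t pkt0 = 0xfa;			/* first test data byte */
	uint32_t x = 4 * (pkt0 & 0xf);		/* LDX_B_MSH: 0x28 */
	uint32_t a = 0xabababab ^ x;		/* ALU_XOR_X: 0xababab83 */

	printf("X=%#x A=%#x\n", x, a);
	return 0;
}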
......@@ -3445,6 +3445,7 @@ BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key
to->tunnel_id = be64_to_cpu(info->key.tun_id);
to->tunnel_tos = info->key.tos;
to->tunnel_ttl = info->key.ttl;
to->tunnel_ext = 0;
if (flags & BPF_F_TUNINFO_IPV6) {
memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
......@@ -3452,6 +3453,8 @@ BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key
to->tunnel_label = be32_to_cpu(info->key.label);
} else {
to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
to->tunnel_label = 0;
}
if (unlikely(size != sizeof(struct bpf_tunnel_key)))
......@@ -3661,6 +3664,27 @@ static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
.arg3_type = ARG_ANYTHING,
};
#ifdef CONFIG_SOCK_CGROUP_DATA
BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
{
struct sock *sk = skb_to_full_sk(skb);
struct cgroup *cgrp;
if (!sk || !sk_fullsock(sk))
return 0;
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
return cgrp->kn->id.id;
}
static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
.func = bpf_skb_cgroup_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
#endif
static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
unsigned long off, unsigned long len)
{
......@@ -4026,11 +4050,14 @@ BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
to->reqid = x->props.reqid;
to->spi = x->id.spi;
to->family = x->props.family;
to->ext = 0;
if (to->family == AF_INET6) {
memcpy(to->remote_ipv6, x->props.saddr.a6,
sizeof(to->remote_ipv6));
} else {
to->remote_ipv4 = x->props.saddr.a4;
memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
}
return 0;
......@@ -4747,12 +4774,16 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_cookie_proto;
case BPF_FUNC_get_socket_uid:
return &bpf_get_socket_uid_proto;
case BPF_FUNC_fib_lookup:
return &bpf_skb_fib_lookup_proto;
#ifdef CONFIG_XFRM
case BPF_FUNC_skb_get_xfrm_state:
return &bpf_skb_get_xfrm_state_proto;
#endif
case BPF_FUNC_fib_lookup:
return &bpf_skb_fib_lookup_proto;
#ifdef CONFIG_SOCK_CGROUP_DATA
case BPF_FUNC_skb_cgroup_id:
return &bpf_skb_cgroup_id_proto;
#endif
default:
return bpf_base_func_proto(func_id);
}
......
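
With the padding and unused union words now zeroed, the struct returned by
the tunnel/xfrm helpers is fully defined and can safely be hashed or used
as a map key in one piece. A minimal tc-side usage sketch (the section name
and the samples-style helper declaration are assumptions, not part of this
patch):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_get_tunnel_key)(struct __sk_buff *skb,
				     struct bpf_tunnel_key *key,
				     __u32 size, __u64 flags) =
	(void *) BPF_FUNC_skb_get_tunnel_key;

__attribute__((section("classifier"), used))
int collect(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key;

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_OK;

	/* after this fix, tunnel_ext, tunnel_label and the unused
	 * remote_ipv6 words are zeroed in the IPv4 case, so the whole
	 * struct can be used as a lookup key as-is
	 */
	return key.tunnel_id ? TC_ACT_SHOT : TC_ACT_OK;
}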
......@@ -175,7 +175,7 @@ extern void yyerror(const char *str);
yylval.number = strtol(yytext, NULL, 10);
return number;
}
([0][0-9]+) {
([0][0-7]+) {
yylval.number = strtol(yytext + 1, NULL, 8);
return number;
}
......
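
The lexer fix matters because strtol() with base 8 stops at the first
non-octal digit, so the old ([0][0-9]+) pattern let inputs like 018 through
and silently dropped the trailing 8. A standalone illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *end;
	long v = strtol("18", &end, 8);	/* what the lexer saw for "018" */

	/* prints: value=1 unparsed="8" */
	printf("value=%ld unparsed=\"%s\"\n", v, end);
	return 0;
}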
......@@ -263,6 +263,16 @@
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
/* Relative call */
#define BPF_CALL_REL(TGT) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_CALL, \
.dst_reg = 0, \
.src_reg = BPF_PSEUDO_CALL, \
.off = 0, \
.imm = TGT })
/* Program exit */
#define BPF_EXIT_INSN() \
......
......@@ -2054,6 +2054,22 @@ union bpf_attr {
*
* Return
* 0
*
* uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
* Description
* Return the cgroup v2 id of the socket associated with the *skb*.
* This is roughly similar to the **bpf_get_cgroup_classid**\ ()
* helper for cgroup v1 in that it provides a tag or identifier that
* can be matched on or used for map lookups, e.g. to implement
* policy. The cgroup v2 id of a given path in the hierarchy is
* exposed in user space through the f_handle API in order to get
* to the same 64-bit id.
*
* This helper can be used on the TC egress path, but not on ingress,
* and is available only if the kernel was compiled with the
* **CONFIG_SOCK_CGROUP_DATA** configuration option.
* Return
* The id is returned, or 0 if the id could not be retrieved.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -2134,7 +2150,8 @@ union bpf_attr {
FN(lwt_seg6_adjust_srh), \
FN(lwt_seg6_action), \
FN(rc_repeat), \
FN(rc_keydown),
FN(rc_keydown), \
FN(skb_cgroup_id),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
......@@ -2251,7 +2268,7 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
__u16 tunnel_ext;
__u16 tunnel_ext; /* Padding, future use. */
__u32 tunnel_label;
};
......@@ -2262,6 +2279,7 @@ struct bpf_xfrm_state {
__u32 reqid;
__u32 spi; /* Stored in network byte order */
__u16 family;
__u16 ext; /* Padding, future use. */
union {
__u32 remote_ipv4; /* Stored in network byte order */
__u32 remote_ipv6[4]; /* Stored in network byte order */
......
......@@ -50,7 +50,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 4
#define MAX_NR_MAPS 7
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
......@@ -66,7 +66,9 @@ struct bpf_test {
int fixup_map1[MAX_FIXUPS];
int fixup_map2[MAX_FIXUPS];
int fixup_map3[MAX_FIXUPS];
int fixup_prog[MAX_FIXUPS];
int fixup_map4[MAX_FIXUPS];
int fixup_prog1[MAX_FIXUPS];
int fixup_prog2[MAX_FIXUPS];
int fixup_map_in_map[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
......@@ -2769,7 +2771,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.fixup_prog1 = { 1 },
.errstr_unpriv = "R3 leaks addr into helper",
.result_unpriv = REJECT,
.result = ACCEPT,
......@@ -2856,7 +2858,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.fixup_prog1 = { 1 },
.result = ACCEPT,
.retval = 42,
},
......@@ -2870,7 +2872,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.fixup_prog1 = { 1 },
.result = ACCEPT,
.retval = 41,
},
......@@ -2884,7 +2886,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.fixup_prog1 = { 1 },
.result = ACCEPT,
.retval = 1,
},
......@@ -2898,7 +2900,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.fixup_prog1 = { 1 },
.result = ACCEPT,
.retval = 2,
},
......@@ -2912,7 +2914,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.fixup_prog1 = { 1 },
.result = ACCEPT,
.retval = 2,
},
......@@ -2926,7 +2928,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.fixup_prog = { 2 },
.fixup_prog1 = { 2 },
.result = ACCEPT,
.retval = 42,
},
......@@ -11681,6 +11683,112 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
},
{
"calls: two calls returning different map pointers for lookup (hash, array)",
.insns = {
/* main prog */
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_CALL_REL(11),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_CALL_REL(12),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LD_MAP_FD(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 2 */
BPF_LD_MAP_FD(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map2 = { 13 },
.fixup_map4 = { 16 },
.result = ACCEPT,
.retval = 1,
},
{
"calls: two calls returning different map pointers for lookup (hash, map in map)",
.insns = {
/* main prog */
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_CALL_REL(11),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_CALL_REL(12),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
offsetof(struct test_val, foo)),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* subprog 1 */
BPF_LD_MAP_FD(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* subprog 2 */
BPF_LD_MAP_FD(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_in_map = { 16 },
.fixup_map4 = { 13 },
.result = REJECT,
.errstr = "R0 invalid mem access 'map_ptr'",
},
{
"cond: two branches returning different map pointers for lookup (tail, tail)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_prog1 = { 5 },
.fixup_prog2 = { 2 },
.result_unpriv = REJECT,
.errstr_unpriv = "tail_call abusing map_ptr",
.result = ACCEPT,
.retval = 42,
},
{
"cond: two branches returning same map pointers for lookup (tail, tail)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_prog2 = { 2, 5 },
.result_unpriv = ACCEPT,
.result = ACCEPT,
.retval = 42,
},
{
"search pruning: all branches should be verified (nop operation)",
.insns = {
......@@ -12162,12 +12270,13 @@ static int probe_filter_length(const struct bpf_insn *fp)
return len + 1;
}
static int create_map(uint32_t size_value, uint32_t max_elem)
static int create_map(uint32_t type, uint32_t size_key,
uint32_t size_value, uint32_t max_elem)
{
int fd;
fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
size_value, max_elem, BPF_F_NO_PREALLOC);
fd = bpf_create_map(type, size_key, size_value, max_elem,
type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
if (fd < 0)
printf("Failed to create hash map '%s'!\n", strerror(errno));
......@@ -12200,13 +12309,13 @@ static int create_prog_dummy2(int mfd, int idx)
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
static int create_prog_array(void)
static int create_prog_array(uint32_t max_elem, int p1key)
{
int p1key = 0, p2key = 1;
int p2key = 1;
int mfd, p1fd, p2fd;
mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), 4, 0);
sizeof(int), max_elem, 0);
if (mfd < 0) {
printf("Failed to create prog array '%s'!\n", strerror(errno));
return -1;
......@@ -12261,7 +12370,9 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
int *fixup_map1 = test->fixup_map1;
int *fixup_map2 = test->fixup_map2;
int *fixup_map3 = test->fixup_map3;
int *fixup_prog = test->fixup_prog;
int *fixup_map4 = test->fixup_map4;
int *fixup_prog1 = test->fixup_prog1;
int *fixup_prog2 = test->fixup_prog2;
int *fixup_map_in_map = test->fixup_map_in_map;
if (test->fill_helper)
......@@ -12272,7 +12383,8 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
* that really matters is value size in this case.
*/
if (*fixup_map1) {
map_fds[0] = create_map(sizeof(long long), 1);
map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(long long), 1);
do {
prog[*fixup_map1].imm = map_fds[0];
fixup_map1++;
......@@ -12280,7 +12392,8 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
}
if (*fixup_map2) {
map_fds[1] = create_map(sizeof(struct test_val), 1);
map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(struct test_val), 1);
do {
prog[*fixup_map2].imm = map_fds[1];
fixup_map2++;
......@@ -12288,25 +12401,43 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
}
if (*fixup_map3) {
map_fds[1] = create_map(sizeof(struct other_val), 1);
map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(struct other_val), 1);
do {
prog[*fixup_map3].imm = map_fds[1];
prog[*fixup_map3].imm = map_fds[2];
fixup_map3++;
} while (*fixup_map3);
}
if (*fixup_prog) {
map_fds[2] = create_prog_array();
if (*fixup_map4) {
map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(struct test_val), 1);
do {
prog[*fixup_map4].imm = map_fds[3];
fixup_map4++;
} while (*fixup_map4);
}
if (*fixup_prog1) {
map_fds[4] = create_prog_array(4, 0);
do {
prog[*fixup_prog1].imm = map_fds[4];
fixup_prog1++;
} while (*fixup_prog1);
}
if (*fixup_prog2) {
map_fds[5] = create_prog_array(8, 7);
do {
prog[*fixup_prog].imm = map_fds[2];
fixup_prog++;
} while (*fixup_prog);
prog[*fixup_prog2].imm = map_fds[5];
fixup_prog2++;
} while (*fixup_prog2);
}
if (*fixup_map_in_map) {
map_fds[3] = create_map_in_map();
map_fds[6] = create_map_in_map();
do {
prog[*fixup_map_in_map].imm = map_fds[3];
prog[*fixup_map_in_map].imm = map_fds[6];
fixup_map_in_map++;
} while (*fixup_map_in_map);
}
......
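
In the new tests, BPF_CALL_REL(TGT) resolves to instruction index
call_index + 1 + TGT, with BPF_LD_MAP_FD counting as two instructions;
that is how BPF_CALL_REL(11) at index 1 and BPF_CALL_REL(12) at index 3
land on the subprogs at indexes 13 and 16, matching the fixup offsets.
A trivial check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* callee index = call index + 1 + imm */
	printf("%d %d\n", 1 + 1 + 11, 3 + 1 + 12);	/* prints: 13 16 */
	return 0;
}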