Commit e65eaded authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2021-03-18

The following pull-request contains BPF updates for your *net* tree.

We've added 10 non-merge commits during the last 4 day(s) which contain
a total of 14 files changed, 336 insertions(+), 94 deletions(-).

The main changes are:

1) Fix fexit/fmod_ret trampoline for sleepable programs, and also fix a ftrace
   splat in modify_ftrace_direct() on address change, from Alexei Starovoitov.

2) Fix two out-of-bounds speculation possibilities that allow an unprivileged user
   to leak memory via a side channel, from Piotr Krysiuk and Daniel Borkmann (a short
   sketch of the masking idea follows the commit header below).

3) Fix libbpf's netlink handling wrt SOCK_CLOEXEC, from Kumar Kartikeya Dwivedi.

4) Fix libbpf's error handling on failure in getting section names, from Namhyung Kim.

5) Fix tunnel collect_md BPF selftest wrt Geneve option handling, from Hangbin Liu.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cb038357 58bfd95b
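For context on item 2: the verifier hunks further down change how alu_limit is computed and emitted. The snippet below is a plain userspace sketch of the branchless masking that the verifier's instruction rewrite performs around unprivileged pointer arithmetic; it is not kernel code, and mask_offset() is an illustrative name. After this series, "limit" is the exact largest admissible offset rather than one past it.

#include <stdint.h>

/* Sketch of the sequence fixup_bpf_calls() emits:
 * MOV alu_limit; SUB off; OR off; NEG; ARSH 63; AND off.
 * Relies on arithmetic right shift, as BPF_ARSH does.
 */
static uint64_t mask_offset(uint64_t off, uint64_t limit)
{
    uint64_t ax = limit - off;          /* MSB set when off > limit              */

    ax |= off;                          /* MSB also set when off has its MSB set */
    ax = -ax;                           /* in-range, non-zero: MSB becomes set   */
    ax = (uint64_t)((int64_t)ax >> 63); /* all-ones if in range, otherwise 0     */
    return off & ax;                    /* out-of-range offsets collapse to 0    */
}

For example, with limit 15 (a stack pointer at fp-16 moving toward fp), off = 12 passes through unchanged while off = 16 is masked to 0, so the speculative access stays inside the object.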
...@@ -1936,7 +1936,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, ...@@ -1936,7 +1936,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
* add rsp, 8 // skip eth_type_trans's frame * add rsp, 8 // skip eth_type_trans's frame
* ret // return to its caller * ret // return to its caller
*/ */
int arch_prepare_bpf_trampoline(void *image, void *image_end, int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags, const struct btf_func_model *m, u32 flags,
struct bpf_tramp_progs *tprogs, struct bpf_tramp_progs *tprogs,
void *orig_call) void *orig_call)
...@@ -1975,6 +1975,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, ...@@ -1975,6 +1975,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
save_regs(m, &prog, nr_args, stack_size); save_regs(m, &prog, nr_args, stack_size);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
if (emit_call(&prog, __bpf_tramp_enter, prog)) {
ret = -EINVAL;
goto cleanup;
}
}
if (fentry->nr_progs) if (fentry->nr_progs)
if (invoke_bpf(m, &prog, fentry, stack_size)) if (invoke_bpf(m, &prog, fentry, stack_size))
return -EINVAL; return -EINVAL;
...@@ -1993,8 +2002,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, ...@@ -1993,8 +2002,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
} }
if (flags & BPF_TRAMP_F_CALL_ORIG) { if (flags & BPF_TRAMP_F_CALL_ORIG) {
if (fentry->nr_progs || fmod_ret->nr_progs) restore_regs(m, &prog, nr_args, stack_size);
restore_regs(m, &prog, nr_args, stack_size);
/* call original function */ /* call original function */
if (emit_call(&prog, orig_call, prog)) { if (emit_call(&prog, orig_call, prog)) {
...@@ -2003,6 +2011,8 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, ...@@ -2003,6 +2011,8 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
} }
/* remember return value in a stack for bpf prog to access */ /* remember return value in a stack for bpf prog to access */
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
im->ip_after_call = prog;
emit_nops(&prog, 5);
} }
if (fmod_ret->nr_progs) { if (fmod_ret->nr_progs) {
...@@ -2033,9 +2043,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end, ...@@ -2033,9 +2043,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
* the return value is only updated on the stack and still needs to be * the return value is only updated on the stack and still needs to be
* restored to R0. * restored to R0.
*/ */
if (flags & BPF_TRAMP_F_CALL_ORIG) if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = prog;
/* arg1: mov rdi, im */
emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
if (emit_call(&prog, __bpf_tramp_exit, prog)) {
ret = -EINVAL;
goto cleanup;
}
/* restore original return value back into RAX */ /* restore original return value back into RAX */
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
}
EMIT1(0x5B); /* pop rbx */ EMIT1(0x5B); /* pop rbx */
EMIT1(0xC9); /* leave */ EMIT1(0xC9); /* leave */
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/percpu-refcount.h>
struct bpf_verifier_env; struct bpf_verifier_env;
struct bpf_verifier_log; struct bpf_verifier_log;
...@@ -556,7 +557,8 @@ struct bpf_tramp_progs { ...@@ -556,7 +557,8 @@ struct bpf_tramp_progs {
* fentry = a set of program to run before calling original function * fentry = a set of program to run before calling original function
* fexit = a set of program to run after original function * fexit = a set of program to run after original function
*/ */
int arch_prepare_bpf_trampoline(void *image, void *image_end, struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
const struct btf_func_model *m, u32 flags, const struct btf_func_model *m, u32 flags,
struct bpf_tramp_progs *tprogs, struct bpf_tramp_progs *tprogs,
void *orig_call); void *orig_call);
...@@ -565,6 +567,8 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog); ...@@ -565,6 +567,8 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog); u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start); void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
struct bpf_ksym { struct bpf_ksym {
unsigned long start; unsigned long start;
...@@ -583,6 +587,18 @@ enum bpf_tramp_prog_type { ...@@ -583,6 +587,18 @@ enum bpf_tramp_prog_type {
BPF_TRAMP_REPLACE, /* more than MAX */ BPF_TRAMP_REPLACE, /* more than MAX */
}; };
struct bpf_tramp_image {
void *image;
struct bpf_ksym ksym;
struct percpu_ref pcref;
void *ip_after_call;
void *ip_epilogue;
union {
struct rcu_head rcu;
struct work_struct work;
};
};
struct bpf_trampoline { struct bpf_trampoline {
/* hlist for trampoline_table */ /* hlist for trampoline_table */
struct hlist_node hlist; struct hlist_node hlist;
...@@ -605,9 +621,8 @@ struct bpf_trampoline { ...@@ -605,9 +621,8 @@ struct bpf_trampoline {
/* Number of attached programs. A counter per kind. */ /* Number of attached programs. A counter per kind. */
int progs_cnt[BPF_TRAMP_MAX]; int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */ /* Executable image of trampoline */
void *image; struct bpf_tramp_image *cur_image;
u64 selector; u64 selector;
struct bpf_ksym ksym;
}; };
struct bpf_attach_target_info { struct bpf_attach_target_info {
...@@ -691,6 +706,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); ...@@ -691,6 +706,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
#else #else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
struct bpf_trampoline *tr) struct bpf_trampoline *tr)
...@@ -787,7 +804,6 @@ struct bpf_prog_aux { ...@@ -787,7 +804,6 @@ struct bpf_prog_aux {
bool func_proto_unreliable; bool func_proto_unreliable;
bool sleepable; bool sleepable;
bool tail_call_reachable; bool tail_call_reachable;
enum bpf_tramp_prog_type trampoline_prog_type;
struct hlist_node tramp_hlist; struct hlist_node tramp_hlist;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto; const struct btf_type *attach_func_proto;
......
...@@ -430,7 +430,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, ...@@ -430,7 +430,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
err = arch_prepare_bpf_trampoline(image, err = arch_prepare_bpf_trampoline(NULL, image,
st_map->image + PAGE_SIZE, st_map->image + PAGE_SIZE,
&st_ops->func_models[i], 0, &st_ops->func_models[i], 0,
tprogs, NULL); tprogs, NULL);
......
...@@ -827,7 +827,7 @@ static int __init bpf_jit_charge_init(void) ...@@ -827,7 +827,7 @@ static int __init bpf_jit_charge_init(void)
} }
pure_initcall(bpf_jit_charge_init); pure_initcall(bpf_jit_charge_init);
static int bpf_jit_charge_modmem(u32 pages) int bpf_jit_charge_modmem(u32 pages)
{ {
if (atomic_long_add_return(pages, &bpf_jit_current) > if (atomic_long_add_return(pages, &bpf_jit_current) >
(bpf_jit_limit >> PAGE_SHIFT)) { (bpf_jit_limit >> PAGE_SHIFT)) {
...@@ -840,7 +840,7 @@ static int bpf_jit_charge_modmem(u32 pages) ...@@ -840,7 +840,7 @@ static int bpf_jit_charge_modmem(u32 pages)
return 0; return 0;
} }
static void bpf_jit_uncharge_modmem(u32 pages) void bpf_jit_uncharge_modmem(u32 pages)
{ {
atomic_long_sub(pages, &bpf_jit_current); atomic_long_sub(pages, &bpf_jit_current);
} }
......
...@@ -57,19 +57,10 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym) ...@@ -57,19 +57,10 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym)
PAGE_SIZE, true, ksym->name); PAGE_SIZE, true, ksym->name);
} }
static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
{
struct bpf_ksym *ksym = &tr->ksym;
snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", tr->key);
bpf_image_ksym_add(tr->image, ksym);
}
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{ {
struct bpf_trampoline *tr; struct bpf_trampoline *tr;
struct hlist_head *head; struct hlist_head *head;
void *image;
int i; int i;
mutex_lock(&trampoline_mutex); mutex_lock(&trampoline_mutex);
...@@ -84,14 +75,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) ...@@ -84,14 +75,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
if (!tr) if (!tr)
goto out; goto out;
/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
image = bpf_jit_alloc_exec_page();
if (!image) {
kfree(tr);
tr = NULL;
goto out;
}
tr->key = key; tr->key = key;
INIT_HLIST_NODE(&tr->hlist); INIT_HLIST_NODE(&tr->hlist);
hlist_add_head(&tr->hlist, head); hlist_add_head(&tr->hlist, head);
...@@ -99,9 +82,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) ...@@ -99,9 +82,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
mutex_init(&tr->mutex); mutex_init(&tr->mutex);
for (i = 0; i < BPF_TRAMP_MAX; i++) for (i = 0; i < BPF_TRAMP_MAX; i++)
INIT_HLIST_HEAD(&tr->progs_hlist[i]); INIT_HLIST_HEAD(&tr->progs_hlist[i]);
tr->image = image;
INIT_LIST_HEAD_RCU(&tr->ksym.lnode);
bpf_trampoline_ksym_add(tr);
out: out:
mutex_unlock(&trampoline_mutex); mutex_unlock(&trampoline_mutex);
return tr; return tr;
...@@ -185,10 +165,142 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total) ...@@ -185,10 +165,142 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
return tprogs; return tprogs;
} }
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
struct bpf_tramp_image *im;
im = container_of(work, struct bpf_tramp_image, work);
bpf_image_ksym_del(&im->ksym);
bpf_jit_free_exec(im->image);
bpf_jit_uncharge_modmem(1);
percpu_ref_exit(&im->pcref);
kfree_rcu(im, rcu);
}
/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
struct bpf_tramp_image *im;
im = container_of(rcu, struct bpf_tramp_image, rcu);
INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
schedule_work(&im->work);
}
/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
struct bpf_tramp_image *im;
im = container_of(pcref, struct bpf_tramp_image, pcref);
call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}
/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
struct bpf_tramp_image *im;
im = container_of(rcu, struct bpf_tramp_image, rcu);
if (im->ip_after_call)
/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
percpu_ref_kill(&im->pcref);
else
/* the case of fentry trampoline */
call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}
static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
/* The trampoline image that calls original function is using:
* rcu_read_lock_trace to protect sleepable bpf progs
* rcu_read_lock to protect normal bpf progs
* percpu_ref to protect trampoline itself
* rcu tasks to protect trampoline asm not covered by percpu_ref
* (which are few asm insns before __bpf_tramp_enter and
* after __bpf_tramp_exit)
*
* The trampoline is unreachable before bpf_tramp_image_put().
*
* First, patch the trampoline to avoid calling into fexit progs.
* The progs will be freed even if the original function is still
* executing or sleeping.
* In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on
* first few asm instructions to execute and call into
* __bpf_tramp_enter->percpu_ref_get.
* Then use percpu_ref_kill to wait for the trampoline and the original
* function to finish.
* Then use call_rcu_tasks() to make sure few asm insns in
* the trampoline epilogue are done as well.
*
* In !PREEMPT case the task that got interrupted in the first asm
* insns won't go through an RCU quiescent state which the
* percpu_ref_kill will be waiting for. Hence the first
* call_rcu_tasks() is not necessary.
*/
if (im->ip_after_call) {
int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
NULL, im->ip_epilogue);
WARN_ON(err);
if (IS_ENABLED(CONFIG_PREEMPTION))
call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
else
percpu_ref_kill(&im->pcref);
return;
}
/* The trampoline without fexit and fmod_ret progs doesn't call original
* function and doesn't use percpu_ref.
* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
* Then use call_rcu_tasks() to wait for the rest of trampoline asm
* and normal progs.
*/
call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
struct bpf_tramp_image *im;
struct bpf_ksym *ksym;
void *image;
int err = -ENOMEM;
im = kzalloc(sizeof(*im), GFP_KERNEL);
if (!im)
goto out;
err = bpf_jit_charge_modmem(1);
if (err)
goto out_free_im;
err = -ENOMEM;
im->image = image = bpf_jit_alloc_exec_page();
if (!image)
goto out_uncharge;
err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
if (err)
goto out_free_image;
ksym = &im->ksym;
INIT_LIST_HEAD_RCU(&ksym->lnode);
snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
bpf_image_ksym_add(image, ksym);
return im;
out_free_image:
bpf_jit_free_exec(im->image);
out_uncharge:
bpf_jit_uncharge_modmem(1);
out_free_im:
kfree(im);
out:
return ERR_PTR(err);
}
static int bpf_trampoline_update(struct bpf_trampoline *tr) static int bpf_trampoline_update(struct bpf_trampoline *tr)
{ {
void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2; struct bpf_tramp_image *im;
void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
struct bpf_tramp_progs *tprogs; struct bpf_tramp_progs *tprogs;
u32 flags = BPF_TRAMP_F_RESTORE_REGS; u32 flags = BPF_TRAMP_F_RESTORE_REGS;
int err, total; int err, total;
...@@ -198,41 +310,42 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr) ...@@ -198,41 +310,42 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
return PTR_ERR(tprogs); return PTR_ERR(tprogs);
if (total == 0) { if (total == 0) {
err = unregister_fentry(tr, old_image); err = unregister_fentry(tr, tr->cur_image->image);
bpf_tramp_image_put(tr->cur_image);
tr->cur_image = NULL;
tr->selector = 0; tr->selector = 0;
goto out; goto out;
} }
im = bpf_tramp_image_alloc(tr->key, tr->selector);
if (IS_ERR(im)) {
err = PTR_ERR(im);
goto out;
}
if (tprogs[BPF_TRAMP_FEXIT].nr_progs || if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs) tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME; flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
/* Though the second half of trampoline page is unused a task could be err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
* preempted in the middle of the first half of trampoline and two
* updates to trampoline would change the code from underneath the
* preempted task. Hence wait for tasks to voluntarily schedule or go
* to userspace.
* The same trampoline can hold both sleepable and non-sleepable progs.
* synchronize_rcu_tasks_trace() is needed to make sure all sleepable
* programs finish executing.
* Wait for these two grace periods together.
*/
synchronize_rcu_mult(call_rcu_tasks, call_rcu_tasks_trace);
err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
&tr->func.model, flags, tprogs, &tr->func.model, flags, tprogs,
tr->func.addr); tr->func.addr);
if (err < 0) if (err < 0)
goto out; goto out;
if (tr->selector) WARN_ON(tr->cur_image && tr->selector == 0);
WARN_ON(!tr->cur_image && tr->selector);
if (tr->cur_image)
/* progs already running at this address */ /* progs already running at this address */
err = modify_fentry(tr, old_image, new_image); err = modify_fentry(tr, tr->cur_image->image, im->image);
else else
/* first time registering */ /* first time registering */
err = register_fentry(tr, new_image); err = register_fentry(tr, im->image);
if (err) if (err)
goto out; goto out;
if (tr->cur_image)
bpf_tramp_image_put(tr->cur_image);
tr->cur_image = im;
tr->selector++; tr->selector++;
out: out:
kfree(tprogs); kfree(tprogs);
...@@ -364,17 +477,12 @@ void bpf_trampoline_put(struct bpf_trampoline *tr) ...@@ -364,17 +477,12 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
goto out; goto out;
if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT]))) if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
goto out; goto out;
bpf_image_ksym_del(&tr->ksym); /* This code will be executed even when the last bpf_tramp_image
/* This code will be executed when all bpf progs (both sleepable and * is alive. All progs are detached from the trampoline and the
* non-sleepable) went through * trampoline image is patched with jmp into epilogue to skip
* bpf_prog_put()->call_rcu[_tasks_trace]()->bpf_prog_free_deferred(). * fexit progs. The fentry-only trampoline will be freed via
* Hence no need for another synchronize_rcu_tasks_trace() here, * multiple rcu callbacks.
* but synchronize_rcu_tasks() is still needed, since trampoline
* may not have had any sleepable programs and we need to wait
* for tasks to get out of trampoline code before freeing it.
*/ */
synchronize_rcu_tasks();
bpf_jit_free_exec(tr->image);
hlist_del(&tr->hlist); hlist_del(&tr->hlist);
kfree(tr); kfree(tr);
out: out:
...@@ -478,8 +586,18 @@ void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start) ...@@ -478,8 +586,18 @@ void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
rcu_read_unlock_trace(); rcu_read_unlock_trace();
} }
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
percpu_ref_get(&tr->pcref);
}
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
percpu_ref_put(&tr->pcref);
}
int __weak int __weak
arch_prepare_bpf_trampoline(void *image, void *image_end, arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
const struct btf_func_model *m, u32 flags, const struct btf_func_model *m, u32 flags,
struct bpf_tramp_progs *tprogs, struct bpf_tramp_progs *tprogs,
void *orig_call) void *orig_call)
......
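The lifetime scheme described in the bpf_tramp_image_put() comment above boils down to a standard percpu_ref pattern. Below is a stripped-down sketch of that pattern only; the type and function names are illustrative, not the kernel's, and the real code additionally chains call_rcu_tasks()/call_rcu_tasks_trace() and frees the executable page as shown in the hunk above.

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Each run of the trampoline body is bracketed by a get/put on a percpu_ref
 * embedded in the image, so teardown can kill the ref and defer freeing
 * until the last in-flight caller has left.
 */
struct tramp_image_sketch {
    void *image;
    struct percpu_ref pcref;
    struct work_struct work;
};

static void image_free_work(struct work_struct *work)
{
    struct tramp_image_sketch *im =
        container_of(work, struct tramp_image_sketch, work);

    percpu_ref_exit(&im->pcref);
    kfree(im);                      /* real code also frees the exec page */
}

/* release callback: runs once the ref drops to zero after percpu_ref_kill() */
static void image_release(struct percpu_ref *ref)
{
    struct tramp_image_sketch *im =
        container_of(ref, struct tramp_image_sketch, pcref);

    INIT_WORK(&im->work, image_free_work);
    schedule_work(&im->work);       /* real code waits for RCU-tasks first */
}

static struct tramp_image_sketch *image_alloc(void)
{
    struct tramp_image_sketch *im = kzalloc(sizeof(*im), GFP_KERNEL);

    if (!im)
        return NULL;
    if (percpu_ref_init(&im->pcref, image_release, 0, GFP_KERNEL)) {
        kfree(im);
        return NULL;
    }
    return im;
}

/* what __bpf_tramp_enter()/__bpf_tramp_exit() do around the original call */
static void image_enter(struct tramp_image_sketch *im) { percpu_ref_get(&im->pcref); }
static void image_exit(struct tramp_image_sketch *im)  { percpu_ref_put(&im->pcref); }

/* teardown: once no new caller can reach the image, drop the initial ref */
static void image_put(struct tramp_image_sketch *im)
{
    percpu_ref_kill(&im->pcref);
}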
...@@ -5861,10 +5861,14 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, ...@@ -5861,10 +5861,14 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
{ {
bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
(opcode == BPF_SUB && !off_is_neg); (opcode == BPF_SUB && !off_is_neg);
u32 off; u32 off, max;
switch (ptr_reg->type) { switch (ptr_reg->type) {
case PTR_TO_STACK: case PTR_TO_STACK:
/* Offset 0 is out-of-bounds, but acceptable start for the
* left direction, see BPF_REG_FP.
*/
max = MAX_BPF_STACK + mask_to_left;
/* Indirect variable offset stack access is prohibited in /* Indirect variable offset stack access is prohibited in
* unprivileged mode so it's not handled here. * unprivileged mode so it's not handled here.
*/ */
...@@ -5872,16 +5876,17 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, ...@@ -5872,16 +5876,17 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
if (mask_to_left) if (mask_to_left)
*ptr_limit = MAX_BPF_STACK + off; *ptr_limit = MAX_BPF_STACK + off;
else else
*ptr_limit = -off; *ptr_limit = -off - 1;
return 0; return *ptr_limit >= max ? -ERANGE : 0;
case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE:
max = ptr_reg->map_ptr->value_size;
if (mask_to_left) { if (mask_to_left) {
*ptr_limit = ptr_reg->umax_value + ptr_reg->off; *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
} else { } else {
off = ptr_reg->smin_value + ptr_reg->off; off = ptr_reg->smin_value + ptr_reg->off;
*ptr_limit = ptr_reg->map_ptr->value_size - off; *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
} }
return 0; return *ptr_limit >= max ? -ERANGE : 0;
default: default:
return -EINVAL; return -EINVAL;
} }
...@@ -5934,6 +5939,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, ...@@ -5934,6 +5939,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
u32 alu_state, alu_limit; u32 alu_state, alu_limit;
struct bpf_reg_state tmp; struct bpf_reg_state tmp;
bool ret; bool ret;
int err;
if (can_skip_alu_sanitation(env, insn)) if (can_skip_alu_sanitation(env, insn))
return 0; return 0;
...@@ -5949,10 +5955,13 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, ...@@ -5949,10 +5955,13 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
alu_state |= ptr_is_dst_reg ? alu_state |= ptr_is_dst_reg ?
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
return 0; if (err < 0)
if (update_alu_sanitation_state(aux, alu_state, alu_limit)) return err;
return -EACCES;
err = update_alu_sanitation_state(aux, alu_state, alu_limit);
if (err < 0)
return err;
do_sim: do_sim:
/* Simulate and find potential out-of-bounds access under /* Simulate and find potential out-of-bounds access under
* speculative execution from truncation as a result of * speculative execution from truncation as a result of
...@@ -6103,7 +6112,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, ...@@ -6103,7 +6112,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
case BPF_ADD: case BPF_ADD:
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
if (ret < 0) { if (ret < 0) {
verbose(env, "R%d tried to add from different maps or paths\n", dst); verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
return ret; return ret;
} }
/* We can take a fixed offset as long as it doesn't overflow /* We can take a fixed offset as long as it doesn't overflow
...@@ -6158,7 +6167,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, ...@@ -6158,7 +6167,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
case BPF_SUB: case BPF_SUB:
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
if (ret < 0) { if (ret < 0) {
verbose(env, "R%d tried to sub from different maps or paths\n", dst); verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
return ret; return ret;
} }
if (dst_reg == off_reg) { if (dst_reg == off_reg) {
...@@ -11664,7 +11673,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) ...@@ -11664,7 +11673,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
off_reg = issrc ? insn->src_reg : insn->dst_reg; off_reg = issrc ? insn->src_reg : insn->dst_reg;
if (isneg) if (isneg)
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
......
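To make the off-by-one concrete: the PTR_TO_STACK branch above now returns an exact limit and rejects starting offsets that cannot be masked. Below is a userspace restatement with simplified types and an illustrative function name, not the verifier code itself.

#include <errno.h>
#include <stdbool.h>

#define MAX_BPF_STACK 512

/* "off" is the fixed stack offset of the pointer, e.g. -16 for fp-16.
 * Moving right from fp-16 now yields a limit of 15 (it used to be 16, one
 * byte past the object), and off = 0 moving right is rejected with -ERANGE.
 */
static int stack_ptr_limit(int off, bool mask_to_left, unsigned int *limit)
{
    unsigned int max = MAX_BPF_STACK + mask_to_left;

    if (mask_to_left)
        *limit = MAX_BPF_STACK + off;   /* off = -16 -> 496 */
    else
        *limit = -off - 1;              /* off = -16 -> 15  */
    return *limit >= max ? -ERANGE : 0;
}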
...@@ -5045,6 +5045,20 @@ struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr) ...@@ -5045,6 +5045,20 @@ struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
return NULL; return NULL;
} }
static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
{
struct ftrace_direct_func *direct;
direct = kmalloc(sizeof(*direct), GFP_KERNEL);
if (!direct)
return NULL;
direct->addr = addr;
direct->count = 0;
list_add_rcu(&direct->next, &ftrace_direct_funcs);
ftrace_direct_func_count++;
return direct;
}
/** /**
* register_ftrace_direct - Call a custom trampoline directly * register_ftrace_direct - Call a custom trampoline directly
* @ip: The address of the nop at the beginning of a function * @ip: The address of the nop at the beginning of a function
...@@ -5120,15 +5134,11 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr) ...@@ -5120,15 +5134,11 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
direct = ftrace_find_direct_func(addr); direct = ftrace_find_direct_func(addr);
if (!direct) { if (!direct) {
direct = kmalloc(sizeof(*direct), GFP_KERNEL); direct = ftrace_alloc_direct_func(addr);
if (!direct) { if (!direct) {
kfree(entry); kfree(entry);
goto out_unlock; goto out_unlock;
} }
direct->addr = addr;
direct->count = 0;
list_add_rcu(&direct->next, &ftrace_direct_funcs);
ftrace_direct_func_count++;
} }
entry->ip = ip; entry->ip = ip;
...@@ -5329,6 +5339,7 @@ int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry, ...@@ -5329,6 +5339,7 @@ int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
int modify_ftrace_direct(unsigned long ip, int modify_ftrace_direct(unsigned long ip,
unsigned long old_addr, unsigned long new_addr) unsigned long old_addr, unsigned long new_addr)
{ {
struct ftrace_direct_func *direct, *new_direct = NULL;
struct ftrace_func_entry *entry; struct ftrace_func_entry *entry;
struct dyn_ftrace *rec; struct dyn_ftrace *rec;
int ret = -ENODEV; int ret = -ENODEV;
...@@ -5344,6 +5355,20 @@ int modify_ftrace_direct(unsigned long ip, ...@@ -5344,6 +5355,20 @@ int modify_ftrace_direct(unsigned long ip,
if (entry->direct != old_addr) if (entry->direct != old_addr)
goto out_unlock; goto out_unlock;
direct = ftrace_find_direct_func(old_addr);
if (WARN_ON(!direct))
goto out_unlock;
if (direct->count > 1) {
ret = -ENOMEM;
new_direct = ftrace_alloc_direct_func(new_addr);
if (!new_direct)
goto out_unlock;
direct->count--;
new_direct->count++;
} else {
direct->addr = new_addr;
}
/* /*
* If there's no other ftrace callback on the rec->ip location, * If there's no other ftrace callback on the rec->ip location,
* then it can be changed directly by the architecture. * then it can be changed directly by the architecture.
...@@ -5357,6 +5382,14 @@ int modify_ftrace_direct(unsigned long ip, ...@@ -5357,6 +5382,14 @@ int modify_ftrace_direct(unsigned long ip,
ret = 0; ret = 0;
} }
if (unlikely(ret && new_direct)) {
direct->count++;
list_del_rcu(&new_direct->next);
synchronize_rcu_tasks();
kfree(new_direct);
ftrace_direct_func_count--;
}
out_unlock: out_unlock:
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
mutex_unlock(&direct_mutex); mutex_unlock(&direct_mutex);
......
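For orientation, this is roughly how a module exercises the API patched above, loosely following the in-tree samples/ftrace/ftrace-direct-modify.c. The trampoline symbols and the traced function are placeholders chosen for illustration, not something this patch adds; real direct trampolines are small arch-specific asm stubs.

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sched.h>

/* Placeholders for arch-specific asm trampolines (see samples/ftrace/). */
extern void my_tramp1(void);
extern void my_tramp2(void);

static unsigned long traced_ip;

static int __init direct_demo_init(void)
{
    int ret;

    traced_ip = (unsigned long)wake_up_process;     /* illustrative target */

    /* attach the first trampoline at the function's fentry site */
    ret = register_ftrace_direct(traced_ip, (unsigned long)my_tramp1);
    if (ret)
        return ret;

    /* switch live callers to the second trampoline; with the fix above this
     * also rebalances the ftrace_direct_func accounting when the old address
     * is shared by more than one registration
     */
    ret = modify_ftrace_direct(traced_ip, (unsigned long)my_tramp1,
                               (unsigned long)my_tramp2);
    if (ret)
        unregister_ftrace_direct(traced_ip, (unsigned long)my_tramp1);
    return ret;
}

static void __exit direct_demo_exit(void)
{
    unregister_ftrace_direct(traced_ip, (unsigned long)my_tramp2);
}

module_init(direct_demo_init);
module_exit(direct_demo_exit);
MODULE_LICENSE("GPL");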
...@@ -1181,7 +1181,8 @@ static int bpf_object__elf_init(struct bpf_object *obj) ...@@ -1181,7 +1181,8 @@ static int bpf_object__elf_init(struct bpf_object *obj)
if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
pr_warn("elf: failed to get section names strings from %s: %s\n", pr_warn("elf: failed to get section names strings from %s: %s\n",
obj->path, elf_errmsg(-1)); obj->path, elf_errmsg(-1));
return -LIBBPF_ERRNO__FORMAT; err = -LIBBPF_ERRNO__FORMAT;
goto errout;
} }
/* Old LLVM set e_machine to EM_NONE */ /* Old LLVM set e_machine to EM_NONE */
......
...@@ -40,7 +40,7 @@ static int libbpf_netlink_open(__u32 *nl_pid) ...@@ -40,7 +40,7 @@ static int libbpf_netlink_open(__u32 *nl_pid)
memset(&sa, 0, sizeof(sa)); memset(&sa, 0, sizeof(sa));
sa.nl_family = AF_NETLINK; sa.nl_family = AF_NETLINK;
sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
if (sock < 0) if (sock < 0)
return -errno; return -errno;
......
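A minimal userspace sketch of what the one-line change above gives callers: the netlink fd is opened close-on-exec, so it cannot leak into children of multi-threaded processes that fork() and exec(). The function name is illustrative, not the libbpf API.

#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static int open_rtnetlink_cloexec(void)
{
    struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
    int sock, err;

    /* SOCK_CLOEXEC is applied atomically at socket creation, so there is
     * no window in which another thread's exec() can inherit the fd.
     */
    sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
    if (sock < 0)
        return -errno;

    if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
        err = -errno;
        close(sock);
        return err;
    }
    return sock;
}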
...@@ -508,10 +508,8 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb) ...@@ -508,10 +508,8 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
} }
ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt)); ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
if (ret < 0) { if (ret < 0)
ERROR(ret); gopt.opt_class = 0;
return TC_ACT_SHOT;
}
bpf_trace_printk(fmt, sizeof(fmt), bpf_trace_printk(fmt, sizeof(fmt),
key.tunnel_id, key.remote_ipv4, gopt.opt_class); key.tunnel_id, key.remote_ipv4, gopt.opt_class);
......
...@@ -6,8 +6,9 @@ ...@@ -6,8 +6,9 @@
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.result = REJECT, .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
.errstr = "R0 tried to subtract pointer from scalar", .errstr = "R0 tried to subtract pointer from scalar",
.result = REJECT,
}, },
{ {
"check deducing bounds from const, 2", "check deducing bounds from const, 2",
...@@ -20,6 +21,8 @@ ...@@ -20,6 +21,8 @@
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
.result_unpriv = REJECT,
.result = ACCEPT, .result = ACCEPT,
.retval = 1, .retval = 1,
}, },
...@@ -31,8 +34,9 @@ ...@@ -31,8 +34,9 @@
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.result = REJECT, .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
.errstr = "R0 tried to subtract pointer from scalar", .errstr = "R0 tried to subtract pointer from scalar",
.result = REJECT,
}, },
{ {
"check deducing bounds from const, 4", "check deducing bounds from const, 4",
...@@ -45,6 +49,8 @@ ...@@ -45,6 +49,8 @@
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
.result_unpriv = REJECT,
.result = ACCEPT, .result = ACCEPT,
}, },
{ {
...@@ -55,8 +61,9 @@ ...@@ -55,8 +61,9 @@
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.result = REJECT, .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
.errstr = "R0 tried to subtract pointer from scalar", .errstr = "R0 tried to subtract pointer from scalar",
.result = REJECT,
}, },
{ {
"check deducing bounds from const, 6", "check deducing bounds from const, 6",
...@@ -67,8 +74,9 @@ ...@@ -67,8 +74,9 @@
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.result = REJECT, .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
.errstr = "R0 tried to subtract pointer from scalar", .errstr = "R0 tried to subtract pointer from scalar",
.result = REJECT,
}, },
{ {
"check deducing bounds from const, 7", "check deducing bounds from const, 7",
...@@ -80,8 +88,9 @@ ...@@ -80,8 +88,9 @@
offsetof(struct __sk_buff, mark)), offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.result = REJECT, .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
.errstr = "dereference of modified ctx ptr", .errstr = "dereference of modified ctx ptr",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
}, },
{ {
...@@ -94,8 +103,9 @@ ...@@ -94,8 +103,9 @@
offsetof(struct __sk_buff, mark)), offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.result = REJECT, .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
.errstr = "dereference of modified ctx ptr", .errstr = "dereference of modified ctx ptr",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
}, },
{ {
...@@ -106,8 +116,9 @@ ...@@ -106,8 +116,9 @@
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.result = REJECT, .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
.errstr = "R0 tried to subtract pointer from scalar", .errstr = "R0 tried to subtract pointer from scalar",
.result = REJECT,
}, },
{ {
"check deducing bounds from const, 10", "check deducing bounds from const, 10",
...@@ -119,6 +130,6 @@ ...@@ -119,6 +130,6 @@
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.result = REJECT,
.errstr = "math between ctx pointer and register with unbounded min value is not allowed", .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
.result = REJECT,
}, },
...@@ -75,6 +75,8 @@ ...@@ -75,6 +75,8 @@
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.fixup_map_hash_16b = { 4 }, .fixup_map_hash_16b = { 4 },
.result_unpriv = REJECT,
.errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
.result = ACCEPT, .result = ACCEPT,
}, },
{ {
...@@ -91,5 +93,7 @@ ...@@ -91,5 +93,7 @@
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.fixup_map_hash_16b = { 4 }, .fixup_map_hash_16b = { 4 },
.result_unpriv = REJECT,
.errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
.result = ACCEPT, .result = ACCEPT,
}, },
...@@ -497,7 +497,7 @@ ...@@ -497,7 +497,7 @@
.result = ACCEPT, .result = ACCEPT,
}, },
{ {
"unpriv: adding of fp", "unpriv: adding of fp, reg",
.insns = { .insns = {
BPF_MOV64_IMM(BPF_REG_0, 0), BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_1, 0), BPF_MOV64_IMM(BPF_REG_1, 0),
...@@ -505,6 +505,19 @@ ...@@ -505,6 +505,19 @@
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: adding of fp, imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range", .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.result_unpriv = REJECT, .result_unpriv = REJECT,
.result = ACCEPT, .result = ACCEPT,
......
...@@ -169,7 +169,7 @@ ...@@ -169,7 +169,7 @@
.fixup_map_array_48b = { 1 }, .fixup_map_array_48b = { 1 },
.result = ACCEPT, .result = ACCEPT,
.result_unpriv = REJECT, .result_unpriv = REJECT,
.errstr_unpriv = "R2 tried to add from different maps or paths", .errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types",
.retval = 0, .retval = 0,
}, },
{ {
...@@ -516,6 +516,27 @@ ...@@ -516,6 +516,27 @@
.result = ACCEPT, .result = ACCEPT,
.retval = 0xabcdef12, .retval = 0xabcdef12,
}, },
{
"map access: value_ptr += N, value_ptr -= N known scalar",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV32_IMM(BPF_REG_1, 0x12345678),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.retval = 0x12345678,
},
{ {
"map access: unknown scalar += value_ptr, 1", "map access: unknown scalar += value_ptr, 1",
.insns = { .insns = {
......