Commit 69443c47 authored by Alexei Starovoitov

Merge branch 'bpf: refine retval for bpf_get_task_stack helper'

Dave Marchevsky says:

====================

Similarly to the bpf_get_stack helper, bpf_get_task_stack's return value
can be more tightly bound by the verifier - it's the number of bytes
written to a user-supplied buffer, or a negative error value. Currently
the verifier believes bpf_get_task_stack's retval bounds to be unknown,
so programs must add extraneous bounds checks before the result can be used.
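For illustration only (not part of this series), a minimal BPF iterator sketch of the kind of redundant clamp that was previously needed; the program name, buffer name and its size are made up:

/* Hypothetical sketch: before this change the verifier treated the return
 * value of bpf_get_task_stack() as completely unknown, so a program had to
 * clamp it before using it as a length.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

unsigned long stack[64];	/* made-up buffer for illustration */

SEC("iter/task")
int dump_stack_clamped(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	long res;

	if (!task)
		return 0;

	res = bpf_get_task_stack(task, stack, sizeof(stack), BPF_F_USER_STACK);
	if (res <= 0)
		return 0;
	/* Extraneous clamp: res can never exceed sizeof(stack), but without
	 * retval refinement the verifier does not know that.
	 */
	if (res > sizeof(stack))
		res = sizeof(stack);

	bpf_seq_write(seq, stack, res);
	return 0;
}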

Adding it to do_refine_retval_range fixes the issue, as evidenced by
new selftests which fail to load if retval bounds are not refined.

v2: Addressed comment nit in patch 3
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 137733d0 c77cec5c
@@ -5808,6 +5808,7 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
	if (ret_type != RET_INTEGER ||
	    (func_id != BPF_FUNC_get_stack &&
	     func_id != BPF_FUNC_get_task_stack &&
	     func_id != BPF_FUNC_probe_read_str &&
	     func_id != BPF_FUNC_probe_read_kernel_str &&
	     func_id != BPF_FUNC_probe_read_user_str))
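For context, a rough paraphrase (reconstructed from memory, not quoted from the tree) of the refinement that follows this check: R0's signed upper bound is clamped to the maximum buffer size recorded while verifying the helper's buf/size argument pair, and the remaining bounds are re-derived from that:

/* Rough paraphrase of the refinement step, not verbatim kernel code. */
struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];

ret_reg->smax_value = meta->msize_max_value;	/* retval <= size of supplied buffer */
ret_reg->s32_max_value = meta->msize_max_value;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
__update_reg_bounds(ret_reg);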
@@ -147,6 +147,7 @@ static void test_task_stack(void)
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	bpf_iter_task_stack__destroy(skel);
}
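do_dummy_read() is a pre-existing helper in this selftest file; roughly (a sketch from memory, not part of this diff), it attaches the iterator program, creates an iterator fd, and drains it with read() so the BPF program actually runs:

/* Sketch of what a helper like do_dummy_read() does; details are assumed. */
#include <unistd.h>
#include <bpf/libbpf.h>

static void do_dummy_read(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		goto free_link;

	/* contents are not checked, just make sure read() completes */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;

	close(iter_fd);
free_link:
	bpf_link__destroy(link);
}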
@@ -35,3 +35,30 @@ int dump_task_stack(struct bpf_iter__task *ctx)
	return 0;
}

SEC("iter/task")
int get_task_user_stacks(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	uint64_t buf_sz = 0;
	int64_t res;

	if (task == (void *)0)
		return 0;

	res = bpf_get_task_stack(task, entries,
			MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, BPF_F_USER_STACK);
	if (res <= 0)
		return 0;

	buf_sz += res;

	/* If the verifier doesn't refine bpf_get_task_stack res, and instead
	 * assumes res is entirely unknown, this program will fail to load as
	 * the verifier will believe that max buf_sz value allows reading
	 * past the end of entries in bpf_seq_write call
	 */
	bpf_seq_write(seq, &entries, buf_sz);

	return 0;
}
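get_task_user_stacks reuses globals declared earlier in bpf_iter_task_stack.c, outside this hunk. As an assumption about that surrounding context (inferred from the existing dump_task_stack program), they look roughly like the following, which is why a retval bounded by MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG keeps bpf_seq_write within entries:

/* Assumed declarations from earlier in bpf_iter_task_stack.c (not shown in
 * this hunk). With the refined retval, res and hence buf_sz can never exceed
 * sizeof(entries), so the bpf_seq_write() length is provably in bounds.
 */
#define MAX_STACK_TRACE_DEPTH	64
unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};
#define SIZE_OF_ULONG (sizeof(unsigned long))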
@@ -42,3 +42,46 @@
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_get_task_stack return R0 range is refined",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task
	BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write
	BPF_MOV64_IMM(BPF_REG_3, 48),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_EMIT_CALL(BPF_FUNC_get_task_stack),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_seq_write),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACING,
	.expected_attach_type = BPF_TRACE_ITER,
	.kfunc = "task",
	.runs = -1, // Don't run, just load
	.fixup_map_array_48b = { 3 },
},