Commit 01cc55af authored by Yonghong Song, committed by Alexei Starovoitov

bpf: Add bpf_this_cpu_ptr/bpf_per_cpu_ptr support for allocated percpu obj

The bpf helpers bpf_this_cpu_ptr() and bpf_per_cpu_ptr() are re-purposed
for allocated percpu objects. For an allocated percpu obj,
the reg type is 'PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU'.

The return type for these two re-purposed helpers is
'PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU'.
The MEM_ALLOC flag allows the per-cpu data to be both read and written.

Since the memory allocator bpf_mem_alloc() returns
a ptr to a percpu ptr for percpu data, the first argument
of bpf_this_cpu_ptr() and bpf_per_cpu_ptr() is patched
with a dereference before being passed to the helper.
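
For illustration, a rough BPF-side usage sketch. The names (struct
val_t, struct elem, and the variable e pointing to a looked-up map
value) are made up for this example, and bpf_percpu_obj_new(),
bpf_percpu_obj_drop(), bpf_kptr_xchg() and the __percpu_kptr tag are
assumed from earlier patches in this series:

  struct val_t { long b, c, d; };
  struct elem { struct val_t __percpu_kptr *pc; };

  /* prog 1: allocate a percpu obj and publish it in a map value,
   * where e = bpf_map_lookup_elem(...) (NULL check elided).
   */
  struct val_t __percpu_kptr *p = bpf_percpu_obj_new(struct val_t);

  if (p) {
          p = bpf_kptr_xchg(&e->pc, p);
          if (p) /* slot was already occupied, drop our copy */
                  bpf_percpu_obj_drop(p);
  }

  /* prog 2: the load of e->pc is typed
   * PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU by the verifier.
   */
  p = e->pc;
  if (p) {
          struct val_t *v = bpf_this_cpu_ptr(p);

          /* v is PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU: read and write ok */
          v->b = 1;
  }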
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230827152749.1997202-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 36d8bdf7
@@ -480,6 +480,7 @@ struct bpf_insn_aux_data {
 	bool zext_dst; /* this insn zero extends dst reg */
 	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
 	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
+	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
 	u8 alu_state; /* used in combination with alu_limit */
 	/* below fields are initialized once */
@@ -6221,7 +6221,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	}

 	if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
-	    !reg->ref_obj_id) {
+	    !(reg->type & MEM_RCU) && !reg->ref_obj_id) {
 		verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
 		return -EFAULT;
 	}
@@ -7765,6 +7765,7 @@ static const struct bpf_reg_types btf_ptr_types = {
 static const struct bpf_reg_types percpu_btf_ptr_types = {
 	.types = {
 		PTR_TO_BTF_ID | MEM_PERCPU,
+		PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU,
 		PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
 	}
 };
@@ -7941,6 +7942,7 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
 		}
 		break;
 	case PTR_TO_BTF_ID | MEM_PERCPU:
+	case PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU:
 	case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
 		/* Handled by helper specific checks */
 		break;
@@ -9547,6 +9549,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 			     int *insn_idx_p)
 {
 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
+	bool returns_cpu_specific_alloc_ptr = false;
 	const struct bpf_func_proto *fn = NULL;
 	enum bpf_return_type ret_type;
 	enum bpf_type_flag ret_flag;
@@ -9785,6 +9788,23 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		break;
 	}
+	case BPF_FUNC_per_cpu_ptr:
+	case BPF_FUNC_this_cpu_ptr:
+	{
+		struct bpf_reg_state *reg = &regs[BPF_REG_1];
+		const struct btf_type *type;
+
+		if (reg->type & MEM_RCU) {
+			type = btf_type_by_id(reg->btf, reg->btf_id);
+			if (!type || !btf_type_is_struct(type)) {
+				verbose(env, "Helper has invalid btf/btf_id in R1\n");
+				return -EFAULT;
+			}
+			returns_cpu_specific_alloc_ptr = true;
+			env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true;
+		}
+		break;
+	}
 	case BPF_FUNC_user_ringbuf_drain:
 		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
 					set_user_ringbuf_callback_state);
@@ -9874,14 +9894,18 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
 			regs[BPF_REG_0].mem_size = tsize;
 		} else {
-			/* MEM_RDONLY may be carried from ret_flag, but it
-			 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
-			 * it will confuse the check of PTR_TO_BTF_ID in
-			 * check_mem_access().
-			 */
-			ret_flag &= ~MEM_RDONLY;
+			if (returns_cpu_specific_alloc_ptr) {
+				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU;
+			} else {
+				/* MEM_RDONLY may be carried from ret_flag, but it
+				 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
+				 * it will confuse the check of PTR_TO_BTF_ID in
+				 * check_mem_access().
+				 */
+				ret_flag &= ~MEM_RDONLY;
+				regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
+			}

-			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
 			regs[BPF_REG_0].btf = meta.ret_btf;
 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
 		}
@@ -18676,6 +18700,25 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			goto patch_call_imm;
 		}

+		/* bpf_per_cpu_ptr() and bpf_this_cpu_ptr() */
+		if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) {
+			/* patch with 'r1 = *(u64 *)(r1 + 0)' since for percpu data,
+			 * bpf_mem_alloc() returns a ptr to the percpu data ptr.
+			 */
+			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+			insn_buf[1] = *insn;
+			cnt = 2;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			goto patch_call_imm;
+		}
+
 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
 		 * and other inlining handlers are currently limited to 64 bit
 		 * only.
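
For illustration, the fixup above rewrites the call sequence

  r1 = <ptr to percpu ptr>          /* value returned by bpf_mem_alloc() */
  call bpf_this_cpu_ptr

into

  r1 = *(u64 *)(r1 + 0)             /* load the actual percpu ptr */
  call bpf_this_cpu_ptr

so that the helper receives the percpu pointer itself, the same form it
already expects for static percpu variables.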