Commit 4153b89b authored by Alexei Starovoitov

Merge branch 'refactor-check_func_arg'

Lorenz Bauer says:

====================
Changes in v4:
- Output the desired type on BTF ID mismatch (Martin)

Changes in v3:
- Fix BTF_ID_LIST_SINGLE if BTF is disabled (Martin)
- Drop incorrect arg_btf_id in bpf_sk_storage.c (Martin)
- Check for arg_btf_id in check_func_proto (Martin)
- Drop incorrect PTR_TO_BTF_ID from fullsock_types (Martin)
- Introduce btf_seq_file_ids in bpf_trace.c to reduce duplication

Changes in v2:
- Make the series stand alone (Martin)
- Drop incorrect BTF_SET_START fix (Andrii)
- Only support a single BTF ID per argument (Martin)
- Introduce BTF_ID_LIST_SINGLE macro (Andrii)
- Skip check_ctx_reg iff register is NULL
- Change output of check_reg_type slightly, to avoid touching tests

Original cover letter:

Currently, check_func_arg has this pretty gnarly if statement that
compares the valid arg_type with the actual reg_type. Sprinkled
in-between are checks for register_is_null, to short circuit these
tests if we're dealing with a nullable arg_type. There is also some
code for later bounds / access checking hidden away in there.

This series of patches refactors the function into something like this:

   if (reg_is_null && arg_type_is_nullable)
     skip type checking

   do type checking, including BTF validation

   do bounds / access checking

The type checking is now table driven, which makes it easy to extend
the acceptable types. Maybe more importantly, using a table makes it
easy to provide more helpful verifier output (see the last patch).
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 31f23a6a f79e7ea5
...@@ -292,6 +292,7 @@ enum bpf_arg_type { ...@@ -292,6 +292,7 @@ enum bpf_arg_type {
ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
__BPF_ARG_TYPE_MAX,
}; };
/* type of values returned from helper functions */ /* type of values returned from helper functions */
...@@ -326,12 +327,16 @@ struct bpf_func_proto { ...@@ -326,12 +327,16 @@ struct bpf_func_proto {
}; };
enum bpf_arg_type arg_type[5]; enum bpf_arg_type arg_type[5];
}; };
int *btf_id; /* BTF ids of arguments */ union {
bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is struct {
* valid. Often used if more u32 *arg1_btf_id;
* than one btf id is permitted u32 *arg2_btf_id;
* for this argument. u32 *arg3_btf_id;
*/ u32 *arg4_btf_id;
u32 *arg5_btf_id;
};
u32 *arg_btf_id[5];
};
int *ret_btf_id; /* return value btf_id */ int *ret_btf_id; /* return value btf_id */
bool (*allowed)(const struct bpf_prog *prog); bool (*allowed)(const struct bpf_prog *prog);
}; };
...@@ -1385,8 +1390,6 @@ int btf_struct_access(struct bpf_verifier_log *log, ...@@ -1385,8 +1390,6 @@ int btf_struct_access(struct bpf_verifier_log *log,
u32 *next_btf_id); u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log, bool btf_struct_ids_match(struct bpf_verifier_log *log,
int off, u32 id, u32 need_type_id); int off, u32 id, u32 need_type_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
const struct bpf_func_proto *fn, int);
int btf_distill_func_proto(struct bpf_verifier_log *log, int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf, struct btf *btf,
...@@ -1905,6 +1908,6 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, ...@@ -1905,6 +1908,6 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2); void *addr1, void *addr2);
struct btf_id_set; struct btf_id_set;
bool btf_id_set_contains(struct btf_id_set *set, u32 id); bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
#endif /* _LINUX_BPF_H */ #endif /* _LINUX_BPF_H */
...@@ -76,6 +76,13 @@ extern u32 name[]; ...@@ -76,6 +76,13 @@ extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name) \ #define BTF_ID_LIST_GLOBAL(name) \
__BTF_ID_LIST(name, globl) __BTF_ID_LIST(name, globl)
/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
* a single entry.
*/
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
/* /*
* The BTF_ID_UNUSED macro defines 4 zero bytes. * The BTF_ID_UNUSED macro defines 4 zero bytes.
* It's used when we want to define 'unused' entry * It's used when we want to define 'unused' entry
...@@ -140,6 +147,7 @@ extern struct btf_id_set name; ...@@ -140,6 +147,7 @@ extern struct btf_id_set name;
#define BTF_ID(prefix, name) #define BTF_ID(prefix, name)
#define BTF_ID_UNUSED #define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #define BTF_ID_LIST_GLOBAL(name) u32 name[1];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 }; #define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 }; #define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_SET_END(name) #define BTF_SET_END(name)
......
...@@ -249,9 +249,7 @@ const struct bpf_map_ops inode_storage_map_ops = { ...@@ -249,9 +249,7 @@ const struct bpf_map_ops inode_storage_map_ops = {
.map_owner_storage_ptr = inode_storage_ptr, .map_owner_storage_ptr = inode_storage_ptr,
}; };
BTF_ID_LIST(bpf_inode_storage_btf_ids) BTF_ID_LIST_SINGLE(bpf_inode_storage_btf_ids, struct, inode)
BTF_ID_UNUSED
BTF_ID(struct, inode)
const struct bpf_func_proto bpf_inode_storage_get_proto = { const struct bpf_func_proto bpf_inode_storage_get_proto = {
.func = bpf_inode_storage_get, .func = bpf_inode_storage_get,
...@@ -259,9 +257,9 @@ const struct bpf_func_proto bpf_inode_storage_get_proto = { ...@@ -259,9 +257,9 @@ const struct bpf_func_proto bpf_inode_storage_get_proto = {
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR, .arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID, .arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &bpf_inode_storage_btf_ids[0],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING,
.btf_id = bpf_inode_storage_btf_ids,
}; };
const struct bpf_func_proto bpf_inode_storage_delete_proto = { const struct bpf_func_proto bpf_inode_storage_delete_proto = {
...@@ -270,5 +268,5 @@ const struct bpf_func_proto bpf_inode_storage_delete_proto = { ...@@ -270,5 +268,5 @@ const struct bpf_func_proto bpf_inode_storage_delete_proto = {
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR, .arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID, .arg2_type = ARG_PTR_TO_BTF_ID,
.btf_id = bpf_inode_storage_btf_ids, .arg2_btf_id = &bpf_inode_storage_btf_ids[0],
}; };
...@@ -4193,19 +4193,6 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log, ...@@ -4193,19 +4193,6 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
return true; return true;
} }
int btf_resolve_helper_id(struct bpf_verifier_log *log,
const struct bpf_func_proto *fn, int arg)
{
int id;
if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID || !btf_vmlinux)
return -EINVAL;
id = fn->btf_id[arg];
if (!id || id > btf_vmlinux->nr_types)
return -EINVAL;
return id;
}
static int __get_type_size(struct btf *btf, u32 btf_id, static int __get_type_size(struct btf *btf, u32 btf_id,
const struct btf_type **bad_type) const struct btf_type **bad_type)
{ {
...@@ -4772,7 +4759,7 @@ static int btf_id_cmp_func(const void *a, const void *b) ...@@ -4772,7 +4759,7 @@ static int btf_id_cmp_func(const void *a, const void *b)
return *pa - *pb; return *pa - *pb;
} }
bool btf_id_set_contains(struct btf_id_set *set, u32 id) bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
{ {
return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL; return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
} }
...@@ -665,18 +665,17 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, ...@@ -665,18 +665,17 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
return __bpf_get_stack(regs, task, NULL, buf, size, flags); return __bpf_get_stack(regs, task, NULL, buf, size, flags);
} }
BTF_ID_LIST(bpf_get_task_stack_btf_ids) BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
BTF_ID(struct, task_struct)
const struct bpf_func_proto bpf_get_task_stack_proto = { const struct bpf_func_proto bpf_get_task_stack_proto = {
.func = bpf_get_task_stack, .func = bpf_get_task_stack,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_get_task_stack_btf_ids[0],
.arg2_type = ARG_PTR_TO_UNINIT_MEM, .arg2_type = ARG_PTR_TO_UNINIT_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING,
.btf_id = bpf_get_task_stack_btf_ids,
}; };
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
......
...@@ -238,7 +238,6 @@ struct bpf_call_arg_meta { ...@@ -238,7 +238,6 @@ struct bpf_call_arg_meta {
u64 msize_max_value; u64 msize_max_value;
int ref_obj_id; int ref_obj_id;
int func_id; int func_id;
u32 btf_id;
}; };
struct btf *btf_vmlinux; struct btf *btf_vmlinux;
...@@ -436,6 +435,15 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type) ...@@ -436,6 +435,15 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
return type == ARG_PTR_TO_SOCK_COMMON; return type == ARG_PTR_TO_SOCK_COMMON;
} }
static bool arg_type_may_be_null(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
type == ARG_PTR_TO_MEM_OR_NULL ||
type == ARG_PTR_TO_CTX_OR_NULL ||
type == ARG_PTR_TO_SOCKET_OR_NULL ||
type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
}
/* Determine whether the function releases some resources allocated by another /* Determine whether the function releases some resources allocated by another
* function call. The first reference type argument will be assumed to be * function call. The first reference type argument will be assumed to be
* released by release_reference(). * released by release_reference().
...@@ -3641,18 +3649,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, ...@@ -3641,18 +3649,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
struct bpf_func_state *state = func(env, reg); struct bpf_func_state *state = func(env, reg);
int err, min_off, max_off, i, j, slot, spi; int err, min_off, max_off, i, j, slot, spi;
if (reg->type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(reg))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[reg->type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
if (tnum_is_const(reg->var_off)) { if (tnum_is_const(reg->var_off)) {
min_off = max_off = reg->var_off.value + reg->off; min_off = max_off = reg->var_off.value + reg->off;
err = __check_stack_boundary(env, regno, min_off, access_size, err = __check_stack_boundary(env, regno, min_off, access_size,
...@@ -3797,9 +3793,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, ...@@ -3797,9 +3793,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
access_size, zero_size_allowed, access_size, zero_size_allowed,
"rdwr", "rdwr",
&env->prog->aux->max_rdwr_access); &env->prog->aux->max_rdwr_access);
default: /* scalar_value|ptr_to_stack or invalid ptr */ case PTR_TO_STACK:
return check_stack_boundary(env, regno, access_size, return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta); zero_size_allowed, meta);
default: /* scalar_value or invalid ptr */
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(reg))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[reg->type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
} }
} }
...@@ -3831,10 +3837,6 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, ...@@ -3831,10 +3837,6 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
struct bpf_map *map = reg->map_ptr; struct bpf_map *map = reg->map_ptr;
u64 val = reg->var_off.value; u64 val = reg->var_off.value;
if (reg->type != PTR_TO_MAP_VALUE) {
verbose(env, "R%d is not a pointer to map_value\n", regno);
return -EINVAL;
}
if (!is_const) { if (!is_const) {
verbose(env, verbose(env,
"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
...@@ -3901,12 +3903,6 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type) ...@@ -3901,12 +3903,6 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
type == ARG_CONST_SIZE_OR_ZERO; type == ARG_CONST_SIZE_OR_ZERO;
} }
static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type)
{
return type == ARG_PTR_TO_ALLOC_MEM ||
type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
}
static bool arg_type_is_alloc_size(enum bpf_arg_type type) static bool arg_type_is_alloc_size(enum bpf_arg_type type)
{ {
return type == ARG_CONST_ALLOC_SIZE_OR_ZERO; return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
...@@ -3955,14 +3951,115 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env, ...@@ -3955,14 +3951,115 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env,
return 0; return 0;
} }
struct bpf_reg_types {
const enum bpf_reg_type types[10];
};
static const struct bpf_reg_types map_key_value_types = {
.types = {
PTR_TO_STACK,
PTR_TO_PACKET,
PTR_TO_PACKET_META,
PTR_TO_MAP_VALUE,
},
};
static const struct bpf_reg_types sock_types = {
.types = {
PTR_TO_SOCK_COMMON,
PTR_TO_SOCKET,
PTR_TO_TCP_SOCK,
PTR_TO_XDP_SOCK,
},
};
static const struct bpf_reg_types mem_types = {
.types = {
PTR_TO_STACK,
PTR_TO_PACKET,
PTR_TO_PACKET_META,
PTR_TO_MAP_VALUE,
PTR_TO_MEM,
PTR_TO_RDONLY_BUF,
PTR_TO_RDWR_BUF,
},
};
static const struct bpf_reg_types int_ptr_types = {
.types = {
PTR_TO_STACK,
PTR_TO_PACKET,
PTR_TO_PACKET_META,
PTR_TO_MAP_VALUE,
},
};
static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
static const struct bpf_reg_types *compatible_reg_types[] = {
[ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
[ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
[ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
[ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
[ARG_CONST_SIZE] = &scalar_types,
[ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
[ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
[ARG_CONST_MAP_PTR] = &const_map_ptr_types,
[ARG_PTR_TO_CTX] = &context_types,
[ARG_PTR_TO_CTX_OR_NULL] = &context_types,
[ARG_PTR_TO_SOCK_COMMON] = &sock_types,
[ARG_PTR_TO_SOCKET] = &fullsock_types,
[ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
[ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
[ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
[ARG_PTR_TO_MEM] = &mem_types,
[ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
[ARG_PTR_TO_UNINIT_MEM] = &mem_types,
[ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
[ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
[ARG_PTR_TO_INT] = &int_ptr_types,
[ARG_PTR_TO_LONG] = &int_ptr_types,
[__BPF_ARG_TYPE_MAX] = NULL,
};
static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
const struct bpf_reg_types *compatible)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected, type = reg->type;
int i, j;
for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
expected = compatible->types[i];
if (expected == NOT_INIT)
break;
if (type == expected)
return 0;
}
verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
for (j = 0; j + 1 < i; j++)
verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
return -EACCES;
}
static int check_func_arg(struct bpf_verifier_env *env, u32 arg, static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
struct bpf_call_arg_meta *meta, struct bpf_call_arg_meta *meta,
const struct bpf_func_proto *fn) const struct bpf_func_proto *fn)
{ {
u32 regno = BPF_REG_1 + arg; u32 regno = BPF_REG_1 + arg;
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
enum bpf_arg_type arg_type = fn->arg_type[arg]; enum bpf_arg_type arg_type = fn->arg_type[arg];
const struct bpf_reg_types *compatible;
enum bpf_reg_type type = reg->type;
int err = 0; int err = 0;
if (arg_type == ARG_DONTCARE) if (arg_type == ARG_DONTCARE)
...@@ -3995,125 +4092,48 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, ...@@ -3995,125 +4092,48 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return err; return err;
} }
if (arg_type == ARG_PTR_TO_MAP_KEY || if (register_is_null(reg) && arg_type_may_be_null(arg_type))
arg_type == ARG_PTR_TO_MAP_VALUE || /* A NULL register has a SCALAR_VALUE type, so skip
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || * type checking.
arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { */
expected_type = PTR_TO_STACK; goto skip_type_check;
if (register_is_null(reg) &&
arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) compatible = compatible_reg_types[arg_type];
/* final test in check_stack_boundary() */; if (!compatible) {
else if (!type_is_pkt_pointer(type) && verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
type != PTR_TO_MAP_VALUE && return -EFAULT;
type != expected_type) }
goto err_type;
} else if (arg_type == ARG_CONST_SIZE || err = check_reg_type(env, regno, compatible);
arg_type == ARG_CONST_SIZE_OR_ZERO || if (err)
arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) { return err;
expected_type = SCALAR_VALUE;
if (type != expected_type) if (type == PTR_TO_BTF_ID) {
goto err_type; const u32 *btf_id = fn->arg_btf_id[arg];
} else if (arg_type == ARG_CONST_MAP_PTR) {
expected_type = CONST_PTR_TO_MAP; if (!btf_id) {
if (type != expected_type) verbose(env, "verifier internal error: missing BTF ID\n");
goto err_type; return -EFAULT;
} else if (arg_type == ARG_PTR_TO_CTX ||
arg_type == ARG_PTR_TO_CTX_OR_NULL) {
expected_type = PTR_TO_CTX;
if (!(register_is_null(reg) &&
arg_type == ARG_PTR_TO_CTX_OR_NULL)) {
if (type != expected_type)
goto err_type;
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
} }
} else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
expected_type = PTR_TO_SOCK_COMMON;
/* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
if (!type_is_sk_pointer(type))
goto err_type;
if (reg->ref_obj_id) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,
meta->ref_obj_id);
return -EFAULT;
}
meta->ref_obj_id = reg->ref_obj_id;
}
} else if (arg_type == ARG_PTR_TO_SOCKET ||
arg_type == ARG_PTR_TO_SOCKET_OR_NULL) {
expected_type = PTR_TO_SOCKET;
if (!(register_is_null(reg) &&
arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) {
if (type != expected_type)
goto err_type;
}
} else if (arg_type == ARG_PTR_TO_BTF_ID) {
bool ids_match = false;
expected_type = PTR_TO_BTF_ID;
if (type != expected_type)
goto err_type;
if (!fn->check_btf_id) {
if (reg->btf_id != meta->btf_id) {
ids_match = btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
meta->btf_id);
if (!ids_match) {
verbose(env, "Helper has type %s got %s in R%d\n",
kernel_type_name(meta->btf_id),
kernel_type_name(reg->btf_id), regno);
return -EACCES;
}
}
} else if (!fn->check_btf_id(reg->btf_id, arg)) {
verbose(env, "Helper does not support %s in R%d\n",
kernel_type_name(reg->btf_id), regno);
if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id, *btf_id)) {
verbose(env, "R%d is of type %s but %s is expected\n",
regno, kernel_type_name(reg->btf_id), kernel_type_name(*btf_id));
return -EACCES; return -EACCES;
} }
if ((reg->off && !ids_match) || !tnum_is_const(reg->var_off) || reg->var_off.value) { if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
regno); regno);
return -EACCES; return -EACCES;
} }
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { } else if (type == PTR_TO_CTX) {
if (meta->func_id == BPF_FUNC_spin_lock) { err = check_ctx_reg(env, reg, regno);
if (process_spin_lock(env, regno, true)) if (err < 0)
return -EACCES; return err;
} else if (meta->func_id == BPF_FUNC_spin_unlock) { }
if (process_spin_lock(env, regno, false))
return -EACCES; skip_type_check:
} else { if (reg->ref_obj_id) {
verbose(env, "verifier internal error\n");
return -EFAULT;
}
} else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a SCALAR_VALUE type. Final test
* happens during stack boundary checking.
*/
if (register_is_null(reg) &&
(arg_type == ARG_PTR_TO_MEM_OR_NULL ||
arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL))
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
type != PTR_TO_MEM &&
type != PTR_TO_RDONLY_BUF &&
type != PTR_TO_RDWR_BUF &&
type != expected_type)
goto err_type;
meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
} else if (arg_type_is_alloc_mem_ptr(arg_type)) {
expected_type = PTR_TO_MEM;
if (register_is_null(reg) &&
arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (type != expected_type)
goto err_type;
if (meta->ref_obj_id) { if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id, regno, reg->ref_obj_id,
...@@ -4121,15 +4141,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, ...@@ -4121,15 +4141,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return -EFAULT; return -EFAULT;
} }
meta->ref_obj_id = reg->ref_obj_id; meta->ref_obj_id = reg->ref_obj_id;
} else if (arg_type_is_int_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
type != expected_type)
goto err_type;
} else {
verbose(env, "unsupported arg_type %d\n", arg_type);
return -EFAULT;
} }
if (arg_type == ARG_CONST_MAP_PTR) { if (arg_type == ARG_CONST_MAP_PTR) {
...@@ -4168,6 +4179,22 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, ...@@ -4168,6 +4179,22 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
err = check_helper_mem_access(env, regno, err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false, meta->map_ptr->value_size, false,
meta); meta);
} else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
if (meta->func_id == BPF_FUNC_spin_lock) {
if (process_spin_lock(env, regno, true))
return -EACCES;
} else if (meta->func_id == BPF_FUNC_spin_unlock) {
if (process_spin_lock(env, regno, false))
return -EACCES;
} else {
verbose(env, "verifier internal error\n");
return -EFAULT;
}
} else if (arg_type_is_mem_ptr(arg_type)) {
/* The access to this pointer is only checked when we hit the
* next is_mem_size argument below.
*/
meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
} else if (arg_type_is_mem_size(arg_type)) { } else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
...@@ -4233,10 +4260,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, ...@@ -4233,10 +4260,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
} }
return err; return err;
err_type:
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[type], reg_type_str[expected_type]);
return -EACCES;
} }
static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
...@@ -4547,10 +4570,22 @@ static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) ...@@ -4547,10 +4570,22 @@ static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
return count <= 1; return count <= 1;
} }
static bool check_btf_id_ok(const struct bpf_func_proto *fn)
{
int i;
for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++)
if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
return false;
return true;
}
static int check_func_proto(const struct bpf_func_proto *fn, int func_id) static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{ {
return check_raw_mode_ok(fn) && return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) && check_arg_pair_ok(fn) &&
check_btf_id_ok(fn) &&
check_refcount_ok(fn, func_id) ? 0 : -EINVAL; check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
} }
...@@ -4946,11 +4981,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn ...@@ -4946,11 +4981,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
meta.func_id = func_id; meta.func_id = func_id;
/* check args */ /* check args */
for (i = 0; i < 5; i++) { for (i = 0; i < 5; i++) {
if (!fn->check_btf_id) {
err = btf_resolve_helper_id(&env->log, fn, i);
if (err > 0)
meta.btf_id = err;
}
err = check_func_arg(env, i, &meta, fn); err = check_func_arg(env, i, &meta, fn);
if (err) if (err)
return err; return err;
......
...@@ -743,19 +743,18 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, ...@@ -743,19 +743,18 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
return err; return err;
} }
BTF_ID_LIST(bpf_seq_printf_btf_ids) BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
BTF_ID(struct, seq_file)
static const struct bpf_func_proto bpf_seq_printf_proto = { static const struct bpf_func_proto bpf_seq_printf_proto = {
.func = bpf_seq_printf, .func = bpf_seq_printf,
.gpl_only = true, .gpl_only = true,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_seq_file_ids[0],
.arg2_type = ARG_PTR_TO_MEM, .arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE, .arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_PTR_TO_MEM_OR_NULL, .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
.arg5_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_seq_printf_btf_ids,
}; };
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
...@@ -763,17 +762,14 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) ...@@ -763,17 +762,14 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
return seq_write(m, data, len) ? -EOVERFLOW : 0; return seq_write(m, data, len) ? -EOVERFLOW : 0;
} }
BTF_ID_LIST(bpf_seq_write_btf_ids)
BTF_ID(struct, seq_file)
static const struct bpf_func_proto bpf_seq_write_proto = { static const struct bpf_func_proto bpf_seq_write_proto = {
.func = bpf_seq_write, .func = bpf_seq_write,
.gpl_only = true, .gpl_only = true,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_seq_file_ids[0],
.arg2_type = ARG_PTR_TO_MEM, .arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_seq_write_btf_ids,
}; };
static __always_inline int static __always_inline int
...@@ -1130,17 +1126,16 @@ static bool bpf_d_path_allowed(const struct bpf_prog *prog) ...@@ -1130,17 +1126,16 @@ static bool bpf_d_path_allowed(const struct bpf_prog *prog)
return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id); return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
} }
BTF_ID_LIST(bpf_d_path_btf_ids) BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
BTF_ID(struct, path)
static const struct bpf_func_proto bpf_d_path_proto = { static const struct bpf_func_proto bpf_d_path_proto = {
.func = bpf_d_path, .func = bpf_d_path,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_d_path_btf_ids[0],
.arg2_type = ARG_PTR_TO_MEM, .arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_d_path_btf_ids,
.allowed = bpf_d_path_allowed, .allowed = bpf_d_path_allowed,
}; };
......
...@@ -378,19 +378,15 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = { ...@@ -378,19 +378,15 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.arg2_type = ARG_PTR_TO_SOCKET, .arg2_type = ARG_PTR_TO_SOCKET,
}; };
BTF_ID_LIST(sk_storage_btf_ids)
BTF_ID_UNUSED
BTF_ID(struct, sock)
const struct bpf_func_proto sk_storage_get_btf_proto = { const struct bpf_func_proto sk_storage_get_btf_proto = {
.func = bpf_sk_storage_get, .func = bpf_sk_storage_get,
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR, .arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID, .arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING,
.btf_id = sk_storage_btf_ids,
}; };
const struct bpf_func_proto sk_storage_delete_btf_proto = { const struct bpf_func_proto sk_storage_delete_btf_proto = {
...@@ -399,7 +395,7 @@ const struct bpf_func_proto sk_storage_delete_btf_proto = { ...@@ -399,7 +395,7 @@ const struct bpf_func_proto sk_storage_delete_btf_proto = {
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR, .arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID, .arg2_type = ARG_PTR_TO_BTF_ID,
.btf_id = sk_storage_btf_ids, .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
}; };
struct bpf_sk_storage_diag { struct bpf_sk_storage_diag {
......
...@@ -3803,19 +3803,18 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = { ...@@ -3803,19 +3803,18 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_CONST_SIZE_OR_ZERO,
}; };
BTF_ID_LIST(bpf_skb_output_btf_ids) BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
BTF_ID(struct, sk_buff)
const struct bpf_func_proto bpf_skb_output_proto = { const struct bpf_func_proto bpf_skb_output_proto = {
.func = bpf_skb_event_output, .func = bpf_skb_event_output,
.gpl_only = true, .gpl_only = true,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_skb_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM, .arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_skb_output_btf_ids,
}; };
static unsigned short bpf_tunnel_key_af(u64 flags) static unsigned short bpf_tunnel_key_af(u64 flags)
...@@ -4199,19 +4198,18 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = { ...@@ -4199,19 +4198,18 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_CONST_SIZE_OR_ZERO,
}; };
BTF_ID_LIST(bpf_xdp_output_btf_ids) BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
BTF_ID(struct, xdp_buff)
const struct bpf_func_proto bpf_xdp_output_proto = { const struct bpf_func_proto bpf_xdp_output_proto = {
.func = bpf_xdp_event_output, .func = bpf_xdp_event_output,
.gpl_only = true, .gpl_only = true,
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_xdp_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM, .arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_CONST_SIZE_OR_ZERO,
.btf_id = bpf_xdp_output_btf_ids,
}; };
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
...@@ -9897,17 +9895,6 @@ BTF_SOCK_TYPE_xxx ...@@ -9897,17 +9895,6 @@ BTF_SOCK_TYPE_xxx
u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
#endif #endif
static bool check_arg_btf_id(u32 btf_id, u32 arg)
{
int i;
/* only one argument, no need to check arg */
for (i = 0; i < MAX_BTF_SOCK_TYPE; i++)
if (btf_sock_ids[i] == btf_id)
return true;
return false;
}
BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{ {
/* tcp6_sock type is not generated in dwarf and hence btf, /* tcp6_sock type is not generated in dwarf and hence btf,
...@@ -9926,7 +9913,7 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { ...@@ -9926,7 +9913,7 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id, .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
}; };
...@@ -9943,7 +9930,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { ...@@ -9943,7 +9930,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id, .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
}; };
...@@ -9967,7 +9954,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { ...@@ -9967,7 +9954,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id, .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
}; };
...@@ -9991,7 +9978,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { ...@@ -9991,7 +9978,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id, .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
}; };
...@@ -10013,6 +10000,6 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { ...@@ -10013,6 +10000,6 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
.gpl_only = false, .gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.check_btf_id = check_arg_btf_id, .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
}; };
...@@ -28,23 +28,18 @@ static u32 unsupported_ops[] = { ...@@ -28,23 +28,18 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type; static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id; static u32 tcp_sock_id, sock_id;
static int btf_sk_storage_get_ids[5];
static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly; static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly;
static int btf_sk_storage_delete_ids[5];
static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly; static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly;
static void convert_sk_func_proto(struct bpf_func_proto *to, int *to_btf_ids, static void convert_sk_func_proto(struct bpf_func_proto *to, const struct bpf_func_proto *from)
const struct bpf_func_proto *from)
{ {
int i; int i;
*to = *from; *to = *from;
to->btf_id = to_btf_ids;
for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) { for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) {
if (to->arg_type[i] == ARG_PTR_TO_SOCKET) { if (to->arg_type[i] == ARG_PTR_TO_SOCKET) {
to->arg_type[i] = ARG_PTR_TO_BTF_ID; to->arg_type[i] = ARG_PTR_TO_BTF_ID;
to->btf_id[i] = tcp_sock_id; to->arg_btf_id[i] = &tcp_sock_id;
} }
} }
} }
...@@ -64,12 +59,8 @@ static int bpf_tcp_ca_init(struct btf *btf) ...@@ -64,12 +59,8 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id; tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id); tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
convert_sk_func_proto(&btf_sk_storage_get_proto, convert_sk_func_proto(&btf_sk_storage_get_proto, &bpf_sk_storage_get_proto);
btf_sk_storage_get_ids, convert_sk_func_proto(&btf_sk_storage_delete_proto, &bpf_sk_storage_delete_proto);
&bpf_sk_storage_get_proto);
convert_sk_func_proto(&btf_sk_storage_delete_proto,
btf_sk_storage_delete_ids,
&bpf_sk_storage_delete_proto);
return 0; return 0;
} }
...@@ -185,8 +176,8 @@ static const struct bpf_func_proto bpf_tcp_send_ack_proto = { ...@@ -185,8 +176,8 @@ static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
/* In case we want to report error later */ /* In case we want to report error later */
.ret_type = RET_INTEGER, .ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID, .arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &tcp_sock_id,
.arg2_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING,
.btf_id = &tcp_sock_id,
}; };
static const struct bpf_func_proto * static const struct bpf_func_proto *
......
...@@ -76,6 +76,13 @@ extern u32 name[]; ...@@ -76,6 +76,13 @@ extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name) \ #define BTF_ID_LIST_GLOBAL(name) \
__BTF_ID_LIST(name, globl) __BTF_ID_LIST(name, globl)
/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
* a single entry.
*/
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
/* /*
* The BTF_ID_UNUSED macro defines 4 zero bytes. * The BTF_ID_UNUSED macro defines 4 zero bytes.
* It's used when we want to define 'unused' entry * It's used when we want to define 'unused' entry
...@@ -140,6 +147,7 @@ extern struct btf_id_set name; ...@@ -140,6 +147,7 @@ extern struct btf_id_set name;
#define BTF_ID(prefix, name) #define BTF_ID(prefix, name)
#define BTF_ID_UNUSED #define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #define BTF_ID_LIST_GLOBAL(name) u32 name[1];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 }; #define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 }; #define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_SET_END(name) #define BTF_SET_END(name)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment