Commit 830b8e49 authored by Alexei Starovoitov

Merge branch 'check-the-remaining-info_cnt-before-repeating-btf-fields'

Hou Tao says:

====================
Check the remaining info_cnt before repeating btf fields

From: Hou Tao <houtao1@huawei.com>

Hi,

The patch set adds the missing check against info_cnt when flattening the
array of nested structs. The problem was spotted when developing dynptr
key support for hash map. Patch #1 adds the missed check and patch #2
adds three success test cases and one failure test case for the problem.

Comments are always welcome.

Change Log:
v2:
 * patch #1: check info_cnt in btf_repeat_fields()
 * patch #2: use a hard-coded number instead of BTF_FIELDS_MAX, because
             BTF_FIELDS_MAX is not always available in vmlinux.h (e.g.,
	     for llvm 17/18)

v1: https://lore.kernel.org/bpf/20240911110557.2759801-1-houtao@huaweicloud.com/T/#t
====================

Link: https://lore.kernel.org/r/20241008071114.3718177-1-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents b24d7f0d c456f080
......@@ -3523,7 +3523,7 @@ static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_
* (i + 1) * elem_size
* where i is the repeat index and elem_size is the size of an element.
*/
static int btf_repeat_fields(struct btf_field_info *info,
static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
u32 field_cnt, u32 repeat_cnt, u32 elem_size)
{
u32 i, j;
......@@ -3543,6 +3543,12 @@ static int btf_repeat_fields(struct btf_field_info *info,
}
}
/* The type of struct size or variable size is u32,
* so the multiplication will not overflow.
*/
if (field_cnt * (repeat_cnt + 1) > info_cnt)
return -E2BIG;
cur = field_cnt;
for (i = 0; i < repeat_cnt; i++) {
memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
......@@ -3587,7 +3593,7 @@ static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *
info[i].off += off;
if (nelems > 1) {
err = btf_repeat_fields(info, ret, nelems - 1, t->size);
err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
if (err == 0)
ret *= nelems;
else
......@@ -3681,10 +3687,10 @@ static int btf_find_field_one(const struct btf *btf,
if (ret == BTF_FIELD_IGNORE)
return 0;
if (nelems > info_cnt)
if (!info_cnt)
return -E2BIG;
if (nelems > 1) {
ret = btf_repeat_fields(info, 1, nelems - 1, sz);
ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
if (ret < 0)
return ret;
}
......
......@@ -23,6 +23,7 @@ static const char * const cpumask_success_testcases[] = {
"test_global_mask_array_l2_rcu",
"test_global_mask_nested_rcu",
"test_global_mask_nested_deep_rcu",
"test_global_mask_nested_deep_array_rcu",
"test_cpumask_weight",
};
......
......@@ -7,6 +7,11 @@
#include "errno.h"
#include <stdbool.h>
/* Should use BTF_FIELDS_MAX, but it is not always available in vmlinux.h,
 * so use the hard-coded number as a workaround.
 */
#define CPUMASK_KPTR_FIELDS_MAX 11
/* Shared error indicator set by the test programs and read by userspace. */
int err;
/* Places a variable in its own named .bss section, 8-byte aligned;
 * used below to group related global kptr containers.
 */
#define private(name) SEC(".bss." #name) __attribute__((aligned(8)))
......
......@@ -10,6 +10,21 @@
char _license[] SEC("license") = "GPL";

/* Innermost level: a single bpf_cpumask kptr field. */
struct kptr_nested_array_2 {
struct bpf_cpumask __kptr * mask;
};

/* One element more than the kptr-field limit, so flattening the array of
 * nested structs produces more fields than the kernel accepts.
 */
struct kptr_nested_array_1 {
/* Make btf_parse_fields() in map_create() return -E2BIG */
struct kptr_nested_array_2 d_2[CPUMASK_KPTR_FIELDS_MAX + 1];
};

struct kptr_nested_array {
struct kptr_nested_array_1 d_1;
};

/* Global container whose kptr fields exceed the limit; used by the
 * expected-failure test below.
 */
private(MASK_NESTED) static struct kptr_nested_array global_mask_nested_arr;
/* Prototype for all of the program trace events below:
*
* TRACE_EVENT(task_newtask,
......@@ -187,3 +202,23 @@ int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 c
return 0;
}
SEC("tp_btf/task_newtask")
/* Expected-failure test: global_mask_nested_arr holds
 * CPUMASK_KPTR_FIELDS_MAX + 1 kptr fields, which is over the limit, so its
 * fields are not parsed as valid kptrs and the verifier must reject the
 * bpf_kptr_xchg() with "has no valid kptr".
 */
__failure __msg("has no valid kptr")
int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags)
{
struct bpf_cpumask *local, *prev;

/* create_cpumask() is a test helper defined elsewhere in this suite;
 * presumably it allocates a bpf_cpumask or returns NULL — verify there.
 */
local = create_cpumask();
if (!local)
return 0;

/* Exchange into the last array element; index CPUMASK_KPTR_FIELDS_MAX is
 * within the array bounds, but the struct as a whole has no valid kptr
 * fields, so verification fails here.
 */
prev = bpf_kptr_xchg(&global_mask_nested_arr.d_1.d_2[CPUMASK_KPTR_FIELDS_MAX].mask, local);
if (prev) {
bpf_cpumask_release(prev);
err = 3;
return 0;
}

return 0;
}
......@@ -31,11 +31,59 @@ struct kptr_nested_deep {
struct kptr_nested_pair ptr_pairs[3];
};
/* Success case 1: an array of kptrs nested inside plain structs; exactly
 * CPUMASK_KPTR_FIELDS_MAX kptr fields, so it stays within the limit.
 */
struct kptr_nested_deep_array_1_2 {
int dummy;
struct bpf_cpumask __kptr * mask[CPUMASK_KPTR_FIELDS_MAX];
};

struct kptr_nested_deep_array_1_1 {
int dummy;
struct kptr_nested_deep_array_1_2 d_2;
};

struct kptr_nested_deep_array_1 {
long dummy;
struct kptr_nested_deep_array_1_1 d_1;
};
/* Success case 2: an array of structs each containing one kptr; the array
 * is at the middle nesting level. Total kptr count equals the limit.
 */
struct kptr_nested_deep_array_2_2 {
long dummy[2];
struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_deep_array_2_1 {
int dummy;
struct kptr_nested_deep_array_2_2 d_2[CPUMASK_KPTR_FIELDS_MAX];
};

struct kptr_nested_deep_array_2 {
long dummy;
struct kptr_nested_deep_array_2_1 d_1;
};
/* Success case 3: like case 2, but the array sits at the outermost nesting
 * level. Total kptr count again equals the limit.
 */
struct kptr_nested_deep_array_3_2 {
long dummy[2];
struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_deep_array_3_1 {
int dummy;
struct kptr_nested_deep_array_3_2 d_2;
};

struct kptr_nested_deep_array_3 {
long dummy;
struct kptr_nested_deep_array_3_1 d_1[CPUMASK_KPTR_FIELDS_MAX];
};
/* Global kptr containers exercised by the cpumask tests; each private(...)
 * group lands in its own .bss section (see the private() macro).
 */
private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
private(MASK) static struct kptr_nested global_mask_nested[2];
private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
/* Containers for the three nested-array success cases defined above. */
private(MASK_1) static struct kptr_nested_deep_array_1 global_mask_nested_deep_array_1;
private(MASK_2) static struct kptr_nested_deep_array_2 global_mask_nested_deep_array_2;
private(MASK_3) static struct kptr_nested_deep_array_3 global_mask_nested_deep_array_3;
static bool is_test_task(void)
{
......@@ -543,12 +591,21 @@ static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
goto err_exit;
}
/* [<mask 0>, NULL] */
if (!*mask0 || *mask1) {
/* [<mask 0>, *] */
if (!*mask0) {
err = 2;
goto err_exit;
}
if (!mask1)
goto err_exit;
/* [*, NULL] */
if (*mask1) {
err = 3;
goto err_exit;
}
local = create_cpumask();
if (!local) {
err = 9;
......@@ -631,6 +688,23 @@ int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clo
return 0;
}
SEC("tp_btf/task_newtask")
/* Success test: exercise every kptr slot in the three nested-array layouts
 * via _global_mask_array_rcu() (defined earlier in this file). Each layout
 * holds exactly CPUMASK_KPTR_FIELDS_MAX kptrs, so all must be valid.
 */
int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
{
int i;

/* Case 1: kptr array at the innermost level. */
for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
_global_mask_array_rcu(&global_mask_nested_deep_array_1.d_1.d_2.mask[i], NULL);

/* Case 2: struct array at the middle level, one kptr per element. */
for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
_global_mask_array_rcu(&global_mask_nested_deep_array_2.d_1.d_2[i].mask, NULL);

/* Case 3: struct array at the outermost level, one kptr per element. */
for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
_global_mask_array_rcu(&global_mask_nested_deep_array_3.d_1[i].d_2.mask, NULL);

return 0;
}
SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment