Commit 07343110 authored by Feng Zhou, committed by Alexei Starovoitov

bpf: add bpf_map_lookup_percpu_elem for percpu map

Add a new eBPF helper, bpf_map_lookup_percpu_elem.

The implementation is straightforward: it mirrors the existing
map_lookup_elem implementations of the percpu maps, takes an additional
cpu parameter, and returns the value slot for the specified CPU.
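
For illustration (not part of this patch), a sketch of calling the new
helper from a tracing program. The map, section, and program names are
hypothetical, and it assumes libbpf headers new enough to declare
bpf_map_lookup_percpu_elem():

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int read_remote_cpu(void *ctx)
{
	__u32 key = 0, target_cpu = 1; /* hypothetical: read CPU 1's slot */
	__u64 *val;

	/* Unlike bpf_map_lookup_elem(), which returns the current CPU's
	 * slot of a percpu map, this returns the slot of target_cpu, or
	 * NULL if the key is missing or the cpu is out of range.
	 */
	val = bpf_map_lookup_percpu_elem(&counters, &key, target_cpu);
	if (val)
		__sync_fetch_and_add(val, 1);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";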
Signed-off-by: Feng Zhou <zhoufeng.zf@bytedance.com>
Link: https://lore.kernel.org/r/20220511093854.411-2-zhoufeng.zf@bytedance.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 571b8739
@@ -89,6 +89,7 @@ struct bpf_map_ops {
 	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
 	int (*map_pop_elem)(struct bpf_map *map, void *value);
 	int (*map_peek_elem)(struct bpf_map *map, void *value);
+	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
 
 	/* funcs called by prog_array and perf_event_array map */
 	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -2184,6 +2185,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
 extern const struct bpf_func_proto bpf_map_push_elem_proto;
 extern const struct bpf_func_proto bpf_map_pop_elem_proto;
 extern const struct bpf_func_proto bpf_map_peek_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
...
@@ -5164,6 +5164,14 @@ union bpf_attr {
  *		if not NULL, is a reference which must be released using its
  *		corresponding release function, or moved into a BPF map before
  *		program exit.
+ *
+ * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
+ *	Description
+ *		Perform a lookup in *percpu map* for an entry associated to
+ *		*key* on *cpu*.
+ *	Return
+ *		Map value associated to *key* on *cpu*, or **NULL** if no entry
+ *		was found or *cpu* is invalid.
  */
 #define __BPF_FUNC_MAPPER(FN) \
 	FN(unspec), \
@@ -5361,6 +5369,7 @@ union bpf_attr {
 	FN(skb_set_tstamp), \
 	FN(ima_file_hash), \
 	FN(kptr_xchg), \
+	FN(map_lookup_percpu_elem), \
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
...
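A common use for the helper is aggregating a per-CPU value across all
CPUs directly in BPF, which previously required dumping the map to user
space. A hedged sketch, reusing the hypothetical "counters" map above
and an assumed compile-time bound MAX_CPUS so the loop stays
verifier-friendly:

#define MAX_CPUS 128 /* assumed upper bound, not from this patch */

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_counters(void *ctx)
{
	__u32 key = 0;
	__u64 sum = 0;
	int cpu;

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		/* Returns NULL once cpu >= nr_cpu_ids (or if the key is
		 * missing), which ends the scan early on smaller machines.
		 */
		__u64 *val = bpf_map_lookup_percpu_elem(&counters, &key, cpu);

		if (!val)
			break;
		sum += *val;
	}

	bpf_printk("total = %llu", sum);
	return 0;
}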
@@ -243,6 +243,20 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 }
 
+static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	u32 index = *(u32 *)key;
+
+	if (cpu >= nr_cpu_ids)
+		return NULL;
+
+	if (unlikely(index >= array->map.max_entries))
+		return NULL;
+
+	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
+}
+
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -725,6 +739,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
 	.map_lookup_elem = percpu_array_map_lookup_elem,
 	.map_update_elem = array_map_update_elem,
 	.map_delete_elem = array_map_delete_elem,
+	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
 	.map_seq_show_elem = percpu_array_map_seq_show_elem,
 	.map_check_btf = array_map_check_btf,
 	.map_lookup_batch = generic_map_lookup_batch,
...
@@ -2619,6 +2619,7 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
+const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
 const struct bpf_func_proto bpf_spin_lock_proto __weak;
 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
 const struct bpf_func_proto bpf_jiffies64_proto __weak;
...
@@ -2199,6 +2199,20 @@ static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 		return NULL;
 }
 
+static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
+{
+	struct htab_elem *l;
+
+	if (cpu >= nr_cpu_ids)
+		return NULL;
+
+	l = __htab_map_lookup_elem(map, key);
+	if (l)
+		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
+	else
+		return NULL;
+}
+
 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
@@ -2211,6 +2225,22 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 		return NULL;
 }
 
+static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
+{
+	struct htab_elem *l;
+
+	if (cpu >= nr_cpu_ids)
+		return NULL;
+
+	l = __htab_map_lookup_elem(map, key);
+	if (l) {
+		bpf_lru_node_set_ref(&l->lru_node);
+		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
+	}
+
+	return NULL;
+}
+
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 {
 	struct htab_elem *l;
@@ -2300,6 +2330,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
 	.map_update_elem = htab_percpu_map_update_elem,
 	.map_delete_elem = htab_map_delete_elem,
+	.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 	.map_for_each_callback = bpf_for_each_hash_elem,
@@ -2318,6 +2349,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
 	.map_update_elem = htab_lru_percpu_map_update_elem,
 	.map_delete_elem = htab_lru_map_delete_elem,
+	.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 	.map_for_each_callback = bpf_for_each_hash_elem,
...
@@ -119,6 +119,22 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
 	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
 };
 
+BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
+{
+	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
+}
+
+const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
+	.func		= bpf_map_lookup_percpu_elem,
+	.gpl_only	= false,
+	.pkt_access	= true,
+	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_KEY,
+	.arg3_type	= ARG_ANYTHING,
+};
+
 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 	.func		= bpf_user_rnd_u32,
 	.gpl_only	= false,
@@ -1420,6 +1436,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_map_pop_elem_proto;
 	case BPF_FUNC_map_peek_elem:
 		return &bpf_map_peek_elem_proto;
+	case BPF_FUNC_map_lookup_percpu_elem:
+		return &bpf_map_lookup_percpu_elem_proto;
 	case BPF_FUNC_get_prandom_u32:
 		return &bpf_get_prandom_u32_proto;
 	case BPF_FUNC_get_smp_processor_id:
...
@@ -6137,6 +6137,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
 			goto error;
 		break;
+	case BPF_FUNC_map_lookup_percpu_elem:
+		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
+		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
+		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
+			goto error;
+		break;
 	case BPF_FUNC_sk_storage_get:
 	case BPF_FUNC_sk_storage_delete:
 		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
@@ -6750,7 +6756,8 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
 	    func_id != BPF_FUNC_map_pop_elem &&
 	    func_id != BPF_FUNC_map_peek_elem &&
 	    func_id != BPF_FUNC_for_each_map_elem &&
-	    func_id != BPF_FUNC_redirect_map)
+	    func_id != BPF_FUNC_redirect_map &&
+	    func_id != BPF_FUNC_map_lookup_percpu_elem)
 		return 0;
 
 	if (map == NULL) {
@@ -13810,7 +13817,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		     insn->imm == BPF_FUNC_map_pop_elem ||
 		     insn->imm == BPF_FUNC_map_peek_elem ||
 		     insn->imm == BPF_FUNC_redirect_map ||
-		     insn->imm == BPF_FUNC_for_each_map_elem)) {
+		     insn->imm == BPF_FUNC_for_each_map_elem ||
+		     insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
 			aux = &env->insn_aux_data[i + delta];
 			if (bpf_map_ptr_poisoned(aux))
 				goto patch_call_imm;
@@ -13859,6 +13867,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 					      bpf_callback_t callback_fn,
 					      void *callback_ctx,
 					      u64 flags))NULL));
+			BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
+				     (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
 
 patch_map_ops_generic:
 			switch (insn->imm) {
@@ -13886,6 +13896,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			case BPF_FUNC_for_each_map_elem:
 				insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
 				continue;
+			case BPF_FUNC_map_lookup_percpu_elem:
+				insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
+				continue;
 			}
 
 			goto patch_call_imm;
...
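The do_misc_fixups() hunk above is a performance detail worth spelling
out; conceptually (a sketch, not kernel code):

/* When the map type is known at verification time, the generic helper
 * call
 *
 *	r0 = bpf_map_lookup_percpu_elem(r1 = map, r2 = key, r3 = cpu);
 *	     // dispatches through map->ops->map_lookup_percpu_elem
 *
 * has its call target patched via BPF_CALL_IMM() into a direct call,
 * e.g. for a percpu array:
 *
 *	r0 = percpu_array_map_lookup_percpu_elem(r1, r2, r3);
 *
 * skipping the indirect ops dispatch at run time. The BUILD_BUG_ON()
 * added above guarantees every implementation shares the
 * (map, key, cpu) signature this shortcut assumes.
 */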
@@ -1197,6 +1197,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_map_pop_elem_proto;
 	case BPF_FUNC_map_peek_elem:
 		return &bpf_map_peek_elem_proto;
+	case BPF_FUNC_map_lookup_percpu_elem:
+		return &bpf_map_lookup_percpu_elem_proto;
 	case BPF_FUNC_ktime_get_ns:
 		return &bpf_ktime_get_ns_proto;
 	case BPF_FUNC_ktime_get_boot_ns:
...
@@ -5164,6 +5164,14 @@ union bpf_attr {
  *		if not NULL, is a reference which must be released using its
  *		corresponding release function, or moved into a BPF map before
  *		program exit.
+ *
+ * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
+ *	Description
+ *		Perform a lookup in *percpu map* for an entry associated to
+ *		*key* on *cpu*.
+ *	Return
+ *		Map value associated to *key* on *cpu*, or **NULL** if no entry
+ *		was found or *cpu* is invalid.
  */
 #define __BPF_FUNC_MAPPER(FN) \
 	FN(unspec), \
@@ -5361,6 +5369,7 @@ union bpf_attr {
 	FN(skb_set_tstamp), \
 	FN(ima_file_hash), \
 	FN(kptr_xchg), \
+	FN(map_lookup_percpu_elem), \
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
...