Commit cb4d03ab authored by Brian Vazquez, committed by Alexei Starovoitov

bpf: Add generic support for lookup batch op

This commit introduces generic support for bpf_map_lookup_batch. This
implementation can be used by almost all bpf maps, since its core
relies on the existing map_get_next_key and map_lookup_elem operations.
The bpf syscall subcommand introduced is:

  BPF_MAP_LOOKUP_BATCH

The UAPI attribute is:

  struct { /* struct used by BPF_MAP_*_BATCH commands */
         __aligned_u64   in_batch;       /* start batch,
                                          * NULL to start from beginning
                                          */
         __aligned_u64   out_batch;      /* output: next start batch */
         __aligned_u64   keys;
         __aligned_u64   values;
         __u32           count;          /* input/output:
                                          * input: # of key/value
                                          * elements
                                          * output: # of filled elements
                                          */
         __u32           map_fd;
         __u64           elem_flags;
         __u64           flags;
  } batch;

in_batch/out_batch are opaque values used to communicate between
user and kernel space; both must be key_size bytes long.

To start iterating from the beginning, in_batch must be NULL;
count is the number of key/value elements to retrieve. Note that the 'keys'
buffer must be key_size * count bytes and the 'values' buffer
value_size * count bytes, where value_size must be aligned to 8 bytes
by userspace if it's dealing with percpu maps. On return, 'count' contains
the number of keys/values successfully retrieved; note that 'count' is an
input/output variable and may contain a lower value after a call.
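
As a rough illustration (not part of this patch: the helper name, map
geometry, and possible-CPU count are assumptions for the example),
userspace might size the two buffers like this:

  #include <stdint.h>
  #include <stdlib.h>

  /* Hypothetical sizing helper: key_size/value_size come from the map's
   * definition, ncpus from e.g. /sys/devices/system/cpu/possible. */
  static int alloc_batch_buffers(uint32_t count, uint32_t key_size,
                                 uint32_t value_size, unsigned int ncpus,
                                 int percpu, void **keys, void **values)
  {
          /* keys buffer: key_size * count, as described above */
          *keys = malloc((size_t)key_size * count);

          /* values buffer: value_size * count; for percpu maps, round
           * value_size up to 8 bytes, with one slot per possible CPU
           * for every element */
          size_t vsz = percpu ?
                  (((size_t)value_size + 7) & ~(size_t)7) * ncpus :
                  (size_t)value_size;
          *values = malloc(vsz * count);

          return (*keys && *values) ? 0 : -1;
  }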

If there are no more entries to retrieve, ENOENT will be returned. Even when
the error is ENOENT, count may be > 0: some values may have been copied
before the entries ran out.

Note that if the return code is an error other than -EFAULT,
count indicates the number of elements successfully processed.
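
Putting it together, a userspace iteration loop might look like the
following sketch, which drives the new subcommand through the raw bpf(2)
syscall. The bpf_lookup_batch()/dump_map() helper names and the map
geometry are assumptions for illustration; a libbpf wrapper is not part
of this patch:

  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <linux/bpf.h>   /* UAPI header from a kernel with this patch */

  /* Thin wrapper over the raw bpf(2) syscall; name and shape are
   * illustrative only. */
  static int bpf_lookup_batch(int map_fd, void *in_batch, void *out_batch,
                              void *keys, void *values, uint32_t *count)
  {
          union bpf_attr attr;
          int ret;

          memset(&attr, 0, sizeof(attr));
          attr.batch.map_fd    = map_fd;
          attr.batch.in_batch  = (uint64_t)(unsigned long)in_batch;
          attr.batch.out_batch = (uint64_t)(unsigned long)out_batch;
          attr.batch.keys      = (uint64_t)(unsigned long)keys;
          attr.batch.values    = (uint64_t)(unsigned long)values;
          attr.batch.count     = *count;

          ret = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
          *count = attr.batch.count;      /* output: # of filled elements */
          return ret;
  }

  /* Dump a hypothetical map with 4-byte keys and 8-byte values. */
  static void dump_map(int map_fd)
  {
          uint32_t keys[64], in, out, count;
          uint64_t values[64];
          void *in_batch = NULL;          /* NULL: start from the beginning */
          int ret;

          do {
                  count = 64;
                  ret = bpf_lookup_batch(map_fd, in_batch, &out,
                                         keys, values, &count);
                  if (ret && errno != ENOENT)
                          break;          /* hard error; stop iterating */

                  /* even on ENOENT, count entries may have been copied */
                  for (uint32_t i = 0; i < count; i++)
                          printf("key %u -> %llu\n", keys[i],
                                 (unsigned long long)values[i]);

                  in = out;               /* resume where the kernel stopped */
                  in_batch = &in;
          } while (!ret);                 /* ENOENT: iteration complete */
  }

Here out_batch acts as an opaque resume cursor of key_size bytes (the last
key successfully copied), fed back as in_batch on the next call.
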
Suggested-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Brian Vazquez <brianvv@google.com>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200115184308.162644-3-brianvv@google.com
parent 15c14a3d
@@ -44,6 +44,8 @@ struct bpf_map_ops {
 	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
 	void (*map_release_uref)(struct bpf_map *map);
 	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
+	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
+				union bpf_attr __user *uattr);
 
 	/* funcs callable from userspace and from eBPF programs */
 	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -982,6 +984,9 @@ void *bpf_map_area_alloc(u64 size, int numa_node);
 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+int generic_map_lookup_batch(struct bpf_map *map,
+			     const union bpf_attr *attr,
+			     union bpf_attr __user *uattr);
 
 extern int sysctl_unprivileged_bpf_disabled;
@@ -107,6 +107,7 @@ enum bpf_cmd {
 	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
 	BPF_MAP_FREEZE,
 	BPF_BTF_GET_NEXT_ID,
+	BPF_MAP_LOOKUP_BATCH,
 };
 
 enum bpf_map_type {
@@ -420,6 +421,23 @@ union bpf_attr {
 		__u64		flags;
 	};
 
+	struct { /* struct used by BPF_MAP_*_BATCH commands */
+		__aligned_u64	in_batch;	/* start batch,
+						 * NULL to start from beginning
+						 */
+		__aligned_u64	out_batch;	/* output: next start batch */
+		__aligned_u64	keys;
+		__aligned_u64	values;
+		__u32		count;		/* input/output:
+						 * input: # of key/value
+						 * elements
+						 * output: # of filled elements
+						 */
+		__u32		map_fd;
+		__u64		elem_flags;
+		__u64		flags;
+	} batch;
+
 	struct { /* anonymous struct used by BPF_PROG_LOAD command */
 		__u32		prog_type;	/* one of enum bpf_prog_type */
 		__u32		insn_cnt;
@@ -219,10 +219,8 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 	void *ptr;
 	int err;
 
-	if (bpf_map_is_dev_bound(map)) {
-		err = bpf_map_offload_lookup_elem(map, key, value);
-		return err;
-	}
+	if (bpf_map_is_dev_bound(map))
+		return bpf_map_offload_lookup_elem(map, key, value);
 
 	preempt_disable();
 	this_cpu_inc(bpf_prog_active);
@@ -1220,6 +1218,109 @@ static int map_get_next_key(union bpf_attr *attr)
 	return err;
 }
 
+#define MAP_LOOKUP_RETRIES 3
+
+int generic_map_lookup_batch(struct bpf_map *map,
+			     const union bpf_attr *attr,
+			     union bpf_attr __user *uattr)
+{
+	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
+	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
+	void __user *values = u64_to_user_ptr(attr->batch.values);
+	void __user *keys = u64_to_user_ptr(attr->batch.keys);
+	void *buf, *buf_prevkey, *prev_key, *key, *value;
+	int err, retry = MAP_LOOKUP_RETRIES;
+	u32 value_size, cp, max_count;
+	bool first_key = false;
+
+	if (attr->batch.elem_flags & ~BPF_F_LOCK)
+		return -EINVAL;
+
+	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
+	    !map_value_has_spin_lock(map))
+		return -EINVAL;
+
+	value_size = bpf_map_value_size(map);
+
+	max_count = attr->batch.count;
+	if (!max_count)
+		return 0;
+
+	if (put_user(0, &uattr->batch.count))
+		return -EFAULT;
+
+	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+	if (!buf_prevkey)
+		return -ENOMEM;
+
+	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
+	if (!buf) {
+		kvfree(buf_prevkey);
+		return -ENOMEM;
+	}
+
+	err = -EFAULT;
+	first_key = false;
+	prev_key = NULL;
+	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
+		goto free_buf;
+	key = buf;
+	value = key + map->key_size;
+	if (ubatch)
+		prev_key = buf_prevkey;
+
+	for (cp = 0; cp < max_count;) {
+		rcu_read_lock();
+		err = map->ops->map_get_next_key(map, prev_key, key);
+		rcu_read_unlock();
+		if (err)
+			break;
+		err = bpf_map_copy_value(map, key, value,
+					 attr->batch.elem_flags);
+
+		if (err == -ENOENT) {
+			if (retry) {
+				retry--;
+				continue;
+			}
+			err = -EINTR;
+			break;
+		}
+
+		if (err)
+			goto free_buf;
+
+		if (copy_to_user(keys + cp * map->key_size, key,
+				 map->key_size)) {
+			err = -EFAULT;
+			goto free_buf;
+		}
+		if (copy_to_user(values + cp * value_size, value, value_size)) {
+			err = -EFAULT;
+			goto free_buf;
+		}
+
+		if (!prev_key)
+			prev_key = buf_prevkey;
+
+		swap(prev_key, key);
+		retry = MAP_LOOKUP_RETRIES;
+		cp++;
+	}
+
+	if (err == -EFAULT)
+		goto free_buf;
+
+	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
+		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
+		err = -EFAULT;
+
+free_buf:
+	kfree(buf_prevkey);
+	kfree(buf);
+	return err;
+}
+
 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
 
 static int map_lookup_and_delete_elem(union bpf_attr *attr)
@@ -3076,6 +3177,54 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
 	return err;
 }
+
+#define BPF_MAP_BATCH_LAST_FIELD batch.flags
+
+#define BPF_DO_BATCH(fn)			\
+	do {					\
+		if (!fn) {			\
+			err = -ENOTSUPP;	\
+			goto err_put;		\
+		}				\
+		err = fn(map, attr, uattr);	\
+	} while (0)
+
+static int bpf_map_do_batch(const union bpf_attr *attr,
+			    union bpf_attr __user *uattr,
+			    int cmd)
+{
+	struct bpf_map *map;
+	int err, ufd;
+	struct fd f;
+
+	if (CHECK_ATTR(BPF_MAP_BATCH))
+		return -EINVAL;
+
+	ufd = attr->batch.map_fd;
+	f = fdget(ufd);
+	map = __bpf_map_get(f);
+	if (IS_ERR(map))
+		return PTR_ERR(map);
+
+	if (cmd == BPF_MAP_LOOKUP_BATCH &&
+	    !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
+	if (cmd != BPF_MAP_LOOKUP_BATCH &&
+	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
+		err = -EPERM;
+		goto err_put;
+	}
+
+	if (cmd == BPF_MAP_LOOKUP_BATCH)
+		BPF_DO_BATCH(map->ops->map_lookup_batch);
+
+err_put:
+	fdput(f);
+	return err;
+}
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
 	union bpf_attr attr = {};
@@ -3173,6 +3322,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
 		err = map_lookup_and_delete_elem(&attr);
 		break;
+	case BPF_MAP_LOOKUP_BATCH:
+		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
+		break;
 	default:
 		err = -EINVAL;
 		break;