Commit 644bfe51 authored by Lorenzo Bianconi, committed by Daniel Borkmann

cpumap: Formalize map value as a named struct

As has already been done for devmap, introduce 'struct bpf_cpumap_val'
to formalize the expected values that can be passed in for a CPUMAP.
Update cpumap code to use the struct.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Link: https://lore.kernel.org/bpf/754f950674665dae6139c061d28c1d982aaf4170.1594734381.git.lorenzo@kernel.org
parent a4e76f1b
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3849,6 +3849,15 @@ struct bpf_devmap_val {
 	} bpf_prog;
 };
 
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+	__u32 qsize;	/* queue size to remote target CPU */
+};
+
 enum sk_action {
 	SK_DROP = 0,
 	SK_PASS,
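With the struct exported in the UAPI header, user space configures a CPUMAP entry by filling in 'struct bpf_cpumap_val' rather than passing a bare u32 queue size. A minimal user-space sketch using libbpf's bpf_map_update_elem(); the fd, target CPU, and qsize below are illustrative, not taken from the commit:

	#include <linux/bpf.h>
	#include <bpf/bpf.h>

	/* Configure CPU 2 to receive redirected frames through a
	 * 512-slot queue. A qsize of 0 is the same as deleting the
	 * entry (see cpu_map_update_elem() below).
	 */
	static int set_cpumap_entry(int cpumap_fd)
	{
		struct bpf_cpumap_val val = {
			.qsize = 512,	/* queue size to remote target CPU */
		};
		__u32 key = 2;		/* array index == target CPU number */

		return bpf_map_update_elem(cpumap_fd, &key, &val, BPF_ANY);
	}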
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -52,7 +52,6 @@ struct xdp_bulk_queue {
 struct bpf_cpu_map_entry {
 	u32 cpu;    /* kthread CPU and map index */
 	int map_id; /* Back reference to map */
-	u32 qsize;  /* Queue size placeholder for map lookup */
 
 	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
 	struct xdp_bulk_queue __percpu *bulkq;
@@ -62,10 +61,13 @@ struct bpf_cpu_map_entry {
 	/* Queue with potential multi-producers, and single-consumer kthread */
 	struct ptr_ring *queue;
 	struct task_struct *kthread;
-	struct work_struct kthread_stop_wq;
+
+	struct bpf_cpumap_val value;
 
 	atomic_t refcnt; /* Control when this struct can be free'ed */
 	struct rcu_head rcu;
+
+	struct work_struct kthread_stop_wq;
 };
 
 struct bpf_cpu_map {
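Note that the entry now embeds the whole 'struct bpf_cpumap_val' rather than a lone qsize field. This is what lets cpu_map_lookup_elem() below return a pointer to the complete map value, and, per the layout comment in the UAPI header, leaves room to append new members later, as devmap already does with its bpf_prog field.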
@@ -307,8 +309,8 @@ static int cpu_map_kthread_run(void *data)
 	return 0;
 }
 
-static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
-						       int map_id)
+static struct bpf_cpu_map_entry *
+__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
 {
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
@@ -338,13 +340,13 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 	if (!rcpu->queue)
 		goto free_bulkq;
 
-	err = ptr_ring_init(rcpu->queue, qsize, gfp);
+	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
 	if (err)
 		goto free_queue;
 
 	rcpu->cpu    = cpu;
 	rcpu->map_id = map_id;
-	rcpu->qsize  = qsize;
+	rcpu->value.qsize = value->qsize;
 
 	/* Setup kthread */
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
@@ -437,12 +439,12 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 			       u64 map_flags)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
+	struct bpf_cpumap_val cpumap_value = {};
 	struct bpf_cpu_map_entry *rcpu;
-
 	/* Array index key correspond to CPU number */
 	u32 key_cpu = *(u32 *)key;
-	/* Value is the queue size */
-	u32 qsize = *(u32 *)value;
+
+	memcpy(&cpumap_value, value, map->value_size);
 
 	if (unlikely(map_flags > BPF_EXIST))
 		return -EINVAL;
@@ -450,18 +452,18 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 		return -E2BIG;
 	if (unlikely(map_flags == BPF_NOEXIST))
 		return -EEXIST;
-	if (unlikely(qsize > 16384)) /* sanity limit on qsize */
+	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
 		return -EOVERFLOW;
 
 	/* Make sure CPU is a valid possible cpu */
 	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
 		return -ENODEV;
 
-	if (qsize == 0) {
+	if (cpumap_value.qsize == 0) {
 		rcpu = NULL; /* Same as deleting */
 	} else {
 		/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
-		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
+		rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
 		if (!rcpu)
 			return -ENOMEM;
 		rcpu->cmap = cmap;
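The zero-initialized local struct plus a memcpy() of map->value_size bytes (instead of a fixed-width read) copies exactly the value size the map was created with, so this path stays correct if 'struct bpf_cpumap_val' later grows while user space still passes the shorter layout, mirroring the approach of the earlier devmap change.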
@@ -523,7 +525,7 @@ static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
 	struct bpf_cpu_map_entry *rcpu =
 		__cpu_map_lookup_elem(map, *(u32 *)key);
 
-	return rcpu ? &rcpu->qsize : NULL;
+	return rcpu ? &rcpu->value : NULL;
 }
 
 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
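For completeness, the redirect side is unchanged by this patch; a minimal XDP sketch (map name, size, and target CPU are illustrative, not from the commit) that steers packets to a CPU configured as in the user-space example above:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_CPUMAP);
		__uint(max_entries, 64);
		__type(key, __u32);
		__type(value, struct bpf_cpumap_val);
	} cpu_map SEC(".maps");

	SEC("xdp")
	int xdp_redirect_cpu(struct xdp_md *ctx)
	{
		__u32 cpu = 2;	/* must match an entry set from user space */

		/* Fall back to XDP_PASS if CPU 2 has no map entry */
		return bpf_redirect_map(&cpu_map, cpu, XDP_PASS);
	}

	char _license[] SEC("license") = "GPL";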
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3849,6 +3849,15 @@ struct bpf_devmap_val {
 	} bpf_prog;
 };
 
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+	__u32 qsize;	/* queue size to remote target CPU */
+};
+
 enum sk_action {
 	SK_DROP = 0,
 	SK_PASS,