Commit e91e2189 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-08-10

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix cpumap and devmap on teardown: the flush there runs under RCU
   context and cannot rely on the same assumptions as when running under
   NAPI protection, from Jesper. (A minimal sketch of the resulting
   pattern follows this commit message.)

2) Fix various sockmap bugs in the bpf_tcp_sendmsg() code, e.g. there
   was a bug where the socket error was not propagated correctly, from
   Daniel. (A short note on these fixes follows the bpf_tcp_sendmsg()
   hunks below.)

3) Fix the incompatible libbpf header license for the BTF code and match
   it with the rest of libbpf, which is LGPL-2.1, before it gets
   officially released, from Martin.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 112cbae2 9c954201
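
For the cpumap/devmap fix in (1), the hunks below thread a new in_napi_ctx flag through the bulk-queue flush helpers, so frames that cannot be queued are returned with xdp_return_frame_rx_napi() only when the caller really is under NAPI protection, and with plain xdp_return_frame() on the RCU teardown paths. A minimal, hypothetical sketch of that pattern (sketch_flush() and sketch_try_queue() are illustrative stand-ins, not the kernel helpers used in the actual patches):

/* Illustrative sketch only, not the kernel code changed below. */
static void sketch_flush(struct xdp_bulk_queue *bq, bool in_napi_ctx)
{
	unsigned int i;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		if (sketch_try_queue(xdpf))	/* stand-in for __ptr_ring_produce()/ndo_xdp_xmit() */
			continue;

		/* Could not queue: hand the frame back to the allocator */
		if (in_napi_ctx)
			xdp_return_frame_rx_napi(xdpf);	/* only valid under NAPI protection */
		else
			xdp_return_frame(xdpf);		/* safe from teardown/RCU context */
	}
	bq->count = 0;
}

In the actual patches the fast-path callers (bq_enqueue(), __cpu_map_flush(), __dev_map_flush()) pass true, while the teardown callers (__cpu_map_entry_free(), dev_map_flush_old()) pass false.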
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-			     struct xdp_bulk_queue *bq);
+			     struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
 		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
 		/* No concurrent bq_enqueue can run at this point */
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, false);
 	}
 	free_percpu(rcpu->bulkq);
 	/* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-			     struct xdp_bulk_queue *bq)
+			     struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
 	unsigned int processed = 0, drops = 0;
 	const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
 		err = __ptr_ring_produce(q, xdpf);
 		if (err) {
 			drops++;
-			xdp_return_frame_rx_napi(xdpf);
+			if (likely(in_napi_ctx))
+				xdp_return_frame_rx_napi(xdpf);
+			else
+				xdp_return_frame(xdpf);
 		}
 		processed++;
 	}
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 	/* Notice, xdp_buff/page MUST be queued here, long enough for
 	 * driver to code invoking us to finished, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
 		/* Flush all frames in bulkq to real queue */
 		bq = this_cpu_ptr(rcpu->bulkq);
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 		/* If already running, costs spin_lock_irqsave + smb_mb */
 		wake_up_process(rcpu->kthread);
......
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq, u32 flags)
+		       struct xdp_bulk_queue *bq, u32 flags,
+		       bool in_napi_ctx)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		struct xdp_frame *xdpf = bq->q[i];
 
 		/* RX path under NAPI protection, can return frames faster */
-		xdp_return_frame_rx_napi(xdpf);
+		if (likely(in_napi_ctx))
+			xdp_return_frame_rx_napi(xdpf);
+		else
+			xdp_return_frame(xdpf);
 		drops++;
 	}
 	goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
 	}
 }
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0);
+		bq_xmit_all(obj, bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 		}
 	}
 }
......
@@ -1048,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
 	while (msg_data_left(msg)) {
-		struct sk_msg_buff *m;
+		struct sk_msg_buff *m = NULL;
 		bool enospc = false;
 		int copy;
 
 		if (sk->sk_err) {
-			err = sk->sk_err;
+			err = -sk->sk_err;
 			goto out_err;
 		}
@@ -1116,9 +1116,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
 		err = sk_stream_wait_memory(sk, &timeo);
-		if (err)
+		if (err) {
+			if (m && m != psock->cork)
+				free_start_sg(sk, m);
 			goto out_err;
+		}
 	}
 out_err:
 	if (err < 0)
 		err = sk_stream_error(sk, msg->msg_flags, err);
......
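
The two bpf_tcp_sendmsg() hunks above carry the sockmap fixes from (2): sk->sk_err stores a positive errno, so it has to be negated before being propagated (and later handed to sk_stream_error()), and when sk_stream_wait_memory() fails, the partially built sk_msg_buff is now freed via free_start_sg() unless it is the psock's cork buffer, which is released elsewhere; previously its pages were leaked. A tiny, hypothetical illustration of the sign convention (sketch_sock_error() is not a kernel function):

/* Hypothetical illustration only: sk->sk_err holds a positive errno,
 * while the sendmsg path is expected to return negative error codes.
 */
static int sketch_sock_error(struct sock *sk)
{
	if (sk->sk_err)
		return -sk->sk_err;	/* e.g. EPIPE (32) is propagated as -EPIPE (-32) */
	return 0;
}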
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <uapi/linux/bpf.h> #include <uapi/linux/bpf.h>
#include "bpf_helpers.h" #include "bpf_helpers.h"
#define MAX_CPUS 12 /* WARNING - sync with _user.c */ #define MAX_CPUS 64 /* WARNING - sync with _user.c */
/* Special map type that can XDP_REDIRECT frames to another CPU */ /* Special map type that can XDP_REDIRECT frames to another CPU */
struct bpf_map_def SEC("maps") cpu_map = { struct bpf_map_def SEC("maps") cpu_map = {
......
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
 	 * procedure.
 	 */
 	create_cpu_entry(1, 1024, 0, false);
-	create_cpu_entry(1, 128, 0, false);
+	create_cpu_entry(1, 8, 0, false);
 	create_cpu_entry(1, 16000, 0, false);
 }
......
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include <stdlib.h>
......
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 
 #ifndef __BPF_BTF_H
......
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
 	while (s->bytes_recvd < total_bytes) {
 		if (txmsg_cork) {
 			timeout.tv_sec = 0;
-			timeout.tv_usec = 1000;
+			timeout.tv_usec = 300000;
 		} else {
 			timeout.tv_sec = 1;
 			timeout.tv_usec = 0;
......