Commit e7d12ce1 authored by David S. Miller

Merge branch 'xdp-more-work-on-xdp-tracepoints'

Jesper Dangaard Brouer says:

====================
xdp: more work on xdp tracepoints

More work on streamlining and performance optimizing the tracepoints
for XDP.

I've created a simple xdp_monitor application that uses this
tracepoint, and prints statistics. Available at github:

https://github.com/netoptimizer/prototype-kernel/blob/master/kernel/samples/bpf/xdp_monitor_kern.c
https://github.com/netoptimizer/prototype-kernel/blob/master/kernel/samples/bpf/xdp_monitor_user.c

The improvement over tracepoint with strcpy: 9810372 - 8428762 = +1381610 pps faster
 - (1/9810372 - 1/8428762)*10^9 = -16.7 nanosec
 - 100-(8428762/9810372*100) = strcpy-trace is 14.08% slower
 - 1381610/8428762*100 = removing strcpy made it 16.39% faster

V3: Fix merge conflict with commit e4a8e817 ("bpf: misc xdp redirect cleanups")
V2: Change trace_xdp_redirect() to align with args of trace_xdp_exception()
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fb3bbbda 315ec399
...@@ -9849,14 +9849,14 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) ...@@ -9849,14 +9849,14 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
int err; int err;
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -EINVAL; return -ENETDOWN;
/* During program transitions its possible adapter->xdp_prog is assigned /* During program transitions its possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit. * but ring has not been configured yet. In this case simply abort xmit.
*/ */
ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
if (unlikely(!ring)) if (unlikely(!ring))
return -EINVAL; return -ENXIO;
err = ixgbe_xmit_xdp_ring(adapter, xdp); err = ixgbe_xmit_xdp_ring(adapter, xdp);
if (err != IXGBE_XDP_TX) if (err != IXGBE_XDP_TX)
......
...@@ -718,7 +718,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, ...@@ -718,7 +718,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* because we only track one map and force a flush when the map changes. * because we only track one map and force a flush when the map changes.
* This does not appear to be a real limitation for existing software. * This does not appear to be a real limitation for existing software.
*/ */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb); int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev, int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp, struct xdp_buff *xdp,
struct bpf_prog *prog); struct bpf_prog *prog);
......
...@@ -31,53 +31,53 @@ TRACE_EVENT(xdp_exception, ...@@ -31,53 +31,53 @@ TRACE_EVENT(xdp_exception,
TP_ARGS(dev, xdp, act), TP_ARGS(dev, xdp, act),
TP_STRUCT__entry( TP_STRUCT__entry(
__string(name, dev->name)
__array(u8, prog_tag, 8) __array(u8, prog_tag, 8)
__field(u32, act) __field(u32, act)
__field(int, ifindex)
), ),
TP_fast_assign( TP_fast_assign(
BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag)); BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag)); memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
__assign_str(name, dev->name); __entry->act = act;
__entry->act = act; __entry->ifindex = dev->ifindex;
), ),
TP_printk("prog=%s device=%s action=%s", TP_printk("prog=%s action=%s ifindex=%d",
__print_hex_str(__entry->prog_tag, 8), __print_hex_str(__entry->prog_tag, 8),
__get_str(name), __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB)) __entry->ifindex)
); );
TRACE_EVENT(xdp_redirect, TRACE_EVENT(xdp_redirect,
TP_PROTO(const struct net_device *from, TP_PROTO(const struct net_device *dev,
const struct net_device *to, const struct bpf_prog *xdp, u32 act,
const struct bpf_prog *xdp, u32 act, int err), int to_index, int err),
TP_ARGS(from, to, xdp, act, err), TP_ARGS(dev, xdp, act, to_index, err),
TP_STRUCT__entry( TP_STRUCT__entry(
__string(name_from, from->name)
__string(name_to, to->name)
__array(u8, prog_tag, 8) __array(u8, prog_tag, 8)
__field(u32, act) __field(u32, act)
__field(int, ifindex)
__field(int, to_index)
__field(int, err) __field(int, err)
), ),
TP_fast_assign( TP_fast_assign(
BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag)); BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag)); memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
__assign_str(name_from, from->name); __entry->act = act;
__assign_str(name_to, to->name); __entry->ifindex = dev->ifindex;
__entry->act = act; __entry->to_index = to_index;
__entry->err = err; __entry->err = err;
), ),
TP_printk("prog=%s from=%s to=%s action=%s err=%d", TP_printk("prog=%s action=%s ifindex=%d to_index=%d err=%d",
__print_hex_str(__entry->prog_tag, 8), __print_hex_str(__entry->prog_tag, 8),
__get_str(name_from), __get_str(name_to),
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->ifindex, __entry->to_index,
__entry->err) __entry->err)
); );
#endif /* _TRACE_XDP_H */ #endif /* _TRACE_XDP_H */
......
...@@ -3953,7 +3953,8 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) ...@@ -3953,7 +3953,8 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
if (act != XDP_PASS) { if (act != XDP_PASS) {
switch (act) { switch (act) {
case XDP_REDIRECT: case XDP_REDIRECT:
err = xdp_do_generic_redirect(skb->dev, skb); err = xdp_do_generic_redirect(skb->dev, skb,
xdp_prog);
if (err) if (err)
goto out_redir; goto out_redir;
/* fallthru to submit skb */ /* fallthru to submit skb */
...@@ -3966,7 +3967,6 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) ...@@ -3966,7 +3967,6 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
} }
return XDP_PASS; return XDP_PASS;
out_redir: out_redir:
trace_xdp_exception(skb->dev, xdp_prog, XDP_REDIRECT);
kfree_skb(skb); kfree_skb(skb);
return XDP_DROP; return XDP_DROP;
} }
......
...@@ -2476,7 +2476,6 @@ static int __bpf_tx_xdp(struct net_device *dev, ...@@ -2476,7 +2476,6 @@ static int __bpf_tx_xdp(struct net_device *dev,
int err; int err;
if (!dev->netdev_ops->ndo_xdp_xmit) { if (!dev->netdev_ops->ndo_xdp_xmit) {
bpf_warn_invalid_xdp_redirect(dev->ifindex);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -2525,7 +2524,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, ...@@ -2525,7 +2524,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
if (likely(!err)) if (likely(!err))
ri->map_to_flush = map; ri->map_to_flush = map;
out: out:
trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT, err); trace_xdp_redirect(dev, xdp_prog, XDP_REDIRECT, index, err);
return err; return err;
} }
...@@ -2543,39 +2542,48 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, ...@@ -2543,39 +2542,48 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
fwd = dev_get_by_index_rcu(dev_net(dev), index); fwd = dev_get_by_index_rcu(dev_net(dev), index);
ri->ifindex = 0; ri->ifindex = 0;
if (unlikely(!fwd)) { if (unlikely(!fwd)) {
bpf_warn_invalid_xdp_redirect(index);
err = -EINVAL; err = -EINVAL;
goto out; goto out;
} }
err = __bpf_tx_xdp(fwd, NULL, xdp, 0); err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
out: out:
trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT, err); trace_xdp_redirect(dev, xdp_prog, XDP_REDIRECT, index, err);
return err; return err;
} }
EXPORT_SYMBOL_GPL(xdp_do_redirect); EXPORT_SYMBOL_GPL(xdp_do_redirect);
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb) int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog)
{ {
struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct redirect_info *ri = this_cpu_ptr(&redirect_info);
unsigned int len;
u32 index = ri->ifindex; u32 index = ri->ifindex;
struct net_device *fwd;
unsigned int len;
int err = 0;
dev = dev_get_by_index_rcu(dev_net(dev), index); fwd = dev_get_by_index_rcu(dev_net(dev), index);
ri->ifindex = 0; ri->ifindex = 0;
if (unlikely(!dev)) { if (unlikely(!fwd)) {
bpf_warn_invalid_xdp_redirect(index); err = -EINVAL;
return -EINVAL; goto out;
} }
if (unlikely(!(dev->flags & IFF_UP))) if (unlikely(!(fwd->flags & IFF_UP))) {
return -ENETDOWN; err = -ENETDOWN;
len = dev->mtu + dev->hard_header_len + VLAN_HLEN; goto out;
if (skb->len > len) }
return -E2BIG;
skb->dev = dev; len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
return 0; if (skb->len > len) {
err = -EMSGSIZE;
goto out;
}
skb->dev = fwd;
out:
trace_xdp_redirect(dev, xdp_prog, XDP_REDIRECT, index, err);
return err;
} }
EXPORT_SYMBOL_GPL(xdp_do_generic_redirect); EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
...@@ -3565,11 +3573,6 @@ void bpf_warn_invalid_xdp_action(u32 act) ...@@ -3565,11 +3573,6 @@ void bpf_warn_invalid_xdp_action(u32 act)
} }
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
void bpf_warn_invalid_xdp_redirect(u32 ifindex)
{
WARN_ONCE(1, "Illegal XDP redirect to unsupported device ifindex(%i)\n", ifindex);
}
static bool __is_valid_sock_ops_access(int off, int size) static bool __is_valid_sock_ops_access(int off, int size)
{ {
if (off < 0 || off >= sizeof(struct bpf_sock_ops)) if (off < 0 || off >= sizeof(struct bpf_sock_ops))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.