Commit 84c6b868 authored by Jakub Kicinski, committed by David S. Miller

xsk: don't allow umem replace at stack level

Currently, every driver has to check whether it already has a umem
installed for a given queue and return an error if so.  Make better
use of XDP_QUERY_XSK_UMEM and move this check into the core.

We now need to hold the rtnl lock across both the query and the setup call.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f734607e
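
For context, here is a minimal sketch of what a driver's ndo_bpf handler might look like after this change: on XDP_QUERY_XSK_UMEM it only reports the currently installed umem, and it no longer has to reject replacements itself; the core refuses to install over an existing umem (-EBUSY under XDP_ZEROCOPY, copy fallback otherwise). The my_priv struct, its xsk_umems array, and my_xsk_umem_setup() are hypothetical names for illustration, not part of this commit.

static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct my_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_QUERY_XSK_UMEM:
		/* Report what is installed; the core decides whether
		 * a replace attempt should fail or fall back.
		 */
		bpf->xsk.umem = priv->xsk_umems[bpf->xsk.queue_id];
		return 0;
	case XDP_SETUP_XSK_UMEM:
		/* No need to check for an existing umem here any more,
		 * the core already queried under rtnl before setup.
		 */
		return my_xsk_umem_setup(priv, bpf->xsk.umem,
					 bpf->xsk.queue_id);
	default:
		return -EINVAL;
	}
}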
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -872,10 +872,10 @@ struct netdev_bpf {
 		struct {
 			struct bpf_offloaded_map *offmap;
 		};
-		/* XDP_SETUP_XSK_UMEM */
+		/* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
 		struct {
-			struct xdp_umem *umem;
-			u16 queue_id;
+			struct xdp_umem *umem; /* out for query */
+			u16 queue_id; /* in for query */
 		} xsk;
 	};
 };
@@ -3568,6 +3568,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags);
 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
 		    enum bpf_netdev_command cmd);
+int xdp_umem_query(struct net_device *dev, u16 queue_id);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -11,6 +11,8 @@
 #include <linux/slab.h>
 #include <linux/bpf.h>
 #include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
 
 #include "xdp_umem.h"
 #include "xsk_queue.h"
@@ -40,6 +42,21 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 	}
 }
 
+int xdp_umem_query(struct net_device *dev, u16 queue_id)
+{
+	struct netdev_bpf bpf;
+
+	ASSERT_RTNL();
+
+	memset(&bpf, 0, sizeof(bpf));
+	bpf.command = XDP_QUERY_XSK_UMEM;
+	bpf.xsk.queue_id = queue_id;
+
+	if (!dev->netdev_ops->ndo_bpf)
+		return 0;
+	return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
+}
+
 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 			u32 queue_id, u16 flags)
 {
@@ -62,28 +79,30 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 	bpf.command = XDP_QUERY_XSK_UMEM;
 
 	rtnl_lock();
-	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-	rtnl_unlock();
-
-	if (err)
-		return force_zc ? -ENOTSUPP : 0;
+	err = xdp_umem_query(dev, queue_id);
+	if (err) {
+		err = err < 0 ? -ENOTSUPP : -EBUSY;
+		goto err_rtnl_unlock;
+	}
 
 	bpf.command = XDP_SETUP_XSK_UMEM;
 	bpf.xsk.umem = umem;
 	bpf.xsk.queue_id = queue_id;
 
-	rtnl_lock();
 	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-	rtnl_unlock();
-
 	if (err)
-		return force_zc ? err : 0; /* fail or fallback */
+		goto err_rtnl_unlock;
+	rtnl_unlock();
 
 	dev_hold(dev);
 	umem->dev = dev;
 	umem->queue_id = queue_id;
 	umem->zc = true;
 	return 0;
+
+err_rtnl_unlock:
+	rtnl_unlock();
+	return force_zc ? err : 0; /* fail or fallback */
 }
 
 static void xdp_umem_clear_dev(struct xdp_umem *umem)
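
A note on xdp_umem_query()'s return convention: `x ?: y` is the GNU "elvis" extension, so the function returns a negative errno when the driver's ndo_bpf() call fails, 1 when a umem is already installed on the queue, and 0 when the queue is free. That is why xdp_umem_assign_dev() maps a negative result to -ENOTSUPP and a positive one to -EBUSY, and why rtnl must stay held from the query through XDP_SETUP_XSK_UMEM: dropping it in between would let another task install a umem after the check. An equivalent expansion without the extension, for illustration only (xdp_umem_query_expanded is a hypothetical name, not in the commit):

static int xdp_umem_query_expanded(struct net_device *dev, u16 queue_id)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	memset(&bpf, 0, sizeof(bpf));
	bpf.command = XDP_QUERY_XSK_UMEM;
	bpf.xsk.queue_id = queue_id;

	if (!dev->netdev_ops->ndo_bpf)
		return 0;		/* no XDP support, nothing installed */

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		return err;		/* negative errno from the driver */
	return bpf.xsk.umem != NULL;	/* 1: umem installed, 0: queue free */
}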