Commit cac320c8 authored by Jesper Dangaard Brouer, committed by David S. Miller

virtio_net: convert to use generic xdp_frame and xdp_return_frame API

The virtio_net driver assumes XDP frames are always released based on
page refcnt (via put_page).  Thus, it only queues the XDP data pointer
address and uses virt_to_head_page() to retrieve the struct page.

Use the XDP return API to get away from such assumptions. Instead,
queue an xdp_frame, which allows us to use the xdp_return_frame API
when releasing the frame.
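
The core of the change is visible in the transmit completion loop: instead of
queueing the raw data pointer and freeing it through the page refcount, the
driver now queues the xdp_frame itself and hands it back through the generic
return API. A condensed before/after sketch, distilled from the diff below
(surrounding setup and error handling omitted):

  /* Before: only the raw data pointer was queued, so releasing the
   * buffer had to assume a page-refcnt memory model.
   */
  while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
          put_page(virt_to_head_page(xdp_sent));

  /* After: a struct xdp_frame is queued, and the memory model recorded
   * in xdpf_sent->mem decides how the frame is returned.
   */
  while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
          xdp_return_frame(xdpf_sent->data, &xdpf_sent->mem);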

V8: Avoid endianness issues (found by kbuild test robot)
V9: Change __virtnet_xdp_xmit from bool to int return value (found by Dan Carpenter)
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1ffcbc85
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -415,38 +415,48 @@ static void virtnet_xdp_flush(struct net_device *dev)
 	virtqueue_kick(sq->vq);
 }
 
-static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
-			       struct xdp_buff *xdp)
+static int __virtnet_xdp_xmit(struct virtnet_info *vi,
+			      struct xdp_buff *xdp)
 {
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
-	unsigned int len;
+	struct xdp_frame *xdpf, *xdpf_sent;
 	struct send_queue *sq;
+	unsigned int len;
 	unsigned int qp;
-	void *xdp_sent;
 	int err;
 
 	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
 	sq = &vi->sq[qp];
 
 	/* Free up any pending old buffers before queueing new ones. */
-	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		struct page *sent_page = virt_to_head_page(xdp_sent);
-
-		put_page(sent_page);
-	}
+	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
+		xdp_return_frame(xdpf_sent->data, &xdpf_sent->mem);
 
-	xdp->data -= vi->hdr_len;
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return -EOVERFLOW;
+
+	/* virtqueue want to use data area in-front of packet */
+	if (unlikely(xdpf->metasize > 0))
+		return -EOPNOTSUPP;
+
+	if (unlikely(xdpf->headroom < vi->hdr_len))
+		return -EOVERFLOW;
+
+	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
+	xdpf->data -= vi->hdr_len;
 	/* Zero header and leave csum up to XDP layers */
-	hdr = xdp->data;
+	hdr = xdpf->data;
 	memset(hdr, 0, vi->hdr_len);
+	xdpf->len   += vi->hdr_len;
 
-	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+	sg_init_one(sq->sg, xdpf->data, xdpf->len);
 
-	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
+	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
 	if (unlikely(err))
-		return false; /* Caller handle free/refcnt */
+		return -ENOSPC; /* Caller handle free/refcnt */
 
-	return true;
+	return 0;
 }
 
 static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
@@ -454,7 +464,6 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct receive_queue *rq = vi->rq;
 	struct bpf_prog *xdp_prog;
-	bool sent;
 
 	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
 	 * indicate XDP resources have been successfully allocated.
@@ -463,10 +472,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 	if (!xdp_prog)
 		return -ENXIO;
 
-	sent = __virtnet_xdp_xmit(vi, xdp);
-	if (!sent)
-		return -ENOSPC;
-	return 0;
+	return __virtnet_xdp_xmit(vi, xdp);
 }
 
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
@@ -555,7 +561,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	struct page *page = virt_to_head_page(buf);
 	unsigned int delta = 0;
 	struct page *xdp_page;
-	bool sent;
 	int err;
 
 	len -= vi->hdr_len;
@@ -606,8 +611,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		delta = orig_data - xdp.data;
 		break;
 	case XDP_TX:
-		sent = __virtnet_xdp_xmit(vi, &xdp);
-		if (unlikely(!sent)) {
+		err = __virtnet_xdp_xmit(vi, &xdp);
+		if (unlikely(err)) {
 			trace_xdp_exception(vi->dev, xdp_prog, act);
 			goto err_xdp;
 		}
@@ -690,7 +695,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	struct bpf_prog *xdp_prog;
 	unsigned int truesize;
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
-	bool sent;
 	int err;
 
 	head_skb = NULL;
@@ -762,8 +766,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		}
 		break;
 	case XDP_TX:
-		sent = __virtnet_xdp_xmit(vi, &xdp);
-		if (unlikely(!sent)) {
+		err = __virtnet_xdp_xmit(vi, &xdp);
+		if (unlikely(err)) {
 			trace_xdp_exception(vi->dev, xdp_prog, act);
 			if (unlikely(xdp_page != page))
 				put_page(xdp_page);