Commit 2471c75e authored by Jesper Dangaard Brouer, committed by David S. Miller

virtio_net: split XDP_TX kick and XDP_REDIRECT map flushing

The driver was combining the XDP_TX virtqueue_kick and the XDP_REDIRECT
map flushing (xdp_do_flush_map).  This is suboptimal; these two
flush operations should be kept separate.

The suboptimal behavior was introduced in commit 9267c430
("virtio-net: add missing virtqueue kick when flushing packets").

Fixes: 9267c430 ("virtio-net: add missing virtqueue kick when flushing packets")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2e689312
drivers/net/virtio_net.c

@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
 #define VIRTIO_XDP_HEADROOM 256
 
+/* Separating two types of XDP xmit */
+#define VIRTIO_XDP_TX    BIT(0)
+#define VIRTIO_XDP_REDIR BIT(1)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 				     struct receive_queue *rq,
 				     void *buf, void *ctx,
 				     unsigned int len,
-				     bool *xdp_xmit)
+				     unsigned int *xdp_xmit)
 {
 	struct sk_buff *skb;
 	struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
 				trace_xdp_exception(vi->dev, xdp_prog, act);
 				goto err_xdp;
 			}
-			*xdp_xmit = true;
+			*xdp_xmit |= VIRTIO_XDP_TX;
 			rcu_read_unlock();
 			goto xdp_xmit;
 		case XDP_REDIRECT:
 			err = xdp_do_redirect(dev, &xdp, xdp_prog);
 			if (err)
 				goto err_xdp;
-			*xdp_xmit = true;
+			*xdp_xmit |= VIRTIO_XDP_REDIR;
 			rcu_read_unlock();
 			goto xdp_xmit;
 		default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 void *buf,
 					 void *ctx,
 					 unsigned int len,
-					 bool *xdp_xmit)
+					 unsigned int *xdp_xmit)
 {
 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
 	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 					put_page(xdp_page);
 					goto err_xdp;
 				}
-				*xdp_xmit = true;
+				*xdp_xmit |= VIRTIO_XDP_TX;
 				if (unlikely(xdp_page != page))
 					put_page(page);
 				rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 					put_page(xdp_page);
 					goto err_xdp;
 				}
-				*xdp_xmit = true;
+				*xdp_xmit |= VIRTIO_XDP_REDIR;
 				if (unlikely(xdp_page != page))
 					put_page(page);
 				rcu_read_unlock();
@@ -939,7 +943,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 }
 
 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-		       void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+		       void *buf, unsigned int len, void **ctx,
+		       unsigned int *xdp_xmit)
 {
 	struct net_device *dev = vi->dev;
 	struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
 	}
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+			   unsigned int *xdp_xmit)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct send_queue *sq;
 	unsigned int received, qp;
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 
 	virtnet_poll_cleantx(rq);
 
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	if (received < budget)
 		virtqueue_napi_complete(napi, rq->vq, received);
 
-	if (xdp_xmit) {
+	if (xdp_xmit & VIRTIO_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & VIRTIO_XDP_TX) {
 		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
 		     smp_processor_id();
 		sq = &vi->sq[qp];
 		virtqueue_kick(sq->vq);
-		xdp_do_flush_map();
 	}
 
 	return received;