Commit 92bab4e7 authored by David S. Miller

Merge branch 'virtio-net-busy-poll'

Jason Wang says:

====================
rx busy polling support for virtio-net

This series introduces rx busy polling support for virtio-net, which
is useful for reducing latency in a kvm guest.  Instead of
introducing new states and spinlocks, the series reuses the NAPI
state to synchronize between NAPI and busy polling.  This greatly
simplifies the code and avoids spinlock overhead on the normal NAPI
fast path.
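
A condensed sketch of that synchronization idea (illustrative only;
the real logic is the virtnet_busy_poll() hunk in the diff below):
napi_schedule_prep() acts as an atomic try-lock on NAPI_STATE_SCHED,
and clear_bit() releases it.

    /* Illustrative sketch, not part of the patch. */
    static int busy_poll_sketch(struct napi_struct *napi)
    {
            /* Atomically claim the queue; if NAPI (or another busy
             * poller) already owns it, back off instead of spinning
             * on a dedicated lock.
             */
            if (!napi_schedule_prep(napi))
                    return LL_FLUSH_BUSY;

            /* ... poll the rx ring directly here ... */

            /* Release ownership so the normal NAPI path can run. */
            clear_bit(NAPI_STATE_SCHED, &napi->state);
            return 0;
    }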

Testing was done between a kvm guest and an external host, the two
hosts connected through 40 Gb mlx4 cards.  With both busy_poll and
busy_read set to 50 in the guest, a 1-byte netperf TCP_RR test shows
a 127% improvement: the transaction rate increased from 8353.33 to
18966.87 transactions per second.
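
For reference, busy_poll and busy_read above are the
net.core.busy_poll and net.core.busy_read sysctls (poll time in
microseconds).  A socket can also opt in individually via
SO_BUSY_POLL; a minimal userspace sketch, with the 50 us value taken
from the test above:

    #include <sys/socket.h>
    #include <stdio.h>

    #ifndef SO_BUSY_POLL
    #define SO_BUSY_POLL 46         /* from asm-generic/socket.h */
    #endif

    /* Enable busy polling on a single socket instead of system-wide;
     * the value is the busy-poll interval in microseconds.
     */
    static int enable_busy_poll(int fd, int usecs)
    {
            if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
                           &usecs, sizeof(usecs)) < 0) {
                    perror("setsockopt(SO_BUSY_POLL)");
                    return -1;
            }
            return 0;
    }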

Changes from V2:
- Avoid introducing new states and spinlocks by reusing the NAPI
  state
- Fix the budget calculation in virtnet_poll()
- Drop patch 1/3 from V2 since it was useless

Changes from V1:
- split the patch into smaller ones
- add more details about test setup/configuration
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 96b3bff4 91815639
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/average.h>
+#include <net/busy_poll.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -521,6 +522,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                 skb_shinfo(skb)->gso_segs = 0;
         }
 
+        skb_mark_napi_id(skb, &rq->napi);
+
         netif_receive_skb(skb);
         return;
 
@@ -725,15 +728,12 @@ static void refill_work(struct work_struct *work)
         }
 }
 
-static int virtnet_poll(struct napi_struct *napi, int budget)
+static int virtnet_receive(struct receive_queue *rq, int budget)
 {
-        struct receive_queue *rq =
-                container_of(napi, struct receive_queue, napi);
         struct virtnet_info *vi = rq->vq->vdev->priv;
+        unsigned int len, received = 0;
         void *buf;
-        unsigned int r, len, received = 0;
 
-again:
         while (received < budget &&
                (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                 receive_buf(rq, buf, len);
@@ -745,6 +745,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
                 schedule_delayed_work(&vi->refill, 0);
         }
 
+        return received;
+}
+
+static int virtnet_poll(struct napi_struct *napi, int budget)
+{
+        struct receive_queue *rq =
+                container_of(napi, struct receive_queue, napi);
+        unsigned int r, received = 0;
+
+again:
+        received += virtnet_receive(rq, budget - received);
+
         /* Out of packets? */
         if (received < budget) {
                 r = virtqueue_enable_cb_prepare(rq->vq);
@@ -760,6 +772,43 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
         return received;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int virtnet_busy_poll(struct napi_struct *napi)
+{
+        struct receive_queue *rq =
+                container_of(napi, struct receive_queue, napi);
+        struct virtnet_info *vi = rq->vq->vdev->priv;
+        int r, received = 0, budget = 4;
+
+        if (!(vi->status & VIRTIO_NET_S_LINK_UP))
+                return LL_FLUSH_FAILED;
+
+        if (!napi_schedule_prep(napi))
+                return LL_FLUSH_BUSY;
+
+        virtqueue_disable_cb(rq->vq);
+
+again:
+        received += virtnet_receive(rq, budget);
+
+        r = virtqueue_enable_cb_prepare(rq->vq);
+        clear_bit(NAPI_STATE_SCHED, &napi->state);
+        if (unlikely(virtqueue_poll(rq->vq, r)) &&
+            napi_schedule_prep(napi)) {
+                virtqueue_disable_cb(rq->vq);
+                if (received < budget) {
+                        budget -= received;
+                        goto again;
+                } else {
+                        __napi_schedule(napi);
+                }
+        }
+
+        return received;
+}
+#endif  /* CONFIG_NET_RX_BUSY_POLL */
+
 static int virtnet_open(struct net_device *dev)
 {
         struct virtnet_info *vi = netdev_priv(dev);
@@ -1347,6 +1396,9 @@ static const struct net_device_ops virtnet_netdev = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller = virtnet_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+        .ndo_busy_poll = virtnet_busy_poll,
+#endif
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
@@ -1552,6 +1604,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
                 vi->rq[i].pages = NULL;
                 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
                                napi_weight);
+                napi_hash_add(&vi->rq[i].napi);
 
                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
                 ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
@@ -1853,11 +1906,13 @@ static int virtnet_freeze(struct virtio_device *vdev)
         netif_device_detach(vi->dev);
         cancel_delayed_work_sync(&vi->refill);
 
-        if (netif_running(vi->dev))
+        if (netif_running(vi->dev)) {
                 for (i = 0; i < vi->max_queue_pairs; i++) {
                         napi_disable(&vi->rq[i].napi);
+                        napi_hash_del(&vi->rq[i].napi);
                         netif_napi_del(&vi->rq[i].napi);
                 }
+        }
 
         remove_vq_common(vi);