Commit 441abde4 authored by Tonghao Zhang, committed by David S. Miller

net: vhost: add rx busy polling in tx path

This patch improves guest receive performance. On the handle_tx side, we now
also busy poll the sock receive queue, mirroring what handle_rx already does
for the other direction.
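The idea is that, before handle_tx gives up on an empty tx ring, the busy loop
also watches the tap socket's receive queue, so queued rx packets are noticed
without waiting for a wakeup. Below is a minimal sketch of that loop
(illustration only: tx_busy_poll_sketch is a made-up name, sock_has_rx_data()
is assumed from the parent commit, and the real shared helper additionally
handles the vq mutexes and notification enable/disable).

static void tx_busy_poll_sketch(struct vhost_net *net,
                                struct vhost_virtqueue *rvq,
                                struct vhost_virtqueue *tvq,
                                struct socket *sock, bool *busyloop_intr)
{
        unsigned long endtime = busy_clock() + tvq->busyloop_timeout;

        preempt_disable();
        while (vhost_can_busy_poll(endtime)) {
                /* Other vhost work is pending; stop polling and record why. */
                if (vhost_has_work(&net->dev)) {
                        *busyloop_intr = true;
                        break;
                }
                /* Stop if the rx socket has data and the rx ring has buffers... */
                if (sock_has_rx_data(sock) &&
                    !vhost_vq_avail_empty(&net->dev, rvq))
                        break;
                /* ...or if the guest queued new tx descriptors. */
                if (!vhost_vq_avail_empty(&net->dev, tvq))
                        break;
                cpu_relax();
        }
        preempt_enable();
}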

We set poll-us to 100us and use netperf to measure throughput and mean
latency. While the tests run, the vhost-net kthread of that VM stays at
100% CPU. The command is shown below.
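The 100us value reaches the virtqueues through the
VHOST_SET_VRING_BUSYLOOP_TIMEOUT ioctl (QEMU's tap netdev poll-us option is
one way to request it). The snippet below is a hypothetical userspace sketch
of that step, not part of this patch.

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Hypothetical helper: ask vhost to busy poll this vring for up to 100us
 * before sleeping, roughly what poll-us=100 translates to. */
static int set_busyloop_timeout(int vhost_fd, unsigned int vq_index)
{
        struct vhost_vring_state state = {
                .index = vq_index,
                .num   = 100,   /* timeout in microseconds */
        };

        return ioctl(vhost_fd, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, &state);
}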

Rx performance is greatly improved by this patch, while tx shows no notable
change with this series. The patch is therefore most useful for
bi-directional traffic.

netperf -H IP -t TCP_STREAM -l 20 -- -O "THROUGHPUT, THROUGHPUT_UNITS, MEAN_LATENCY"

Topology:
[Host] ->linux bridge -> tap vhost-net ->[Guest]

TCP_STREAM:
* Without the patch:  19842.95 Mbps, 6.50 us mean latency
* With the patch:     37598.20 Mbps, 3.43 us mean latency
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dc151282
@@ -547,34 +547,27 @@ static void vhost_net_busy_poll(struct vhost_net *net,
 	mutex_unlock(&vq->mutex);
 }
 
 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
-				    struct vhost_net_virtqueue *nvq,
+				    struct vhost_net_virtqueue *tnvq,
 				    unsigned int *out_num, unsigned int *in_num,
 				    struct msghdr *msghdr, bool *busyloop_intr)
 {
-	struct vhost_virtqueue *vq = &nvq->vq;
-	unsigned long uninitialized_var(endtime);
-	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+	struct vhost_virtqueue *rvq = &rnvq->vq;
+	struct vhost_virtqueue *tvq = &tnvq->vq;
+
+	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
 				  out_num, in_num, NULL, NULL);
 
-	if (r == vq->num && vq->busyloop_timeout) {
+	if (r == tvq->num && tvq->busyloop_timeout) {
 		/* Flush batched packets first */
-		if (!vhost_sock_zcopy(vq->private_data))
-			vhost_tx_batch(net, nvq, vq->private_data, msghdr);
-		preempt_disable();
-		endtime = busy_clock() + vq->busyloop_timeout;
-		while (vhost_can_busy_poll(endtime)) {
-			if (vhost_has_work(vq->dev)) {
-				*busyloop_intr = true;
-				break;
-			}
-			if (!vhost_vq_avail_empty(vq->dev, vq))
-				break;
-			cpu_relax();
-		}
-		preempt_enable();
-		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+		if (!vhost_sock_zcopy(tvq->private_data))
+			vhost_tx_batch(net, tnvq, tvq->private_data, msghdr);
+
+		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
+
+		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
 				      out_num, in_num, NULL, NULL);
 	}