Commit f4f1c23d authored by stephen hemminger, committed by David S. Miller

netvsc: fix NAPI performance regression

When using NAPI, the single stream performance declined significantly
because the poll routine was updating the host after every burst
of packets. This excess signalling caused host throttling.

This fix restores the old behavior: the host is only signalled
after the ring has been emptied.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
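For orientation, a condensed sketch of the pre-fix poll loop (illustration only: the hv_pkt_iter_*() calls are the driver's real ring iterators, but the body is simplified and handle_one_packet() is a hypothetical stand-in):

static int old_style_poll(struct vmbus_channel *channel, int budget)
{
	const struct vmpacket_descriptor *desc = hv_pkt_iter_first(channel);
	int work_done = 0;

	while (desc && work_done < budget) {
		work_done += handle_one_packet(desc);	/* hypothetical */
		desc = __hv_pkt_iter_next(channel, desc);
	}

	/* Ran at the end of every poll: updates the host's view of the
	 * ring once per burst -- the excess signalling described above. */
	hv_pkt_iter_close(channel);

	return work_done;
}

The patch below drops the unconditional hv_pkt_iter_close() and keeps the ring cursor in per-channel state. It also switches to the plain hv_pkt_iter_next() wrapper which, as I read the VMBus headers of this era, closes the iterator only once it runs off the end of the ring, so the host is signalled only after the ring has been emptied.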
parent 6069f3fb
@@ -723,6 +723,7 @@ struct net_device_context {
 /* Per channel data */
 struct netvsc_channel {
 	struct vmbus_channel *channel;
+	const struct vmpacket_descriptor *desc;
 	struct napi_struct napi;
 	struct multi_send_data msd;
 	struct multi_recv_comp mrc;
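This new desc member is what makes the reworked poll below resumable: a NULL cursor means the previous poll drained the ring, anything else means the budget expired mid-ring. NAPI never runs a given instance's poll concurrently with itself, so the cursor needs no locking. A minimal sketch of the idiom (resume_or_start() is a hypothetical helper, not part of the patch):

static const struct vmpacket_descriptor *
resume_or_start(struct netvsc_channel *nvchan)
{
	/* NULL: the last pass emptied the ring, begin a new interval;
	 * non-NULL: continue where the previous budget cut us off. */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(nvchan->channel);

	return nvchan->desc;
}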
@@ -1173,7 +1173,6 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 				  struct vmbus_channel *channel,
 				  struct netvsc_device *net_device,
 				  struct net_device *ndev,
-				  u64 request_id,
 				  const struct vmpacket_descriptor *desc)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1195,7 +1194,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 	default:
 		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
-			   desc->type, request_id);
+			   desc->type, desc->trans_id);
 		break;
 	}
@@ -1222,28 +1221,20 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
 	struct net_device *ndev = hv_get_drvdata(device);
 	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
-	const struct vmpacket_descriptor *desc;
 	int work_done = 0;
 
-	desc = hv_pkt_iter_first(channel);
-	while (desc) {
-		int count;
-
-		count = netvsc_process_raw_pkt(device, channel, net_device,
-					       ndev, desc->trans_id, desc);
-		work_done += count;
-		desc = __hv_pkt_iter_next(channel, desc);
-
-		/* If receive packet budget is exhausted, reschedule */
-		if (work_done >= budget) {
-			work_done = budget;
-			break;
-		}
+	/* If starting a new interval */
+	if (!nvchan->desc)
+		nvchan->desc = hv_pkt_iter_first(channel);
+
+	while (nvchan->desc && work_done < budget) {
+		work_done += netvsc_process_raw_pkt(device, channel, net_device,
+						    ndev, nvchan->desc);
+		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
 	}
-	hv_pkt_iter_close(channel);
 
-	/* If budget was not exhausted and
-	 * not doing busy poll
+	/* If receive ring was exhausted
+	 * and not doing busy poll
 	 * then re-enable host interrupts
 	 * and reschedule if ring is not empty.
 	 */
@@ -1253,7 +1244,9 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 		napi_reschedule(napi);
 
 	netvsc_chk_recv_comp(net_device, channel, q_idx);
-	return work_done;
+
+	/* Driver may overshoot since multiple packets per descriptor */
+	return min(work_done, budget);
 }
 
 /* Call back when data is available in host ring buffer.
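A note on the new min(): netvsc_process_raw_pkt() returns a per-descriptor packet count, and a single VMBus descriptor can complete several receive packets, so work_done can overshoot even though the loop re-checks the budget before each descriptor. NAPI treats a return of exactly budget as "more work pending", and the core complains if poll() returns more than its weight, hence the clamp. A toy illustration with made-up numbers:

	int budget = 64;	/* typical NAPI weight */
	int work_done = 62;	/* budget check passes: 62 < 64 */

	work_done += 3;		/* one descriptor carried three packets */

	/* work_done is now 65 > budget: report budget so the core keeps
	 * this NAPI instance scheduled rather than warning. */
	return min(work_done, budget);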
@@ -1263,10 +1256,12 @@ void netvsc_channel_cb(void *context)
 {
 	struct netvsc_channel *nvchan = context;
 
-	/* disable interupts from host */
-	hv_begin_read(&nvchan->channel->inbound);
+	if (napi_schedule_prep(&nvchan->napi)) {
+		/* disable interupts from host */
+		hv_begin_read(&nvchan->channel->inbound);
 
-	napi_schedule(&nvchan->napi);
+		__napi_schedule(&nvchan->napi);
+	}
 }
 
 /*
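The callback change is the standard two-step NAPI scheduling idiom: napi_schedule_prep() atomically claims the scheduled bit, so masking the host interrupt and the actual __napi_schedule() happen only when this caller won that race, rather than re-arming hv_begin_read() for a poll that is already pending. A condensed sketch of the shape (generic apart from the netvsc-specific hv_begin_read() call):

void example_channel_cb(struct netvsc_channel *nvchan)
{
	/* Returns false if this NAPI instance was already scheduled. */
	if (napi_schedule_prep(&nvchan->napi)) {
		/* We own the wakeup: mask further interrupts from the
		 * host on the inbound ring before polling starts. */
		hv_begin_read(&nvchan->channel->inbound);

		__napi_schedule(&nvchan->napi);
	}
}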