Commit 47be2479 authored by Wanlong Gao, committed by David S. Miller

virtio-net: fix the set affinity bug when CPU IDs are not consecutive

As Michael mentioned, setting the affinity hints and selecting the tx queue
do not work well when CPU IDs are not consecutive, which can happen after
CPU hot-unplug. Fix this bug by traversing the online CPUs and keeping a
per-cpu variable that maps each CPU to its preferred virtqueue.
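To see why the explicit map matters, here is a minimal user-space sketch of
the mapping logic (illustrative only: the online-CPU mask and queue count are
made-up values, and a plain array stands in for the kernel's per-cpu vq_index
and for_each_online_cpu()):

#include <stdio.h>

#define NR_CPU_IDS 8

int main(void)
{
	/* Pretend CPUs 1, 3 and 6 were hot-unplugged. */
	int online[NR_CPU_IDS] = { 1, 0, 1, 0, 1, 1, 0, 1 };
	int curr_queue_pairs = 3;
	int vq_index[NR_CPU_IDS] = { 0 };
	int cpu, i = 0;

	/*
	 * Walk only the online CPUs and record an explicit
	 * CPU -> virtqueue mapping, the way virtnet_set_affinity()
	 * fills vi->vq_index below.  Every entry stays within
	 * [0, curr_queue_pairs) no matter which CPU IDs are online.
	 */
	for (cpu = 0; cpu < NR_CPU_IDS; cpu++) {
		if (!online[cpu])
			continue;
		vq_index[cpu] = ++i % curr_queue_pairs;
	}

	for (cpu = 0; cpu < NR_CPU_IDS; cpu++)
		if (online[cpu])
			printf("cpu %d -> txq %d\n", cpu, vq_index[cpu]);

	return 0;
}

A naive txq = smp_processor_id() pick, by contrast, would ask for queue 7 on
CPU 7 even though only three queue pairs exist; the per-cpu map keeps every
lookup in range.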

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Eric Dumazet <erdnetdev@gmail.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: virtualization@lists.linux-foundation.org
Cc: netdev@vger.kernel.org
Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ee50e135
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -123,6 +123,9 @@ struct virtnet_info {
 	/* Does the affinity hint is set for virtqueues? */
 	bool affinity_hint_set;
 
+	/* Per-cpu variable to show the mapping from CPU to virtqueue */
+	int __percpu *vq_index;
 };
 
 struct skb_vnet_hdr {
@@ -1016,6 +1019,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
 static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
 {
 	int i;
+	int cpu;
 
 	/* In multiqueue mode, when the number of cpu is equal to the number of
 	 * queue pairs, we let the queue pairs to be private to one cpu by
@@ -1023,22 +1027,40 @@ static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
 	 */
 	if ((vi->curr_queue_pairs == 1 ||
 	     vi->max_queue_pairs != num_online_cpus()) && set) {
-		if (vi->affinity_hint_set)
+		if (vi->affinity_hint_set) {
 			set = false;
-		else
+		} else {
+			i = 0;
+			for_each_online_cpu(cpu)
+				*per_cpu_ptr(vi->vq_index, cpu) =
+					++i % vi->curr_queue_pairs;
 			return;
+		}
 	}
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		int cpu = set ? i : -1;
-		virtqueue_set_affinity(vi->rq[i].vq, cpu);
-		virtqueue_set_affinity(vi->sq[i].vq, cpu);
-	}
-
-	if (set)
+	if (set) {
+		i = 0;
+		for_each_online_cpu(cpu) {
+			virtqueue_set_affinity(vi->rq[i].vq, cpu);
+			virtqueue_set_affinity(vi->sq[i].vq, cpu);
+			*per_cpu_ptr(vi->vq_index, cpu) = i;
+			i++;
+		}
+
 		vi->affinity_hint_set = true;
-	else
+	} else {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			virtqueue_set_affinity(vi->rq[i].vq, -1);
+			virtqueue_set_affinity(vi->sq[i].vq, -1);
+		}
+
+		i = 0;
+		for_each_online_cpu(cpu)
+			*per_cpu_ptr(vi->vq_index, cpu) =
+				++i % vi->curr_queue_pairs;
+
 		vi->affinity_hint_set = false;
+	}
 }
 
 static void virtnet_get_ringparam(struct net_device *dev,
@@ -1082,6 +1104,7 @@ static int virtnet_set_channels(struct net_device *dev,
 	if (queue_pairs > vi->max_queue_pairs)
 		return -EINVAL;
 
+	get_online_cpus();
 	err = virtnet_set_queues(vi, queue_pairs);
 	if (!err) {
 		netif_set_real_num_tx_queues(dev, queue_pairs);
@@ -1089,6 +1112,7 @@ static int virtnet_set_channels(struct net_device *dev,
 
 		virtnet_set_affinity(vi, true);
 	}
+	put_online_cpus();
 
 	return err;
 }
@@ -1127,12 +1151,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
 
 /* To avoid contending a lock hold by a vcpu who would exit to host, select the
  * txq based on the processor id.
- * TODO: handle cpu hotplug.
  */
 static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
-		  smp_processor_id();
+	int txq;
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	if (skb_rx_queue_recorded(skb)) {
+		txq = skb_get_rx_queue(skb);
+	} else {
+		txq = *__this_cpu_ptr(vi->vq_index);
+		if (txq == -1)
+			txq = 0;
+	}
 
 	while (unlikely(txq >= dev->real_num_tx_queues))
 		txq -= dev->real_num_tx_queues;
@@ -1371,7 +1402,10 @@ static int init_vqs(struct virtnet_info *vi)
 	if (ret)
 		goto err_free;
 
+	get_online_cpus();
 	virtnet_set_affinity(vi, true);
+	put_online_cpus();
+
 	return 0;
 
 err_free:
@@ -1453,6 +1487,10 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->stats == NULL)
 		goto free;
 
+	vi->vq_index = alloc_percpu(int);
+	if (vi->vq_index == NULL)
+		goto free_stats;
+
 	mutex_init(&vi->config_lock);
 	vi->config_enable = true;
 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1476,7 +1514,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
 	err = init_vqs(vi);
 	if (err)
-		goto free_stats;
+		goto free_index;
 
 	netif_set_real_num_tx_queues(dev, 1);
 	netif_set_real_num_rx_queues(dev, 1);
@@ -1520,6 +1558,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
 	virtnet_del_vqs(vi);
+free_index:
+	free_percpu(vi->vq_index);
 free_stats:
 	free_percpu(vi->stats);
 free:
@@ -1554,6 +1594,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 
 	flush_work(&vi->config_work);
 
+	free_percpu(vi->vq_index);
 	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }