Commit ff883aeb authored by David S. Miller

Merge branch 'xen-netfront'

David Vrabel says:

====================
xen-netfront: multi-queue related locking fixes

Two fixes to the per-queue locking bugs in xen-netfront that were
introduced in 3.16-rc1 with the multi-queue support.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6e08d5e3 f9feb1e6
...@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info) ...@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
unsigned int i = 0; unsigned int i = 0;
unsigned int num_queues = info->netdev->real_num_tx_queues; unsigned int num_queues = info->netdev->real_num_tx_queues;
netif_carrier_off(info->netdev);
for (i = 0; i < num_queues; ++i) { for (i = 0; i < num_queues; ++i) {
struct netfront_queue *queue = &info->queues[i]; struct netfront_queue *queue = &info->queues[i];
/* Stop old i/f to prevent errors whilst we rebuild the state. */
spin_lock_bh(&queue->rx_lock);
spin_lock_irq(&queue->tx_lock);
netif_carrier_off(queue->info->netdev);
spin_unlock_irq(&queue->tx_lock);
spin_unlock_bh(&queue->rx_lock);
if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
unbind_from_irqhandler(queue->tx_irq, queue); unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
...@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info) ...@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
queue->tx_evtchn = queue->rx_evtchn = 0; queue->tx_evtchn = queue->rx_evtchn = 0;
queue->tx_irq = queue->rx_irq = 0; queue->tx_irq = queue->rx_irq = 0;
napi_synchronize(&queue->napi);
/* End access and free the pages */ /* End access and free the pages */
xennet_end_access(queue->tx_ring_ref, queue->tx.sring); xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
xennet_end_access(queue->rx_ring_ref, queue->rx.sring); xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
...@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev) ...@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
/* By now, the queue structures have been set up */ /* By now, the queue structures have been set up */
for (j = 0; j < num_queues; ++j) { for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j]; queue = &np->queues[j];
spin_lock_bh(&queue->rx_lock);
spin_lock_irq(&queue->tx_lock);
/* Step 1: Discard all pending TX packet fragments. */ /* Step 1: Discard all pending TX packet fragments. */
spin_lock_irq(&queue->tx_lock);
xennet_release_tx_bufs(queue); xennet_release_tx_bufs(queue);
spin_unlock_irq(&queue->tx_lock);
/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
spin_lock_bh(&queue->rx_lock);
for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
skb_frag_t *frag; skb_frag_t *frag;
const struct page *page; const struct page *page;
...@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev) ...@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
} }
queue->rx.req_prod_pvt = requeue_idx; queue->rx.req_prod_pvt = requeue_idx;
spin_unlock_bh(&queue->rx_lock);
} }
/* /*
...@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev) ...@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
netif_carrier_on(np->netdev); netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) { for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j]; queue = &np->queues[j];
notify_remote_via_irq(queue->tx_irq); notify_remote_via_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq) if (queue->tx_irq != queue->rx_irq)
notify_remote_via_irq(queue->rx_irq); notify_remote_via_irq(queue->rx_irq);
xennet_tx_buf_gc(queue);
xennet_alloc_rx_buffers(queue);
spin_lock_irq(&queue->tx_lock);
xennet_tx_buf_gc(queue);
spin_unlock_irq(&queue->tx_lock); spin_unlock_irq(&queue->tx_lock);
spin_lock_bh(&queue->rx_lock);
xennet_alloc_rx_buffers(queue);
spin_unlock_bh(&queue->rx_lock); spin_unlock_bh(&queue->rx_lock);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment