Commit 92b85671 authored by David S. Miller

Merge branch 'xen-netfront'

David Vrabel says:

====================
xen-netfront: more multiqueue fixes

A few more xen-netfront fixes for the multiqueue support added in
3.16-rc1.  It would be great if these could make it into 3.16 but I
suspect it's a little late for that now.

The second patch fixes a significant resource leak that prevents
guests from migrating more than a handful of times.

These have been tested by repeatedly migrating a guest over 250 times
(it would previously fail with this guest after only 8 iterations).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 081e83a7 69cb8524
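
The leak the second patch addresses is in the per-queue grant-reference pools (gref_tx_head/gref_rx_head): they are allocated every time the frontend connects to a backend, but before this series they were only returned in xennet_uninit(), which does not run on a plain disconnect/reconnect cycle such as live migration. The sketch below is a simplified illustration of the alloc/free pairing the series moves onto the disconnect path; it is not the driver's literal code, the struct and function names are made up, and the pool sizes are hard-coded only for the example.

#include <linux/errno.h>
#include <xen/grant_table.h>

#define EXAMPLE_TX_GRANTS 256	/* illustrative pool sizes; the driver */
#define EXAMPLE_RX_GRANTS 256	/* sizes these from its ring geometry  */

struct example_queue {
	grant_ref_t gref_tx_head;	/* head of the Tx grant-ref pool */
	grant_ref_t gref_rx_head;	/* head of the Rx grant-ref pool */
};

/* Runs each time the frontend (re)connects to a backend. */
static int example_setup_grants(struct example_queue *q)
{
	if (gnttab_alloc_grant_references(EXAMPLE_TX_GRANTS,
					  &q->gref_tx_head) < 0)
		return -ENOMEM;
	if (gnttab_alloc_grant_references(EXAMPLE_RX_GRANTS,
					  &q->gref_rx_head) < 0) {
		gnttab_free_grant_references(q->gref_tx_head);
		return -ENOMEM;
	}
	return 0;
}

/*
 * Must run on every disconnect (e.g. before migration), not only when the
 * netdev is unregistered; otherwise each reconnect allocates two fresh
 * pools per queue and the old ones are never returned.
 */
static void example_teardown_grants(struct example_queue *q)
{
	gnttab_free_grant_references(q->gref_tx_head);
	gnttab_free_grant_references(q->gref_rx_head);
}
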
@@ -1196,22 +1196,6 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 	spin_unlock_bh(&queue->rx_lock);
 }
 
-static void xennet_uninit(struct net_device *dev)
-{
-	struct netfront_info *np = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
-	struct netfront_queue *queue;
-	unsigned int i;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		xennet_release_tx_bufs(queue);
-		xennet_release_rx_bufs(queue);
-		gnttab_free_grant_references(queue->gref_tx_head);
-		gnttab_free_grant_references(queue->gref_rx_head);
-	}
-}
-
 static netdev_features_t xennet_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
@@ -1313,7 +1297,6 @@ static void xennet_poll_controller(struct net_device *dev)
 
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open = xennet_open,
-	.ndo_uninit = xennet_uninit,
 	.ndo_stop = xennet_close,
 	.ndo_start_xmit = xennet_start_xmit,
 	.ndo_change_mtu = xennet_change_mtu,
@@ -1455,6 +1438,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
 		napi_synchronize(&queue->napi);
 
+		xennet_release_tx_bufs(queue);
+		xennet_release_rx_bufs(queue);
+		gnttab_free_grant_references(queue->gref_tx_head);
+		gnttab_free_grant_references(queue->gref_rx_head);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -1827,8 +1815,8 @@ static int xennet_create_queues(struct netfront_info *info,
 
 		ret = xennet_init_queue(queue);
 		if (ret < 0) {
-			dev_warn(&info->netdev->dev, "only created %d queues\n",
-				 num_queues);
+			dev_warn(&info->netdev->dev,
+				 "only created %d queues\n", i);
 			num_queues = i;
 			break;
 		}
@@ -2001,7 +1989,7 @@ static int talk_to_netback(struct xenbus_device *dev,
 	info->queues = NULL;
 	rtnl_lock();
 	netif_set_real_num_tx_queues(info->netdev, 0);
-	rtnl_lock();
+	rtnl_unlock();
  out:
 	return err;
 }
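
The one-liner above matters because the error path called rtnl_lock() a second time instead of rtnl_unlock(), which self-deadlocks on the non-recursive RTNL mutex. A hedged sketch of the intended lock/unlock pairing; the function name and surrounding structure are illustrative, not the driver's exact error path:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Illustrative error-path shape; not the driver's literal code. */
static int example_error_path(struct net_device *netdev, int err)
{
	rtnl_lock();
	netif_set_real_num_tx_queues(netdev, 0);
	rtnl_unlock();		/* previously mistyped as rtnl_lock() */

	return err;
}
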
@@ -2010,10 +1998,7 @@ static int xennet_connect(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
-	int i, requeue_idx, err;
-	struct sk_buff *skb;
-	grant_ref_t ref;
-	struct xen_netif_rx_request *req;
+	int err;
 	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;
@@ -2040,47 +2025,8 @@ static int xennet_connect(struct net_device *dev)
 	netdev_update_features(dev);
 	rtnl_unlock();
 
-	/* By now, the queue structures have been set up */
-	for (j = 0; j < num_queues; ++j) {
-		queue = &np->queues[j];
-
-		/* Step 1: Discard all pending TX packet fragments. */
-		spin_lock_irq(&queue->tx_lock);
-		xennet_release_tx_bufs(queue);
-		spin_unlock_irq(&queue->tx_lock);
-
-		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-		spin_lock_bh(&queue->rx_lock);
-
-		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-			skb_frag_t *frag;
-			const struct page *page;
-			if (!queue->rx_skbs[i])
-				continue;
-
-			skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
-			ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
-			req = RING_GET_REQUEST(&queue->rx, requeue_idx);
-
-			frag = &skb_shinfo(skb)->frags[0];
-			page = skb_frag_page(frag);
-			gnttab_grant_foreign_access_ref(
-				ref, queue->info->xbdev->otherend_id,
-				pfn_to_mfn(page_to_pfn(page)),
-				0);
-			req->gref = ref;
-			req->id   = requeue_idx;
-
-			requeue_idx++;
-		}
-
-		queue->rx.req_prod_pvt = requeue_idx;
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
-
 	/*
-	 * Step 3: All public and private state should now be sane.  Get
+	 * All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
 	 * domain a kick because we've probably just requeued some
 	 * packets.
......