Commit 2613af0e authored by Michael Dalton, committed by David S. Miller

virtio_net: migrate mergeable rx buffers to page frag allocators

The virtio_net driver's mergeable receive buffer allocator
uses 4KB packet buffers. For MTU-sized traffic, SKB truesize
is > 4KB but only ~1500 bytes of the buffer are used to store
packet data, reducing the effective TCP window size
substantially. This patch addresses the performance concerns
with mergeable receive buffers by allocating MTU-sized packet
buffers using page frag allocators. If more than MAX_SKB_FRAGS
buffers are needed, the SKB frag_list is used.
Signed-off-by: Michael Dalton <mwdalton@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5d9efa7e
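
For context, the buffer allocation this patch introduces reduces to the
sketch below. It is distilled from add_recvbuf_mergeable() in the diff;
alloc_mergeable_rx_buf() is a hypothetical helper name, while
skb_page_frag_refill(), netdev_alloc_frag(), and MAX_PACKET_LEN are the
identifiers the patch itself uses.

    /* Sketch: carve one MTU-sized buffer out of a shared page_frag.
     * Sleeping (GFP_KERNEL) callers refill the per-device frag with
     * skb_page_frag_refill(); atomic callers fall back to the per-CPU
     * netdev frag cache. Each buffer holds a page reference and is
     * released later via put_page(virt_to_head_page(buf)).
     */
    static void *alloc_mergeable_rx_buf(struct page_frag *frag, gfp_t gfp)
    {
            char *buf = NULL;

            if (gfp & __GFP_WAIT) {
                    if (skb_page_frag_refill(MAX_PACKET_LEN, frag, gfp)) {
                            buf = (char *)page_address(frag->page) +
                                  frag->offset;
                            get_page(frag->page);
                            frag->offset += MAX_PACKET_LEN;
                    }
            } else {
                    buf = netdev_alloc_frag(MAX_PACKET_LEN);
            }
            return buf;
    }

The win over the old get_a_page() path is that truesize can be charged
per MAX_PACKET_LEN chunk rather than per 4KB page, so MTU-sized packets
no longer shrink the effective TCP window.
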
@@ -124,6 +124,11 @@ struct virtnet_info {
 	/* Lock for config space updates */
 	struct mutex config_lock;
 
+	/* Page_frag for GFP_KERNEL packet buffer allocation when we run
+	 * low on memory.
+	 */
+	struct page_frag alloc_frag;
+
 	/* Does the affinity hint is set for virtqueues? */
 	bool affinity_hint_set;
@@ -217,33 +222,18 @@ static void skb_xmit_done(struct virtqueue *vq)
 	netif_wake_subqueue(vi->dev, vq2txq(vq));
 }
 
-static void set_skb_frag(struct sk_buff *skb, struct page *page,
-			 unsigned int offset, unsigned int *len)
-{
-	int size = min((unsigned)PAGE_SIZE - offset, *len);
-	int i = skb_shinfo(skb)->nr_frags;
-
-	__skb_fill_page_desc(skb, i, page, offset, size);
-
-	skb->data_len += size;
-	skb->len += size;
-	skb->truesize += PAGE_SIZE;
-	skb_shinfo(skb)->nr_frags++;
-	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
-	*len -= size;
-}
-
 /* Called from bottom half context */
 static struct sk_buff *page_to_skb(struct receive_queue *rq,
-				   struct page *page, unsigned int len)
+				   struct page *page, unsigned int offset,
+				   unsigned int len, unsigned int truesize)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
 	struct skb_vnet_hdr *hdr;
-	unsigned int copy, hdr_len, offset;
+	unsigned int copy, hdr_len, hdr_padded_len;
 	char *p;
 
-	p = page_address(page);
+	p = page_address(page) + offset;
 
 	/* copy small packet so we can reuse these pages for small data */
 	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
@@ -254,16 +244,17 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 
 	if (vi->mergeable_rx_bufs) {
 		hdr_len = sizeof hdr->mhdr;
-		offset = hdr_len;
+		hdr_padded_len = sizeof hdr->mhdr;
 	} else {
 		hdr_len = sizeof hdr->hdr;
-		offset = sizeof(struct padded_vnet_hdr);
+		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 	}
 
 	memcpy(hdr, p, hdr_len);
 
 	len -= hdr_len;
-	p += offset;
+	offset += hdr_padded_len;
+	p += hdr_padded_len;
 
 	copy = len;
 	if (copy > skb_tailroom(skb))
@@ -273,6 +264,14 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	len -= copy;
 	offset += copy;
 
+	if (vi->mergeable_rx_bufs) {
+		if (len)
+			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
+		else
+			put_page(page);
+		return skb;
+	}
+
 	/*
 	 * Verify that we can indeed put this data into a skb.
 	 * This is here to handle cases when the device erroneously
@@ -284,9 +283,12 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 		dev_kfree_skb(skb);
 		return NULL;
 	}
-
+	BUG_ON(offset >= PAGE_SIZE);
 	while (len) {
-		set_skb_frag(skb, page, offset, &len);
+		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
+				frag_size, truesize);
+		len -= frag_size;
 		page = (struct page *)page->private;
 		offset = 0;
 	}
@@ -297,33 +299,52 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
-static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
+static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
 {
-	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
+	struct sk_buff *curr_skb = head_skb;
+	char *buf;
 	struct page *page;
-	int num_buf, i, len;
+	int num_buf, len;
 
 	num_buf = hdr->mhdr.num_buffers;
 	while (--num_buf) {
-		i = skb_shinfo(skb)->nr_frags;
-		if (i >= MAX_SKB_FRAGS) {
-			pr_debug("%s: packet too long\n", skb->dev->name);
-			skb->dev->stats.rx_length_errors++;
-			return -EINVAL;
-		}
-		page = virtqueue_get_buf(rq->vq, &len);
-		if (!page) {
+		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers missing\n",
-				 skb->dev->name, hdr->mhdr.num_buffers);
-			skb->dev->stats.rx_length_errors++;
+				 head_skb->dev->name, hdr->mhdr.num_buffers);
+			head_skb->dev->stats.rx_length_errors++;
 			return -EINVAL;
 		}
-
-		if (len > PAGE_SIZE)
-			len = PAGE_SIZE;
-
-		set_skb_frag(skb, page, 0, &len);
-
+		if (unlikely(len > MAX_PACKET_LEN)) {
+			pr_debug("%s: rx error: merge buffer too long\n",
+				 head_skb->dev->name);
+			len = MAX_PACKET_LEN;
+		}
+		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+			if (unlikely(!nskb)) {
+				head_skb->dev->stats.rx_dropped++;
+				return -ENOMEM;
+			}
+			if (curr_skb == head_skb)
+				skb_shinfo(curr_skb)->frag_list = nskb;
+			else
+				curr_skb->next = nskb;
+			curr_skb = nskb;
+			head_skb->truesize += nskb->truesize;
+			num_skb_frags = 0;
+		}
+		if (curr_skb != head_skb) {
+			head_skb->data_len += len;
+			head_skb->len += len;
+			head_skb->truesize += MAX_PACKET_LEN;
+		}
+		page = virt_to_head_page(buf);
+		skb_add_rx_frag(curr_skb, num_skb_frags, page,
+				buf - (char *)page_address(page), len,
+				MAX_PACKET_LEN);
 		--rq->num;
 	}
 	return 0;
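
The frag_list handling in the hunk above reduces to this sketch;
next_merge_skb() is a hypothetical helper name, but the chaining logic
mirrors receive_mergeable().

    /* Sketch: when the current skb has no frag slots left, chain a
     * zero-length skb. The first overflow starts skb_shinfo()->frag_list
     * on the head skb; later overflows extend the chain via ->next.
     */
    static struct sk_buff *next_merge_skb(struct sk_buff *head_skb,
                                          struct sk_buff *curr_skb)
    {
            struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

            if (!nskb)
                    return NULL;
            if (curr_skb == head_skb)
                    skb_shinfo(curr_skb)->frag_list = nskb;
            else
                    curr_skb->next = nskb;
            head_skb->truesize += nskb->truesize;
            return nskb;
    }

Bytes landing in a chained skb are also accounted on head_skb
(data_len, len, truesize), so the head skb reflects the whole packet.
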
@@ -341,8 +362,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
 		dev->stats.rx_length_errors++;
-		if (vi->mergeable_rx_bufs || vi->big_packets)
+		if (vi->big_packets)
 			give_pages(rq, buf);
+		else if (vi->mergeable_rx_bufs)
+			put_page(virt_to_head_page(buf));
 		else
 			dev_kfree_skb(buf);
 		return;
@@ -352,19 +375,28 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		skb = buf;
 		len -= sizeof(struct virtio_net_hdr);
 		skb_trim(skb, len);
+	} else if (vi->mergeable_rx_bufs) {
+		struct page *page = virt_to_head_page(buf);
+		skb = page_to_skb(rq, page,
+				  (char *)buf - (char *)page_address(page),
+				  len, MAX_PACKET_LEN);
+		if (unlikely(!skb)) {
+			dev->stats.rx_dropped++;
+			put_page(page);
+			return;
+		}
+		if (receive_mergeable(rq, skb)) {
+			dev_kfree_skb(skb);
+			return;
+		}
 	} else {
 		page = buf;
-		skb = page_to_skb(rq, page, len);
+		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
 		if (unlikely(!skb)) {
 			dev->stats.rx_dropped++;
 			give_pages(rq, page);
 			return;
 		}
-		if (vi->mergeable_rx_bufs)
-			if (receive_mergeable(rq, skb)) {
-				dev_kfree_skb(skb);
-				return;
-			}
 	}
 
 	hdr = skb_vnet_hdr(skb);
@@ -501,18 +533,28 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 
 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 {
-	struct page *page;
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	char *buf = NULL;
 	int err;
 
-	page = get_a_page(rq, gfp);
-	if (!page)
+	if (gfp & __GFP_WAIT) {
+		if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag,
+					 gfp)) {
+			buf = (char *)page_address(vi->alloc_frag.page) +
+			      vi->alloc_frag.offset;
+			get_page(vi->alloc_frag.page);
+			vi->alloc_frag.offset += MAX_PACKET_LEN;
+		}
+	} else {
+		buf = netdev_alloc_frag(MAX_PACKET_LEN);
+	}
+	if (!buf)
 		return -ENOMEM;
 
-	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
-
-	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp);
+	sg_init_one(rq->sg, buf, MAX_PACKET_LEN);
+	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
 	if (err < 0)
-		give_pages(rq, page);
+		put_page(virt_to_head_page(buf));
 
 	return err;
 }
@@ -1343,8 +1385,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
 		struct virtqueue *vq = vi->rq[i].vq;
 
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (vi->mergeable_rx_bufs || vi->big_packets)
+			if (vi->big_packets)
 				give_pages(&vi->rq[i], buf);
+			else if (vi->mergeable_rx_bufs)
+				put_page(virt_to_head_page(buf));
 			else
 				dev_kfree_skb(buf);
 			--vi->rq[i].num;
@@ -1650,6 +1694,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 free_vqs:
 	cancel_delayed_work_sync(&vi->refill);
 	virtnet_del_vqs(vi);
+	if (vi->alloc_frag.page)
+		put_page(vi->alloc_frag.page);
 free_index:
 	free_percpu(vi->vq_index);
 free_stats:
@@ -1685,6 +1731,8 @@ static void virtnet_remove(struct virtio_device *vdev)
 
 	unregister_netdev(vi->dev);
 	remove_vq_common(vi);
+	if (vi->alloc_frag.page)
+		put_page(vi->alloc_frag.page);
 
 	flush_work(&vi->config_work);
...