Commit 20d93869 authored by Michael S. Tsirkin, committed by Greg Kroah-Hartman

virtio_net: fix error handling for mergeable buffers

Eric Dumazet noticed that if we encounter an error
when processing a mergeable buffer, we don't
dequeue all of the buffers from this packet,
the result is almost sure to be loss of networking.

Fix this issue.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Michael Dalton <mwdalton@google.com>
Acked-by: Michael Dalton <mwdalton@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

(cherry picked from commit 8fc3b9e9)
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent fd661199
...@@ -297,26 +297,33 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq, ...@@ -297,26 +297,33 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
return skb; return skb;
} }
static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb) static struct sk_buff *receive_mergeable(struct net_device *dev,
struct receive_queue *rq,
void *buf,
unsigned int len)
{ {
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); struct skb_vnet_hdr *hdr = page_address(buf);
struct page *page; int num_buf = hdr->mhdr.num_buffers;
int num_buf, i, len; struct page *page = buf;
struct sk_buff *skb = page_to_skb(rq, page, len);
int i;
if (unlikely(!skb))
goto err_skb;
num_buf = hdr->mhdr.num_buffers;
while (--num_buf) { while (--num_buf) {
i = skb_shinfo(skb)->nr_frags; i = skb_shinfo(skb)->nr_frags;
if (i >= MAX_SKB_FRAGS) { if (i >= MAX_SKB_FRAGS) {
pr_debug("%s: packet too long\n", skb->dev->name); pr_debug("%s: packet too long\n", skb->dev->name);
skb->dev->stats.rx_length_errors++; skb->dev->stats.rx_length_errors++;
return -EINVAL; return NULL;
} }
page = virtqueue_get_buf(rq->vq, &len); page = virtqueue_get_buf(rq->vq, &len);
if (!page) { if (!page) {
pr_debug("%s: rx error: %d buffers missing\n", pr_debug("%s: rx error: %d buffers %d missing\n",
skb->dev->name, hdr->mhdr.num_buffers); dev->name, hdr->mhdr.num_buffers, num_buf);
skb->dev->stats.rx_length_errors++; dev->stats.rx_length_errors++;
return -EINVAL; goto err_buf;
} }
if (len > PAGE_SIZE) if (len > PAGE_SIZE)
...@@ -326,7 +333,25 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb) ...@@ -326,7 +333,25 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
--rq->num; --rq->num;
} }
return 0; return skb;
err_skb:
give_pages(rq, page);
while (--num_buf) {
buf = virtqueue_get_buf(rq->vq, &len);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
dev->stats.rx_length_errors++;
break;
}
page = buf;
give_pages(rq, page);
--rq->num;
}
err_buf:
dev->stats.rx_dropped++;
dev_kfree_skb(skb);
return NULL;
} }
static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
...@@ -354,16 +379,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) ...@@ -354,16 +379,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
skb_trim(skb, len); skb_trim(skb, len);
} else { } else {
page = buf; page = buf;
if (vi->mergeable_rx_bufs) {
skb = receive_mergeable(dev, rq, page, len);
if (unlikely(!skb))
return;
} else {
skb = page_to_skb(rq, page, len); skb = page_to_skb(rq, page, len);
if (unlikely(!skb)) { if (unlikely(!skb)) {
dev->stats.rx_dropped++; dev->stats.rx_dropped++;
give_pages(rq, page); give_pages(rq, page);
return; return;
} }
if (vi->mergeable_rx_bufs)
if (receive_mergeable(rq, skb)) {
dev_kfree_skb(skb);
return;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment