Commit 8fc3b9e9 authored by Michael S. Tsirkin, committed by David S. Miller

virtio_net: fix error handling for mergeable buffers

Eric Dumazet noticed that if we encounter an error
when processing a mergeable buffer, we don't
dequeue all of the buffers from this packet;
the result is almost certain to be loss of networking.

Jason Wang noticed that we also leak a page and that we don't decrement
the rq buf count, so we won't repost buffers (a resource leak).

Fix both issues.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Michael Dalton <mwdalton@google.com>
Reported-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 99e872ae
...@@ -299,35 +299,47 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq, ...@@ -299,35 +299,47 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
return skb; return skb;
} }
static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) static struct sk_buff *receive_mergeable(struct net_device *dev,
struct receive_queue *rq,
void *buf,
unsigned int len)
{ {
struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb); struct skb_vnet_hdr *hdr = buf;
int num_buf = hdr->mhdr.num_buffers;
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
MERGE_BUFFER_LEN);
struct sk_buff *curr_skb = head_skb; struct sk_buff *curr_skb = head_skb;
char *buf;
struct page *page;
int num_buf, len, offset;
num_buf = hdr->mhdr.num_buffers; if (unlikely(!curr_skb))
goto err_skb;
while (--num_buf) { while (--num_buf) {
int num_skb_frags = skb_shinfo(curr_skb)->nr_frags; int num_skb_frags;
buf = virtqueue_get_buf(rq->vq, &len); buf = virtqueue_get_buf(rq->vq, &len);
if (unlikely(!buf)) { if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n", pr_debug("%s: rx error: %d buffers out of %d missing\n",
head_skb->dev->name, hdr->mhdr.num_buffers); dev->name, num_buf, hdr->mhdr.num_buffers);
head_skb->dev->stats.rx_length_errors++; dev->stats.rx_length_errors++;
return -EINVAL; goto err_buf;
} }
if (unlikely(len > MERGE_BUFFER_LEN)) { if (unlikely(len > MERGE_BUFFER_LEN)) {
pr_debug("%s: rx error: merge buffer too long\n", pr_debug("%s: rx error: merge buffer too long\n",
head_skb->dev->name); dev->name);
len = MERGE_BUFFER_LEN; len = MERGE_BUFFER_LEN;
} }
page = virt_to_head_page(buf);
--rq->num;
num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
if (unlikely(!nskb)) {
head_skb->dev->stats.rx_dropped++; if (unlikely(!nskb))
return -ENOMEM; goto err_skb;
}
if (curr_skb == head_skb) if (curr_skb == head_skb)
skb_shinfo(curr_skb)->frag_list = nskb; skb_shinfo(curr_skb)->frag_list = nskb;
else else
...@@ -341,8 +353,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) ...@@ -341,8 +353,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
head_skb->len += len; head_skb->len += len;
head_skb->truesize += MERGE_BUFFER_LEN; head_skb->truesize += MERGE_BUFFER_LEN;
} }
page = virt_to_head_page(buf); offset = buf - page_address(page);
offset = buf - (char *)page_address(page);
if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
put_page(page); put_page(page);
skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
...@@ -351,9 +362,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) ...@@ -351,9 +362,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
skb_add_rx_frag(curr_skb, num_skb_frags, page, skb_add_rx_frag(curr_skb, num_skb_frags, page,
offset, len, MERGE_BUFFER_LEN); offset, len, MERGE_BUFFER_LEN);
} }
}
return head_skb;
err_skb:
put_page(page);
while (--num_buf) {
buf = virtqueue_get_buf(rq->vq, &len);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
dev->stats.rx_length_errors++;
break;
}
page = virt_to_head_page(buf);
put_page(page);
--rq->num; --rq->num;
} }
return 0; err_buf:
dev->stats.rx_dropped++;
dev_kfree_skb(head_skb);
return NULL;
} }
static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
...@@ -382,19 +412,9 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) ...@@ -382,19 +412,9 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
len -= sizeof(struct virtio_net_hdr); len -= sizeof(struct virtio_net_hdr);
skb_trim(skb, len); skb_trim(skb, len);
} else if (vi->mergeable_rx_bufs) { } else if (vi->mergeable_rx_bufs) {
struct page *page = virt_to_head_page(buf); skb = receive_mergeable(dev, rq, buf, len);
skb = page_to_skb(rq, page, if (unlikely(!skb))
(char *)buf - (char *)page_address(page),
len, MERGE_BUFFER_LEN);
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
put_page(page);
return; return;
}
if (receive_mergeable(rq, skb)) {
dev_kfree_skb(skb);
return;
}
} else { } else {
page = buf; page = buf;
skb = page_to_skb(rq, page, 0, len, PAGE_SIZE); skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment