Commit 653bbf19 authored by David S. Miller

Merge branch 'xen-netback'

Zoltan Kiss says:

====================
xen-netback: Fixing up xenvif_tx_check_gop

This series fixes a number of bugs on the error path around this function,
introduced with my grant mapping series in 3.15. The patches apply to the
latest net tree, and probably to net-next as well without modification.
I'll post another series which applies to 3.15 stable, as the problem was
first discovered there. The only difference is that the variable name
"queue" is replaced with "vif".
====================
Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Reported-by: Armin Zentai <armin.zentai@ezit.hu>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7801db8a d8cfbfc4
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 {
 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+	/* This always points to the shinfo of the skb being checked, which
+	 * could be either the first or the one on the frag_list
+	 */
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	/* If this is non-NULL, we are currently checking the frag_list skb, and
+	 * this points to the shinfo of the first one
+	 */
+	struct skb_shared_info *first_shinfo = NULL;
 	int nr_frags = shinfo->nr_frags;
+	const bool sharedslot = nr_frags &&
+				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
 	int i, err;
-	struct sk_buff *first_skb = NULL;

 	/* Check status of header. */
 	err = (*gopp_copy)->status;
-	(*gopp_copy)++;
 	if (unlikely(err)) {
 		if (net_ratelimit())
 			netdev_dbg(queue->vif->dev,
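The load-bearing addition in this hunk is sharedslot: it records whether the
header's grant copy and the first frag's grant mapping were built from the
same pending ring slot, so every later release decision can make sure a
shared slot is answered exactly once. A minimal standalone sketch of the
predicate, with simplified stand-in types for the frag array and
frag_get_pending_idx (none of this is the real driver API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for skb_frag_t; the driver stashes the pending slot index
 * inside the frag, which frag_get_pending_idx() reads back.
 */
struct skb_frag { uint16_t pending_idx; };

static uint16_t frag_get_pending_idx(const struct skb_frag *frag)
{
	return frag->pending_idx;
}

int main(void)
{
	struct skb_frag frags[] = { { .pending_idx = 7 } };
	int nr_frags = 1;
	uint16_t pending_idx = 7;	/* slot also used for the header copy */

	/* True when the header copy and the first frag mapping came from
	 * the same pending slot, so it may only be released once.
	 */
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&frags[0]) == pending_idx;

	printf("sharedslot = %d\n", sharedslot);
	return 0;
}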
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 				   (*gopp_copy)->status,
 				   pending_idx,
 				   (*gopp_copy)->source.u.ref);
-		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+		/* The first frag might still have this slot mapped */
+		if (!sharedslot)
+			xenvif_idx_release(queue, pending_idx,
+					   XEN_NETIF_RSP_ERROR);
 	}
+	(*gopp_copy)++;

 check_frags:
 	for (i = 0; i < nr_frags; i++, gop_map++) {
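A second fix hides in this hunk: (*gopp_copy)++ moved below the error branch,
so the netdev_dbg above now reads the status and grant ref of the copy op
that actually failed rather than of its successor. A generic sketch of the
log-then-advance cursor pattern (struct copy_op and check_copy are
illustrative, not the Xen grant-copy types):

#include <stdio.h>

struct copy_op { int status; unsigned int ref; };

static int check_copy(struct copy_op **opp)
{
	int err = (*opp)->status;

	/* Log while *opp still points at the op that failed. */
	if (err)
		fprintf(stderr, "copy failed: status %d, ref %u\n",
			(*opp)->status, (*opp)->ref);
	/* Advance the cursor only after its fields have been consumed. */
	(*opp)++;
	return err;
}

int main(void)
{
	struct copy_op ops[] = { { -1, 42 }, { 0, 43 } };
	struct copy_op *cur = ops;

	check_copy(&cur);	/* reports ref 42, then moves on to ops[1] */
	return 0;
}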
@@ -1062,8 +1073,19 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 				pending_idx,
 				gop_map->handle);
 			/* Had a previous error? Invalidate this fragment. */
-			if (unlikely(err))
+			if (unlikely(err)) {
 				xenvif_idx_unmap(queue, pending_idx);
+				/* If the mapping of the first frag was OK, but
+				 * the header's copy failed, and they are
+				 * sharing a slot, send an error
+				 */
+				if (i == 0 && sharedslot)
+					xenvif_idx_release(queue, pending_idx,
+							   XEN_NETIF_RSP_ERROR);
+				else
+					xenvif_idx_release(queue, pending_idx,
+							   XEN_NETIF_RSP_OKAY);
+			}
 			continue;
 		}

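The new branch encodes a small decision table: a successfully mapped frag
that is being thrown away because of an earlier error is normally released
with an OKAY response, but when it is frag 0 and it shares its slot with a
failed header copy, that single release must carry the error instead. A
sketch of the selection (the enum and function names are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum resp { RSP_OKAY, RSP_ERROR };

static enum resp discard_response(int frag_idx, bool sharedslot)
{
	/* Frag 0 also owns the header's slot when sharedslot is set, so
	 * its one and only release has to report the header copy error.
	 */
	return (frag_idx == 0 && sharedslot) ? RSP_ERROR : RSP_OKAY;
}

int main(void)
{
	printf("%d %d %d\n",
	       discard_response(0, true),	/* 1: RSP_ERROR */
	       discard_response(0, false),	/* 0: RSP_OKAY */
	       discard_response(3, true));	/* 0: RSP_OKAY */
	return 0;
}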
@@ -1075,42 +1097,53 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 			gop_map->status,
 			pending_idx,
 			gop_map->ref);
+
 		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
 			continue;
-		/* First error: invalidate preceding fragments. */
+
+		/* First error: if the header haven't shared a slot with the
+		 * first frag, release it as well.
+		 */
+		if (!sharedslot)
+			xenvif_idx_release(queue,
+					   XENVIF_TX_CB(skb)->pending_idx,
+					   XEN_NETIF_RSP_OKAY);
+
+		/* Invalidate preceding fragments of this skb. */
 		for (j = 0; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
 			xenvif_idx_unmap(queue, pending_idx);
+			xenvif_idx_release(queue, pending_idx,
+					   XEN_NETIF_RSP_OKAY);
+		}
+
+		/* And if we found the error while checking the frag_list, unmap
+		 * the first skb's frags
+		 */
+		if (first_shinfo) {
+			for (j = 0; j < first_shinfo->nr_frags; j++) {
+				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+				xenvif_idx_unmap(queue, pending_idx);
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
+			}
 		}

 		/* Remember the error: invalidate all subsequent fragments. */
 		err = newerr;
 	}

-	if (skb_has_frag_list(skb)) {
-		first_skb = skb;
-		skb = shinfo->frag_list;
-		shinfo = skb_shinfo(skb);
+	if (skb_has_frag_list(skb) && !first_shinfo) {
+		first_shinfo = skb_shinfo(skb);
+		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
 		nr_frags = shinfo->nr_frags;

 		goto check_frags;
 	}

-	/* There was a mapping error in the frag_list skb. We have to unmap
-	 * the first skb's frags
-	 */
-	if (first_skb && err) {
-		int j;
-		shinfo = skb_shinfo(first_skb);
-		for (j = 0; j < shinfo->nr_frags; j++) {
-			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xenvif_idx_unmap(queue, pending_idx);
-		}
-	}
-
 	*gopp_map = gop_map;
 	return err;
 }
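Replacing first_skb with first_shinfo turns the function into a clean
two-pass loop: the first pass walks the main skb's frags, then control jumps
back through check_frags for the frag_list skb, with first_shinfo doubling as
the "which pass is this" flag and as the handle for cleaning up pass-one
frags when the error turns up in pass two. A standalone sketch of that
control flow, with plain arrays standing in for skb_shared_info:

#include <stdio.h>

struct frags { int n; const int *idx; };

static void walk(const struct frags *main_frags,
		 const struct frags *list_frags)
{
	const struct frags *cur = main_frags;
	const struct frags *first = NULL;	/* NULL: still on pass one */

check_frags:
	for (int i = 0; i < cur->n; i++)
		printf("checking slot %d (pass %d)\n",
		       cur->idx[i], first ? 2 : 1);

	/* Enter pass two exactly once, keeping a handle on pass one. */
	if (list_frags && !first) {
		first = cur;
		cur = list_frags;
		goto check_frags;
	}
}

int main(void)
{
	const int a[] = { 1, 2 }, b[] = { 3 };
	const struct frags main_frags = { 2, a }, list_frags = { 1, b };

	walk(&main_frags, &list_frags);
	return 0;
}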
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		/* Check the remap error code. */
 		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+			/* If there was an error, xenvif_tx_check_gop is
+			 * expected to release all the frags which were mapped,
+			 * so kfree_skb shouldn't do it again
+			 */
 			skb_shinfo(skb)->nr_frags = 0;
+			if (skb_has_frag_list(skb)) {
+				struct sk_buff *nskb =
+						skb_shinfo(skb)->frag_list;
+				skb_shinfo(nskb)->nr_frags = 0;
+			}
 			kfree_skb(skb);
 			continue;
 		}
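The nr_frags = 0 stores are an ownership hand-off: on this error path every
mapped frag has already been unmapped and released by xenvif_tx_check_gop, so
the skb destructor must not walk the frag arrays again, including that of the
skb chained on frag_list. A generic sketch of the detach-before-free pattern
(a toy buffer type, not struct sk_buff):

#include <stdlib.h>

struct buf {
	int nr_frags;
	void *frags[4];
};

static void free_buf(struct buf *b)
{
	/* The destructor frees whatever the container still owns. */
	for (int i = 0; i < b->nr_frags; i++)
		free(b->frags[i]);
	free(b);
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->frags[0] = malloc(16);
	b->nr_frags = 1;

	/* Error path: the frag was already released elsewhere... */
	free(b->frags[0]);
	/* ...so detach it before freeing, avoiding a double free. */
	b->nr_frags = 0;
	free_buf(b);
	return 0;
}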
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
 			   tx_unmap_op.status);
 		BUG();
 	}

-	xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
 }

 static inline int rx_work_todo(struct xenvif_queue *queue)
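The last hunk completes the picture: xenvif_idx_unmap() no longer issues an
unconditional OKAY release, so tearing down a grant mapping and answering the
frontend for a slot become two separate steps, and the xenvif_tx_check_gop
changes above now issue each release explicitly, exactly once, with the right
status. A sketch of the split (the function names are illustrative):

#include <stdio.h>

/* Before the fix, unmap() also responded, so a caller could never unmap
 * a slot without generating a response. Splitting the concerns lets the
 * error paths decide how often, and with what status, to respond.
 */
static void idx_unmap(int idx)
{
	printf("unmapped slot %d\n", idx);
}

static void idx_release(int idx, const char *status)
{
	printf("responded %s for slot %d\n", status, idx);
}

int main(void)
{
	idx_unmap(7);		/* tear down the mapping... */
	idx_release(7, "OKAY");	/* ...and respond exactly once */
	return 0;
}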