Commit 8446066b authored by Juergen Gross, committed by David S. Miller

xen/netfront: read response from backend only once

To avoid problems in case the backend modifies a response on the ring
page while the frontend has already seen it, read the response into a
local buffer in one go and then operate only on that buffer.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 755f9053
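The change replaces every direct RING_GET_RESPONSE() access with RING_COPY_RESPONSE(), which snapshots the ring slot into a caller-provided buffer before any field is inspected, so a backend rewriting the shared ring page can no longer alter the data between validation and use. The macro lives in include/xen/interface/io/ring.h; as a rough, non-authoritative sketch of the idea (the in-tree definition may differ in detail):

/*
 * Sketch only, assuming the existing RING_GET_RESPONSE() accessor:
 * copy one response out of the shared ring in a single assignment so
 * the backend cannot modify it after the frontend has validated it.
 * The volatile cast forces an actual copy into *(_rsp) rather than a
 * later re-read of the shared ring slot.
 */
#define RING_COPY_RESPONSE(_r, _idx, _rsp) do {                              \
        *(_rsp) = *(volatile typeof(_rsp))RING_GET_RESPONSE(_r, _idx);      \
} while (0)

All subsequent checks (status, id, type, flags) then operate on the private copy, as the hunks below show.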
@@ -399,13 +399,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 		rmb(); /* Ensure we see responses up to 'rp'. */
 
 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
-			struct xen_netif_tx_response *txrsp;
+			struct xen_netif_tx_response txrsp;
 
-			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
-			if (txrsp->status == XEN_NETIF_RSP_NULL)
+			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+			if (txrsp.status == XEN_NETIF_RSP_NULL)
 				continue;
 
-			id = txrsp->id;
+			id = txrsp.id;
 			skb = queue->tx_skbs[id].skb;
 			if (unlikely(gnttab_query_foreign_access(
 				queue->grant_tx_ref[id]) != 0)) {
@@ -814,7 +814,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			     struct xen_netif_extra_info *extras,
 			     RING_IDX rp)
 {
-	struct xen_netif_extra_info *extra;
+	struct xen_netif_extra_info extra;
 	struct device *dev = &queue->info->netdev->dev;
 	RING_IDX cons = queue->rx.rsp_cons;
 	int err = 0;
@@ -830,24 +830,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			break;
 		}
 
-		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
 
-		if (unlikely(!extra->type ||
-			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+		if (unlikely(!extra.type ||
+			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			if (net_ratelimit())
 				dev_warn(dev, "Invalid extra type: %d\n",
-					 extra->type);
+					 extra.type);
 			err = -EINVAL;
 		} else {
-			memcpy(&extras[extra->type - 1], extra,
-			       sizeof(*extra));
+			extras[extra.type - 1] = extra;
 		}
 
 		skb = xennet_get_rx_skb(queue, cons);
 		ref = xennet_get_rx_ref(queue, cons);
 		xennet_move_rx_slot(queue, skb, ref);
-	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	queue->rx.rsp_cons = cons;
 	return err;
@@ -905,7 +903,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 				struct sk_buff_head *list,
 				bool *need_xdp_flush)
 {
-	struct xen_netif_rx_response *rx = &rinfo->rx;
+	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
 	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
@@ -989,7 +987,8 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			break;
 		}
 
-		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
+		rx = &rx_local;
 		skb = xennet_get_rx_skb(queue, cons + slots);
 		ref = xennet_get_rx_ref(queue, cons + slots);
 		slots++;
@@ -1044,10 +1043,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 	struct sk_buff *nskb;
 
 	while ((nskb = __skb_dequeue(list))) {
-		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		struct xen_netif_rx_response rx;
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
+
 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
@@ -1062,7 +1062,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(nfrag),
-				rx->offset, rx->status, PAGE_SIZE);
+				rx.offset, rx.status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
@@ -1161,7 +1161,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	i = queue->rx.rsp_cons;
 	work_done = 0;
 	while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+		RING_COPY_RESPONSE(&queue->rx, i, rx);
 		memset(extras, 0, sizeof(rinfo.extras));
 
 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,