Commit 162081ec authored by Juergen Gross, committed by David S. Miller

xen/netfront: don't read data from request on the ring page

In order to avoid a malicious backend being able to influence the local
processing of a request, build the request locally first and then copy
it to the ring page. Any read of request data that influences processing
in the frontend needs to be done on the local instance.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8446066b
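The fix follows a simple discipline: build the request in trusted local
memory, copy it to the shared ring exactly once, and never read it back
from the ring, since the backend can rewrite ring memory at any time.
Below is a minimal, self-contained C sketch of that pattern; the
struct tx_request and queue_tx_request() names are illustrative
stand-ins, not the actual netfront types:

#include <stdint.h>

/* Toy stand-in for struct xen_netif_tx_request (illustrative only). */
struct tx_request {
	uint32_t gref;
	uint16_t offset;
	uint16_t flags;
	uint16_t id;
	uint16_t size;
};

/*
 * Build the request locally, publish it to the shared ring slot with a
 * single copy, and base every later decision on the local copy. Reading
 * the slot back (the pre-patch behavior) would let a malicious backend
 * steer frontend processing.
 */
static uint16_t queue_tx_request(volatile struct tx_request *ring_slot,
				 uint16_t id, uint32_t gref,
				 uint16_t offset, uint16_t size)
{
	struct tx_request local = {
		.id = id, .gref = gref, .offset = offset,
		.size = size, .flags = 0,
	};

	*ring_slot = local;	/* one write to untrusted shared memory */

	return local.size;	/* right: trust only the local copy */
	/* wrong: return ring_slot->size;  -- backend-controlled */
}

The diff below applies exactly this shape: xennet_tx_setup_grant() now
fills info->tx_local and copies it to the ring via *tx = info->tx_local,
and the callers read info.tx_local.size instead of tx->size.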
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -435,7 +435,8 @@ struct xennet_gnttab_make_txreq {
 	struct netfront_queue *queue;
 	struct sk_buff *skb;
 	struct page *page;
-	struct xen_netif_tx_request *tx;      /* Last request */
+	struct xen_netif_tx_request *tx;      /* Last request on ring page */
+	struct xen_netif_tx_request tx_local; /* Last request local copy*/
 	unsigned int size;
 };
 
@@ -463,30 +464,27 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	queue->grant_tx_page[id] = page;
 	queue->grant_tx_ref[id] = ref;
 
-	tx->id = id;
-	tx->gref = ref;
-	tx->offset = offset;
-	tx->size = len;
-	tx->flags = 0;
+	info->tx_local.id = id;
+	info->tx_local.gref = ref;
+	info->tx_local.offset = offset;
+	info->tx_local.size = len;
+	info->tx_local.flags = 0;
+
+	*tx = info->tx_local;
 
 	info->tx = tx;
-	info->size += tx->size;
+	info->size += info->tx_local.size;
 }
 
 static struct xen_netif_tx_request *xennet_make_first_txreq(
-	struct netfront_queue *queue, struct sk_buff *skb,
-	struct page *page, unsigned int offset, unsigned int len)
+	struct xennet_gnttab_make_txreq *info,
+	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.page = page,
-		.size = 0,
-	};
+	info->size = 0;
 
-	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
+	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
 
-	return info.tx;
+	return info->tx;
 }
 
 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
@@ -499,35 +497,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 	xennet_tx_setup_grant(gfn, offset, len, data);
 }
 
-static struct xen_netif_tx_request *xennet_make_txreqs(
-	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
-	struct sk_buff *skb, struct page *page,
+static void xennet_make_txreqs(
+	struct xennet_gnttab_make_txreq *info,
+	struct page *page,
 	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.tx = tx,
-	};
-
 	/* Skip unused frames from start of page */
 	page += offset >> PAGE_SHIFT;
 	offset &= ~PAGE_MASK;
 
 	while (len) {
-		info.page = page;
-		info.size = 0;
+		info->page = page;
+		info->size = 0;
 
 		gnttab_foreach_grant_in_range(page, offset, len,
 					      xennet_make_one_txreq,
-					      &info);
+					      info);
 
 		page++;
 		offset = 0;
-		len -= info.size;
+		len -= info->size;
 	}
-
-	return info.tx;
 }
 
 /*
@@ -580,10 +570,14 @@ static int xennet_xdp_xmit_one(struct net_device *dev,
 {
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+	struct xennet_gnttab_make_txreq info = {
+		.queue = queue,
+		.skb = NULL,
+		.page = virt_to_page(xdpf->data),
+	};
 	int notify;
 
-	xennet_make_first_txreq(queue, NULL,
-				virt_to_page(xdpf->data),
+	xennet_make_first_txreq(&info,
 				offset_in_page(xdpf->data),
 				xdpf->len);
 
@@ -638,7 +632,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 {
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
-	struct xen_netif_tx_request *tx, *first_tx;
+	struct xen_netif_tx_request *first_tx;
 	unsigned int i;
 	int notify;
 	int slots;
@@ -647,6 +641,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	unsigned int len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
+	struct xennet_gnttab_make_txreq info = { };
 	unsigned int num_queues = dev->real_num_tx_queues;
 	u16 queue_index;
 	struct sk_buff *nskb;
@@ -704,21 +699,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 
 	/* First request for the linear area. */
-	first_tx = tx = xennet_make_first_txreq(queue, skb,
-						page, offset, len);
-	offset += tx->size;
+	info.queue = queue;
+	info.skb = skb;
+	info.page = page;
+	first_tx = xennet_make_first_txreq(&info, offset, len);
+	offset += info.tx_local.size;
 	if (offset == PAGE_SIZE) {
 		page++;
 		offset = 0;
 	}
-	len -= tx->size;
+	len -= info.tx_local.size;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
-		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_csum_blank |
+				   XEN_NETTXF_data_validated;
 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 		/* remote but checksummed. */
-		tx->flags |= XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_data_validated;
 
 	/* Optional extra info after the first request. */
 	if (skb_shinfo(skb)->gso_size) {
@@ -727,7 +725,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 		gso = (struct xen_netif_extra_info *)
 			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
-		tx->flags |= XEN_NETTXF_extra_info;
+		first_tx->flags |= XEN_NETTXF_extra_info;
 
 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
@@ -741,12 +739,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 
 	/* Requests for the rest of the linear area. */
-	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+	xennet_make_txreqs(&info, page, offset, len);
 
 	/* Requests for all the frags. */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
-					skb_frag_off(frag),
-					skb_frag_size(frag));
+		xennet_make_txreqs(&info, skb_frag_page(frag),
+				   skb_frag_off(frag),
+				   skb_frag_size(frag));
 	}