Commit b71101d6 authored by Linus Torvalds

Merge tag 'for-linus-xsa-6.1-rc9-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Two zero-day fixes for the xen-netback driver (XSA-423 and XSA-424)"

* tag 'for-linus-xsa-6.1-rc9-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/netback: don't call kfree_skb() with interrupts disabled
  xen/netback: Ensure protocol headers don't fall in the non-linear area
parents bce93322 74e7e1ef
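
For background on the first of the two fixes: kfree_skb() must not be called with interrupts disabled, so xenvif_rx_queue_tail() now only reports that the rx queue is full, and xenvif_start_xmit() drops the packet after the queue lock (taken with spin_lock_irqsave()) has been released, using dev_kfree_skb_any() because the transmit path itself may run with interrupts off. A minimal sketch of that pattern, using illustrative names (struct my_queue, my_queue_tail(), my_start_xmit()) rather than the actual xen-netback code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Illustrative toy queue, not the driver's struct xenvif_queue. */
struct my_queue {
	spinlock_t lock;
	struct sk_buff_head queued;
	bool full;
};

/* Queue an skb; only report failure while holding the lock.
 * kfree_skb() is not allowed with interrupts disabled, so the skb is
 * never freed inside the irqsave section.
 */
static bool my_queue_tail(struct my_queue *q, struct sk_buff *skb)
{
	unsigned long flags;
	bool queued = true;

	spin_lock_irqsave(&q->lock, flags);
	if (q->full)
		queued = false;		/* caller frees the skb later */
	else
		__skb_queue_tail(&q->queued, skb);
	spin_unlock_irqrestore(&q->lock, flags);

	return queued;
}

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_queue *q = netdev_priv(dev);

	if (!my_queue_tail(q, skb)) {
		dev->stats.tx_dropped++;
		/* Interrupts may still be disabled here, so use the _any variant. */
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

The driver change below has the same shape: xenvif_rx_queue_tail() now returns bool, and the existing drop label in xenvif_start_xmit() frees the skb with dev_kfree_skb_any().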
@@ -386,7 +386,7 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
@@ -254,14 +254,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
skb_clear_hash(skb);
-xenvif_rx_queue_tail(queue, skb);
+if (!xenvif_rx_queue_tail(queue, skb))
+goto drop;
xenvif_kick_thread(queue);
return NETDEV_TX_OK;
drop:
vif->dev->stats.tx_dropped++;
-dev_kfree_skb(skb);
+dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -332,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
struct xenvif_tx_cb {
-u16 pending_idx;
+u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+u8 copy_count;
};
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
u16 pending_idx,
@@ -370,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
return skb;
}
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
-struct sk_buff *skb,
-struct xen_netif_tx_request *txp,
-struct gnttab_map_grant_ref *gop,
-unsigned int frag_overflow,
-struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+struct sk_buff *skb,
+struct xen_netif_tx_request *first,
+struct xen_netif_tx_request *txfrags,
+unsigned *copy_ops,
+unsigned *map_ops,
+unsigned int frag_overflow,
+struct sk_buff *nskb,
+unsigned int extra_count,
+unsigned int data_len)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
-u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-int start;
+u16 pending_idx;
pending_ring_idx_t index;
unsigned int nr_slots;
+struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+struct xen_netif_tx_request *txp = first;
+nr_slots = shinfo->nr_frags + 1;
+copy_count(skb) = 0;
+/* Create copy ops for exactly data_len bytes into the skb head. */
+__skb_put(skb, data_len);
+while (data_len > 0) {
+int amount = data_len > txp->size ? txp->size : data_len;
+cop->source.u.ref = txp->gref;
+cop->source.domid = queue->vif->domid;
+cop->source.offset = txp->offset;
+cop->dest.domid = DOMID_SELF;
+cop->dest.offset = (offset_in_page(skb->data +
+skb_headlen(skb) -
+data_len)) & ~XEN_PAGE_MASK;
+cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+- data_len);
+cop->len = amount;
+cop->flags = GNTCOPY_source_gref;
-nr_slots = shinfo->nr_frags;
+index = pending_index(queue->pending_cons);
+pending_idx = queue->pending_ring[index];
+callback_param(queue, pending_idx).ctx = NULL;
+copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+copy_count(skb)++;
+cop++;
+data_len -= amount;
-/* Skip first skb fragment if it is on same page as header fragment. */
-start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+if (amount == txp->size) {
+/* The copy op covered the full tx_request */
+memcpy(&queue->pending_tx_info[pending_idx].req,
+txp, sizeof(*txp));
+queue->pending_tx_info[pending_idx].extra_count =
+(txp == first) ? extra_count : 0;
+if (txp == first)
+txp = txfrags;
+else
+txp++;
+queue->pending_cons++;
+nr_slots--;
+} else {
+/* The copy op partially covered the tx_request.
+* The remainder will be mapped.
+*/
+txp->offset += amount;
+txp->size -= amount;
+}
+}
-for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
-shinfo->nr_frags++, txp++, gop++) {
+for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+shinfo->nr_frags++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
-xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
+xenvif_tx_create_map_op(queue, pending_idx, txp,
+txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+if (txp == first)
+txp = txfrags;
+else
+txp++;
}
if (frag_overflow) {
@@ -415,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
skb_shinfo(skb)->frag_list = nskb;
}
-return gop;
+(*copy_ops) = cop - queue->tx_copy_ops;
+(*map_ops) = gop - queue->tx_map_ops;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -451,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct gnttab_copy **gopp_copy)
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
-u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+u16 pending_idx;
/* This always points to the shinfo of the skb being checked, which
* could be either the first or the one on the frag_list
*/
@@ -462,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
const bool sharedslot = nr_frags &&
-frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
+frag_get_pending_idx(&shinfo->frags[0]) ==
+copy_pending_idx(skb, copy_count(skb) - 1);
-int i, err;
+int i, err = 0;
-/* Check status of header. */
-err = (*gopp_copy)->status;
-if (unlikely(err)) {
-if (net_ratelimit())
-netdev_dbg(queue->vif->dev,
-"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
-(*gopp_copy)->status,
-pending_idx,
-(*gopp_copy)->source.u.ref);
-/* The first frag might still have this slot mapped */
-if (!sharedslot)
-xenvif_idx_release(queue, pending_idx,
-XEN_NETIF_RSP_ERROR);
+for (i = 0; i < copy_count(skb); i++) {
+int newerr;
+/* Check status of header. */
+pending_idx = copy_pending_idx(skb, i);
+newerr = (*gopp_copy)->status;
+if (likely(!newerr)) {
+/* The first frag might still have this slot mapped */
+if (i < copy_count(skb) - 1 || !sharedslot)
+xenvif_idx_release(queue, pending_idx,
+XEN_NETIF_RSP_OKAY);
+} else {
+err = newerr;
+if (net_ratelimit())
+netdev_dbg(queue->vif->dev,
+"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+(*gopp_copy)->status,
+pending_idx,
+(*gopp_copy)->source.u.ref);
+/* The first frag might still have this slot mapped */
+if (i < copy_count(skb) - 1 || !sharedslot)
+xenvif_idx_release(queue, pending_idx,
+XEN_NETIF_RSP_ERROR);
+}
+(*gopp_copy)++;
}
-(*gopp_copy)++;
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -526,14 +605,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
if (err)
continue;
-/* First error: if the header haven't shared a slot with the
-* first frag, release it as well.
-*/
-if (!sharedslot)
-xenvif_idx_release(queue,
-XENVIF_TX_CB(skb)->pending_idx,
-XEN_NETIF_RSP_OKAY);
/* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -803,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
unsigned *copy_ops,
unsigned *map_ops)
{
-struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
struct sk_buff *skb, *nskb;
int ret;
unsigned int frag_overflow;
@@ -885,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
continue;
}
+data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+XEN_NETBACK_TX_COPY_LEN : txreq.size;
ret = xenvif_count_requests(queue, &txreq, extra_count,
txfrags, work_to_do);
if (unlikely(ret < 0))
break;
@@ -912,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
-data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
-ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-XEN_NETBACK_TX_COPY_LEN : txreq.size;
+if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+data_len = txreq.size;
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
@@ -925,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
}
skb_shinfo(skb)->nr_frags = ret;
-if (data_len < txreq.size)
-skb_shinfo(skb)->nr_frags++;
/* At this point shinfo->nr_frags is in fact the number of
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
@@ -988,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
type);
}
-XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-__skb_put(skb, data_len);
-queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
-queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
-queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-virt_to_gfn(skb->data);
-queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
-queue->tx_copy_ops[*copy_ops].dest.offset =
-offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-queue->tx_copy_ops[*copy_ops].len = data_len;
-queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-(*copy_ops)++;
-if (data_len < txreq.size) {
-frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-pending_idx);
-xenvif_tx_create_map_op(queue, pending_idx, &txreq,
-extra_count, gop);
-gop++;
-} else {
-frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-INVALID_PENDING_IDX);
-memcpy(&queue->pending_tx_info[pending_idx].req,
-&txreq, sizeof(txreq));
-queue->pending_tx_info[pending_idx].extra_count =
-extra_count;
-}
-queue->pending_cons++;
-gop = xenvif_get_requests(queue, skb, txfrags, gop,
-frag_overflow, nskb);
+xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+map_ops, frag_overflow, nskb, extra_count,
+data_len);
__skb_queue_tail(&queue->tx_queue, skb);
queue->tx.req_cons = idx;
-if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
(*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
break;
}
-(*map_ops) = gop - queue->tx_map_ops;
return;
}
@@ -1114,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
-unsigned data_len;
-pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+pending_idx = copy_pending_idx(skb, 0);
txp = &queue->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
@@ -1135,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
continue;
}
-data_len = skb->len;
-callback_param(queue, pending_idx).ctx = NULL;
-if (data_len < txp->size) {
-/* Append the packet payload as a fragment. */
-txp->offset += data_len;
-txp->size -= data_len;
-} else {
-/* Schedule a response immediately. */
-xenvif_idx_release(queue, pending_idx,
-XEN_NETIF_RSP_OKAY);
-}
if (txp->flags & XEN_NETTXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
else if (txp->flags & XEN_NETTXF_data_validated)
@@ -1332,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
-unsigned nr_mops, nr_cops = 0;
+unsigned nr_mops = 0, nr_cops = 0;
int work_done, ret;
if (unlikely(!tx_work_todo(queue)))
@@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
return false;
}
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned long flags;
+bool ret = true;
spin_lock_irqsave(&queue->rx_queue.lock, flags);
@@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
struct net_device *dev = queue->vif->dev;
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-kfree_skb(skb);
-queue->vif->dev->stats.rx_dropped++;
+ret = false;
} else {
if (skb_queue_empty(&queue->rx_queue))
xenvif_update_needed_slots(queue, skb);
@@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
}
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+return ret;
}
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)