Commit c07ff859 authored by Stephen Hemminger, committed by Jakub Kicinski

netem: fix return value if duplicate enqueue fails

There is a bug in netem_enqueue() introduced by
commit 5845f706 ("net: netem: fix skb length BUG_ON in __skb_to_sgvec")
that can lead to a use-after-free.

This commit made netem_enqueue() always return NET_XMIT_SUCCESS
when a packet is duplicated, which can cause the parent qdisc's q.qlen
to be mistakenly incremented. When this happens, qlen_notify() may be
skipped on the parent during destruction, leaving a dangling pointer
for some classful qdiscs like DRR.
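
For context, a classful parent such as DRR keys its own queue accounting off the child's return value. Below is a condensed sketch of that pattern (modeled on drr_enqueue() in net/sched/sch_drr.c; class lookup and unrelated steps are trimmed):

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		/* child refused the packet: count the drop, do not grow q.qlen */
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	/* child reported success, so the parent assumes one packet is queued */
	sch->q.qlen++;
	return err;

If the child reports NET_XMIT_SUCCESS without actually holding a packet, the parent's q.qlen over-counts, which is what later confuses the teardown path.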

There are two ways for the bug to happen:

- If the duplicated packet is dropped by rootq->enqueue() and then
  the original packet is also dropped.
- If rootq->enqueue() sends the duplicated packet to a different qdisc
  and the original packet is dropped.

In both cases NET_XMIT_SUCCESS is returned even though no packets
are enqueued at the netem qdisc.
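
In terms of the code removed below, the failure comes from rc_drop being overwritten as soon as the duplicate has been handed to the root qdisc; a condensed sketch of the pre-fix flow (declarations and unrelated steps elided):

	int rc_drop = NET_XMIT_DROP;

	/* pre-fix: the duplicate is re-enqueued at the root immediately ... */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		rootq->enqueue(skb2, rootq, to_free);
		rc_drop = NET_XMIT_SUCCESS;	/* ... and the drop return code is overwritten */
	}

	...

	if (unlikely(sch->q.qlen >= sch->limit)) {
		qdisc_drop_all(skb, sch, to_free);	/* original is dropped here */
		return rc_drop;				/* yet NET_XMIT_SUCCESS is returned */
	}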

The fix is to defer the enqueue of the duplicate packet until after
the original packet has been guaranteed to return NET_XMIT_SUCCESS.
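
Condensed, the fixed ordering looks like this (taken from the patch below, with unrelated steps elided):

	struct sk_buff *skb2 = NULL;

	if (count > 1)
		skb2 = skb_clone(skb, GFP_ATOMIC);	/* clone early, enqueue later */

	...

	if (unlikely(sch->q.qlen >= sch->limit)) {
		qdisc_drop_all(skb, sch, to_free);
		if (skb2)
			__qdisc_drop(skb2, to_free);	/* free the unused duplicate */
		return NET_XMIT_DROP;			/* report the drop honestly */
	}

	/* only once the original is guaranteed to return NET_XMIT_SUCCESS */
	if (skb2) {
		rootq->enqueue(skb2, rootq, to_free);
		skb2 = NULL;
	}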

Fixes: 5845f706 ("net: netem: fix skb length BUG_ON in __skb_to_sgvec")
Reported-by: Budimir Markovic <markovicbudimir@gmail.com>
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20240819175753.5151-1-stephen@networkplumber.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 528876d8
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -446,12 +446,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	struct netem_sched_data *q = qdisc_priv(sch);
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
-	struct sk_buff *skb2;
+	struct sk_buff *skb2 = NULL;
 	struct sk_buff *segs = NULL;
 	unsigned int prev_len = qdisc_pkt_len(skb);
 	int count = 1;
-	int rc = NET_XMIT_SUCCESS;
-	int rc_drop = NET_XMIT_DROP;
 
 	/* Do not fool qdisc_drop_all() */
 	skb->prev = NULL;
@@ -480,19 +478,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		skb_orphan_partial(skb);
 
 	/*
-	 * If we need to duplicate packet, then re-insert at top of the
-	 * qdisc tree, since parent queuer expects that only one
-	 * skb will be queued.
+	 * If we need to duplicate packet, then clone it before
+	 * original is modified.
 	 */
-	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-		struct Qdisc *rootq = qdisc_root_bh(sch);
-		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
-
-		q->duplicate = 0;
-		rootq->enqueue(skb2, rootq, to_free);
-		q->duplicate = dupsave;
-		rc_drop = NET_XMIT_SUCCESS;
-	}
+	if (count > 1)
+		skb2 = skb_clone(skb, GFP_ATOMIC);
 
 	/*
 	 * Randomized packet corruption.
@@ -504,7 +494,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		if (skb_is_gso(skb)) {
 			skb = netem_segment(skb, sch, to_free);
 			if (!skb)
-				return rc_drop;
+				goto finish_segs;
+
 			segs = skb->next;
 			skb_mark_not_on_list(skb);
 			qdisc_skb_cb(skb)->pkt_len = skb->len;
@@ -530,7 +521,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		/* re-link segs, so that qdisc_drop_all() frees them all */
 		skb->next = segs;
 		qdisc_drop_all(skb, sch, to_free);
-		return rc_drop;
+		if (skb2)
+			__qdisc_drop(skb2, to_free);
+		return NET_XMIT_DROP;
+	}
+
+	/*
+	 * If doing duplication then re-insert at top of the
+	 * qdisc tree, since parent queuer expects that only one
+	 * skb will be queued.
+	 */
+	if (skb2) {
+		struct Qdisc *rootq = qdisc_root_bh(sch);
+		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+
+		q->duplicate = 0;
+		rootq->enqueue(skb2, rootq, to_free);
+		q->duplicate = dupsave;
+		skb2 = NULL;
 	}
 
 	qdisc_qstats_backlog_inc(sch, skb);
@@ -601,9 +609,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 finish_segs:
+	if (skb2)
+		__qdisc_drop(skb2, to_free);
+
 	if (segs) {
 		unsigned int len, last_len;
-		int nb;
+		int rc, nb;
 
 		len = skb ? skb->len : 0;
 		nb = skb ? 1 : 0;