Commit e83e5bb1 authored by David S. Miller

Merge branch 'net-sched-bulk-dequeue'

Eric Dumazet says:

====================
net_sched: bulk dequeue and deferred drops

First patch adds an additional parameter to the ->enqueue() qdisc method
so that drops can be performed outside of the critical section
(after locks are released).

Then fq_codel can have a small optimization to reduce the number of cache
line misses during a drop event
(possibly accumulating hundreds of packets to be freed).

A small htb change exports the backlog in class dumps.

Final patch adds bulk dequeue to qdiscs that were lacking this feature.

This series brings a nice qdisc performance increase (more than 80%
in some cases).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 36195d86 4d202a0d
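
The core mechanism of patch 1, in miniature: while the root qdisc lock is held, an enqueue path that must drop a packet chains it onto a caller-provided to_free list through its next pointer instead of freeing it; once the lock is released, the caller frees the whole chain in one pass with kfree_skb_list(). Below is a minimal, self-contained C sketch of that pattern (user-space stand-ins for struct sk_buff and the qdisc lock; an illustration of the idea, not kernel code):

    #include <stdlib.h>

    struct pkt {                          /* stand-in for struct sk_buff */
            int id;
            struct pkt *next;
    };

    /* Mirrors __qdisc_drop(): defer the free by chaining onto *to_free. */
    static void drop_defer(struct pkt *p, struct pkt **to_free)
    {
            p->next = *to_free;
            *to_free = p;
    }

    /* Mirrors kfree_skb_list(): free the deferred chain in one pass. */
    static void free_list(struct pkt *p)
    {
            while (p) {
                    struct pkt *next = p->next;
                    free(p);
                    p = next;
            }
    }

    int main(void)
    {
            struct pkt *to_free = NULL;

            /* lock(root_lock); -- critical section begins */
            for (int i = 0; i < 3; i++) {
                    struct pkt *p = malloc(sizeof(*p));
                    p->id = i;
                    drop_defer(p, &to_free);  /* "queue full": defer, don't free */
            }
            /* unlock(root_lock); -- critical section ends */

            free_list(to_free);               /* all frees happen outside the lock */
            return 0;
    }

Keeping the frees out of the locked section shortens lock hold times, which is what lets a drop event that accumulates hundreds of packets stay cheap.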
@@ -52,6 +52,7 @@
 /* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
 struct codel_skb_cb {
 	codel_time_t enqueue_time;
+	unsigned int mem_usage;
 };
 static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
...
@@ -37,8 +37,10 @@ struct qdisc_size_table {
 };
 struct Qdisc {
-	int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
-	struct sk_buff * (*dequeue)(struct Qdisc *dev);
+	int (*enqueue)(struct sk_buff *skb,
+		       struct Qdisc *sch,
+		       struct sk_buff **to_free);
+	struct sk_buff * (*dequeue)(struct Qdisc *sch);
 	unsigned int flags;
 #define TCQ_F_BUILTIN 1
 #define TCQ_F_INGRESS 2
@@ -73,13 +75,14 @@ struct Qdisc {
 	/*
	 * For performance sake on SMP, we put highly modified fields at the end
	 */
-	struct Qdisc *next_sched ____cacheline_aligned_in_smp;
-	struct sk_buff *gso_skb;
-	unsigned long state;
+	struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
 	struct sk_buff_head q;
 	struct gnet_stats_basic_packed bstats;
 	seqcount_t running;
 	struct gnet_stats_queue qstats;
+	unsigned long state;
+	struct Qdisc *next_sched;
+	struct sk_buff *skb_bad_txq;
 	struct rcu_head rcu_head;
 	int padded;
 	atomic_t refcnt;
@@ -160,7 +163,9 @@ struct Qdisc_ops {
 	char id[IFNAMSIZ];
 	int priv_size;
-	int (*enqueue)(struct sk_buff *, struct Qdisc *);
+	int (*enqueue)(struct sk_buff *skb,
+		       struct Qdisc *sch,
+		       struct sk_buff **to_free);
 	struct sk_buff * (*dequeue)(struct Qdisc *);
 	struct sk_buff * (*peek)(struct Qdisc *);
@@ -498,10 +503,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
 #endif
 }
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+				struct sk_buff **to_free)
 {
 	qdisc_calculate_pkt_len(skb, sch);
-	return sch->enqueue(skb, sch);
+	return sch->enqueue(skb, sch, to_free);
 }
 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
@@ -626,24 +632,36 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 	return __qdisc_dequeue_head(sch, &sch->q);
 }
+/* Instead of calling kfree_skb() while root qdisc lock is held,
+ * queue the skb for future freeing at end of __dev_xmit_skb()
+ */
+static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
+{
+	skb->next = *to_free;
+	*to_free = skb;
+}
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
-						   struct sk_buff_head *list)
+						   struct sk_buff_head *list,
+						   struct sk_buff **to_free)
 {
 	struct sk_buff *skb = __skb_dequeue(list);
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
 		qdisc_qstats_backlog_dec(sch, skb);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return len;
 	}
 	return 0;
 }
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
+static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
+						 struct sk_buff **to_free)
 {
-	return __qdisc_queue_drop_head(sch, &sch->q);
+	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
 }
 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
@@ -724,9 +742,11 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 	qdisc_qstats_drop(sch);
 }
-static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
+			     struct sk_buff **to_free)
 {
-	kfree_skb(skb);
+	__qdisc_drop(skb, to_free);
 	qdisc_qstats_drop(sch);
 	return NET_XMIT_DROP;
...
@@ -3070,6 +3070,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				 struct netdev_queue *txq)
 {
 	spinlock_t *root_lock = qdisc_lock(q);
+	struct sk_buff *to_free = NULL;
 	bool contended;
 	int rc;
@@ -3086,7 +3087,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	spin_lock(root_lock);
 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-		kfree_skb(skb);
+		__qdisc_drop(skb, &to_free);
 		rc = NET_XMIT_DROP;
 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
 		   qdisc_run_begin(q)) {
@@ -3109,7 +3110,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		rc = NET_XMIT_SUCCESS;
 	} else {
-		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
+		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
@@ -3119,6 +3120,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		}
 	}
 	spin_unlock(root_lock);
+	if (unlikely(to_free))
+		kfree_skb_list(to_free);
 	if (unlikely(contended))
 		spin_unlock(&q->busylock);
 	return rc;
...
@@ -357,7 +357,8 @@ static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
 /* --------------------------- Qdisc operations ---------------------------- */
-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			  struct sk_buff **to_free)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow;
@@ -398,10 +399,10 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			kfree_skb(skb);
+			__qdisc_drop(skb, to_free);
 			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
-			kfree_skb(skb);
+			__qdisc_drop(skb, to_free);
 			goto drop;
 		case TC_ACT_RECLASSIFY:
 			if (flow->excess)
@@ -413,7 +414,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #endif
 	}
-	ret = qdisc_enqueue(skb, flow->q);
+	ret = qdisc_enqueue(skb, flow->q, to_free);
 	if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
 		if (net_xmit_drop_count(ret)) {
...
@@ -17,9 +17,10 @@
 #include <linux/skbuff.h>
 #include <net/pkt_sched.h>
-static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			     struct sk_buff **to_free)
 {
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_SUCCESS;
 }
...
@@ -358,7 +358,8 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 }
 static int
-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+	    struct sk_buff **to_free)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	int uninitialized_var(ret);
@@ -370,11 +371,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl == NULL) {
 		if (ret & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return ret;
 	}
-	ret = qdisc_enqueue(skb, cl->q);
+	ret = qdisc_enqueue(skb, cl->q, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		cbq_mark_toplevel(q, cl);
...
@@ -115,7 +115,8 @@ static void choke_zap_tail_holes(struct choke_sched_data *q)
 }
 /* Drop packet from queue array by creating a "hole" */
-static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
+			      struct sk_buff **to_free)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb = q->tab[idx];
@@ -129,7 +130,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
 	qdisc_qstats_backlog_dec(sch, skb);
 	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	--sch->q.qlen;
 }
@@ -261,7 +262,8 @@ static bool choke_match_random(const struct choke_sched_data *q,
 	return choke_match_flow(oskb, nskb);
 }
-static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	struct choke_sched_data *q = qdisc_priv(sch);
@@ -288,7 +290,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		/* Draw a packet at random from queue and compare flow */
 		if (choke_match_random(q, skb, &idx)) {
 			q->stats.matched++;
-			choke_drop_by_idx(sch, idx);
+			choke_drop_by_idx(sch, idx, to_free);
 			goto congestion_drop;
 		}
@@ -331,16 +333,16 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	q->stats.pdrop++;
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 congestion_drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 other_drop:
 	if (ret & __NET_XMIT_BYPASS)
 		qdisc_qstats_drop(sch);
-	kfree_skb(skb);
+	__qdisc_drop(skb, to_free);
 	return ret;
 }
...
@@ -82,7 +82,8 @@ static void drop_func(struct sk_buff *skb, void *ctx)
 {
 	struct Qdisc *sch = ctx;
-	qdisc_drop(skb, sch);
+	kfree_skb(skb);
+	qdisc_qstats_drop(sch);
 }
 static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
@@ -107,7 +108,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 	return skb;
 }
-static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			       struct sk_buff **to_free)
 {
 	struct codel_sched_data *q;
@@ -117,7 +119,7 @@ static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	q = qdisc_priv(sch);
 	q->drop_overlimit++;
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
...
@@ -350,7 +350,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
 	return NULL;
 }
-static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
@@ -360,11 +361,11 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl == NULL) {
 		if (err & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return err;
 	}
-	err = qdisc_enqueue(skb, cl->qdisc);
+	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;
...
@@ -191,7 +191,8 @@ static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
 /* --------------------------- Qdisc operations ---------------------------- */
-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			  struct sk_buff **to_free)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	int err;
@@ -234,7 +235,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			kfree_skb(skb);
+			__qdisc_drop(skb, to_free);
 			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
@@ -251,7 +252,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	}
-	err = qdisc_enqueue(skb, p->q);
+	err = qdisc_enqueue(skb, p->q, to_free);
 	if (err != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(err))
 			qdisc_qstats_drop(sch);
@@ -264,7 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_SUCCESS;
 drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
...
@@ -19,29 +19,32 @@
 /* 1 band FIFO pseudo-"scheduler" */
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	if (likely(skb_queue_len(&sch->q) < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			      struct sk_buff **to_free)
 {
 	if (likely(skb_queue_len(&sch->q) < sch->limit))
 		return qdisc_enqueue_tail(skb, sch);
 	/* queue full, remove one skb to fulfill the limit */
-	__qdisc_queue_drop_head(sch, &sch->q);
+	__qdisc_queue_drop_head(sch, &sch->q, to_free);
 	qdisc_qstats_drop(sch);
 	qdisc_enqueue_tail(skb, sch);
...
@@ -368,18 +368,19 @@ static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
 	}
 }
-static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		      struct sk_buff **to_free)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	struct fq_flow *f;
 	if (unlikely(sch->q.qlen >= sch->limit))
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 	f = fq_classify(skb, q);
 	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
 		q->stat_flows_plimit++;
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 	}
 	f->qlen++;
...
@@ -139,7 +139,8 @@ static inline void flow_queue_add(struct fq_codel_flow *flow,
 	skb->next = NULL;
 }
-static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
+static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
+				  struct sk_buff **to_free)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
@@ -171,8 +172,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
 	do {
 		skb = dequeue_head(flow);
 		len += qdisc_pkt_len(skb);
-		mem += skb->truesize;
-		kfree_skb(skb);
+		mem += get_codel_cb(skb)->mem_usage;
+		__qdisc_drop(skb, to_free);
 	} while (++i < max_packets && len < threshold);
 	flow->dropped += i;
@@ -184,7 +185,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
 	return idx;
 }
-static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			    struct sk_buff **to_free)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	unsigned int idx, prev_backlog, prev_qlen;
@@ -197,7 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (idx == 0) {
 		if (ret & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return ret;
 	}
 	idx--;
@@ -214,7 +216,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		flow->deficit = q->quantum;
 		flow->dropped = 0;
 	}
-	q->memory_usage += skb->truesize;
+	get_codel_cb(skb)->mem_usage = skb->truesize;
+	q->memory_usage += get_codel_cb(skb)->mem_usage;
 	memory_limited = q->memory_usage > q->memory_limit;
 	if (++sch->q.qlen <= sch->limit && !memory_limited)
 		return NET_XMIT_SUCCESS;
@@ -229,7 +232,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 * So instead of dropping a single packet, drop half of its backlog
 	 * with a 64 packets limit to not add a too big cpu spike here.
 	 */
-	ret = fq_codel_drop(sch, q->drop_batch_size);
+	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
 	prev_qlen -= sch->q.qlen;
 	prev_backlog -= sch->qstats.backlog;
@@ -265,7 +268,7 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 	if (flow->head) {
 		skb = dequeue_head(flow);
 		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
-		q->memory_usage -= skb->truesize;
+		q->memory_usage -= get_codel_cb(skb)->mem_usage;
 		sch->q.qlen--;
 		sch->qstats.backlog -= qdisc_pkt_len(skb);
 	}
@@ -276,7 +279,8 @@ static void drop_func(struct sk_buff *skb, void *ctx)
 {
 	struct Qdisc *sch = ctx;
-	qdisc_drop(skb, sch);
+	kfree_skb(skb);
+	qdisc_qstats_drop(sch);
 }
 static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
...
@@ -77,6 +77,34 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
 	skb->next = NULL;
 }
+/* This variant of try_bulk_dequeue_skb() makes sure
+ * all skbs in the chain are for the same txq
+ */
+static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
+				      struct sk_buff *skb,
+				      int *packets)
+{
+	int mapping = skb_get_queue_mapping(skb);
+	struct sk_buff *nskb;
+	int cnt = 0;
+
+	do {
+		nskb = q->dequeue(q);
+		if (!nskb)
+			break;
+		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
+			q->skb_bad_txq = nskb;
+			qdisc_qstats_backlog_inc(q, nskb);
+			q->q.qlen++;
+			break;
+		}
+		skb->next = nskb;
+		skb = nskb;
+	} while (++cnt < 8);
+	(*packets) += cnt;
+	skb->next = NULL;
+}
 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
  * A requeued skb (via q->gso_skb) can also be a SKB list.
  */
@@ -87,8 +115,9 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 	const struct netdev_queue *txq = q->dev_queue;
 	*packets = 1;
-	*validate = true;
 	if (unlikely(skb)) {
+		/* skb in gso_skb were already validated */
+		*validate = false;
 		/* check the reason of requeuing without tx lock first */
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
@@ -97,15 +126,30 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 			q->q.qlen--;
 		} else
 			skb = NULL;
-		/* skb in gso_skb were already validated */
-		*validate = false;
-	} else {
-		if (!(q->flags & TCQ_F_ONETXQUEUE) ||
-		    !netif_xmit_frozen_or_stopped(txq)) {
-			skb = q->dequeue(q);
-			if (skb && qdisc_may_bulk(q))
-				try_bulk_dequeue_skb(q, skb, txq, packets);
+		return skb;
+	}
+	*validate = true;
+	skb = q->skb_bad_txq;
+	if (unlikely(skb)) {
+		/* check the reason of requeuing without tx lock first */
+		txq = skb_get_tx_queue(txq->dev, skb);
+		if (!netif_xmit_frozen_or_stopped(txq)) {
+			q->skb_bad_txq = NULL;
+			qdisc_qstats_backlog_dec(q, skb);
+			q->q.qlen--;
+			goto bulk;
 		}
+		return NULL;
+	}
+	if (!(q->flags & TCQ_F_ONETXQUEUE) ||
+	    !netif_xmit_frozen_or_stopped(txq))
+		skb = q->dequeue(q);
+	if (skb) {
+bulk:
+		if (qdisc_may_bulk(q))
+			try_bulk_dequeue_skb(q, skb, txq, packets);
+		else
+			try_bulk_dequeue_skb_slow(q, skb, packets);
 	}
 	return skb;
 }
@@ -348,9 +392,10 @@ EXPORT_SYMBOL(netif_carrier_off);
    cheaper.
  */
-static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
+static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+			struct sk_buff **to_free)
 {
-	kfree_skb(skb);
+	__qdisc_drop(skb, to_free);
 	return NET_XMIT_CN;
 }
@@ -439,7 +484,8 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
 	return priv->q + band;
 }
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+			      struct sk_buff **to_free)
 {
 	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
 		int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -451,7 +497,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
 		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
-	return qdisc_drop(skb, qdisc);
+	return qdisc_drop(skb, qdisc, to_free);
 }
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
@@ -622,11 +668,14 @@ void qdisc_reset(struct Qdisc *qdisc)
 	if (ops->reset)
 		ops->reset(qdisc);
+	kfree_skb(qdisc->skb_bad_txq);
+	qdisc->skb_bad_txq = NULL;
 	if (qdisc->gso_skb) {
 		kfree_skb_list(qdisc->gso_skb);
 		qdisc->gso_skb = NULL;
-		qdisc->q.qlen = 0;
 	}
+	qdisc->q.qlen = 0;
 }
 EXPORT_SYMBOL(qdisc_reset);
@@ -665,6 +714,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	dev_put(qdisc_dev(qdisc));
 	kfree_skb_list(qdisc->gso_skb);
+	kfree_skb(qdisc->skb_bad_txq);
 	/*
 	 * gen_estimator est_timer() might access qdisc->q.lock,
 	 * wait a RCU grace period before freeing qdisc.
...
@@ -149,7 +149,8 @@ static inline int gred_use_harddrop(struct gred_sched *t)
 	return t->red_flags & TC_RED_HARDDROP;
 }
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			struct sk_buff **to_free)
 {
 	struct gred_sched_data *q = NULL;
 	struct gred_sched *t = qdisc_priv(sch);
@@ -237,10 +238,10 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->stats.pdrop++;
 drop:
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 congestion_drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 }
...
@@ -1572,7 +1572,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 }
 static int
-hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct hfsc_class *cl;
 	int uninitialized_var(err);
@@ -1581,11 +1581,11 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl == NULL) {
 		if (err & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return err;
 	}
-	err = qdisc_enqueue(skb, cl->qdisc);
+	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;
...
@@ -345,7 +345,7 @@ static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
 	skb->next = NULL;
 }
-static unsigned int hhf_drop(struct Qdisc *sch)
+static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	struct wdrr_bucket *bucket;
@@ -359,16 +359,16 @@ static unsigned int hhf_drop(struct Qdisc *sch)
 		struct sk_buff *skb = dequeue_head(bucket);
 		sch->q.qlen--;
-		qdisc_qstats_drop(sch);
 		qdisc_qstats_backlog_dec(sch, skb);
-		kfree_skb(skb);
+		qdisc_drop(skb, sch, to_free);
 	}
 	/* Return id of the bucket from which the packet was dropped. */
 	return bucket - q->buckets;
 }
-static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	enum wdrr_bucket_idx idx;
@@ -406,7 +406,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* Return Congestion Notification only if we dropped a packet from this
 	 * bucket.
 	 */
-	if (hhf_drop(sch) == idx)
+	if (hhf_drop(sch, to_free) == idx)
 		return NET_XMIT_CN;
 	/* As we dropped a packet, better let upper stack know this. */
...
@@ -117,7 +117,6 @@ struct htb_class {
	 * Written often fields
	 */
 	struct gnet_stats_basic_packed bstats;
-	struct gnet_stats_queue qstats;
 	struct tc_htb_xstats xstats;	/* our special stats */
 	/* token bucket parameters */
@@ -140,6 +139,8 @@ struct htb_class {
 	enum htb_cmode cmode;		/* current mode of the class */
 	struct rb_node pq_node;		/* node for event queue */
 	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
+
+	unsigned int drops ____cacheline_aligned_in_smp;
 };
 struct htb_level {
@@ -569,7 +570,8 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 	list_del_init(&cl->un.leaf.drop_list);
 }
-static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	int uninitialized_var(ret);
 	struct htb_sched *q = qdisc_priv(sch);
@@ -581,19 +583,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			__skb_queue_tail(&q->direct_queue, skb);
 			q->direct_pkts++;
 		} else {
-			return qdisc_drop(skb, sch);
+			return qdisc_drop(skb, sch, to_free);
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
 		if (ret & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return ret;
 #endif
-	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
+					to_free)) != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret)) {
 			qdisc_qstats_drop(sch);
-			cl->qstats.drops++;
+			cl->drops++;
 		}
 		return ret;
 	} else {
@@ -1108,17 +1111,22 @@ static int
 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
+	struct gnet_stats_queue qs = {
+		.drops = cl->drops,
+	};
 	__u32 qlen = 0;
-	if (!cl->level && cl->un.leaf.q)
+	if (!cl->level && cl->un.leaf.q) {
 		qlen = cl->un.leaf.q->q.qlen;
+		qs.backlog = cl->un.leaf.q->qstats.backlog;
+	}
 	cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
 	cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
+	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
 		return -1;
 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
...
@@ -65,7 +65,8 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 }
 static int
-multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+	       struct sk_buff **to_free)
 {
 	struct Qdisc *qdisc;
 	int ret;
@@ -76,12 +77,12 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (ret & __NET_XMIT_BYPASS)
 			qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return ret;
 	}
 #endif
-	ret = qdisc_enqueue(skb, qdisc);
+	ret = qdisc_enqueue(skb, qdisc, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
...
@@ -397,7 +397,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
  * when we statistically choose to corrupt one, we instead segment it, returning
  * the first packet to be corrupted, and re-enqueue the remaining frames
  */
-static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
+				     struct sk_buff **to_free)
 {
 	struct sk_buff *segs;
 	netdev_features_t features = netif_skb_features(skb);
@@ -405,7 +406,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 	if (IS_ERR_OR_NULL(segs)) {
-		qdisc_drop(skb, sch);
+		qdisc_drop(skb, sch, to_free);
 		return NULL;
 	}
 	consume_skb(skb);
@@ -418,7 +419,8 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
  *	NET_XMIT_DROP: queue length didn't change.
  *	NET_XMIT_SUCCESS: one skb was queued.
  */
-static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	/* We don't fill cb now as skb_unshare() may invalidate it */
@@ -443,7 +445,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	if (count == 0) {
 		qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
@@ -463,7 +465,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 		q->duplicate = 0;
-		rootq->enqueue(skb2, rootq);
+		rootq->enqueue(skb2, rootq, to_free);
 		q->duplicate = dupsave;
 	}
@@ -475,7 +477,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 */
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (skb_is_gso(skb)) {
-			segs = netem_segment(skb, sch);
+			segs = netem_segment(skb, sch, to_free);
 			if (!segs)
 				return NET_XMIT_DROP;
 		} else {
@@ -488,7 +490,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
 		     skb_checksum_help(skb))) {
-			rc = qdisc_drop(skb, sch);
+			rc = qdisc_drop(skb, sch, to_free);
 			goto finish_segs;
 		}
@@ -497,7 +499,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 	qdisc_qstats_backlog_inc(sch, skb);
@@ -557,7 +559,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			segs->next = NULL;
 			qdisc_skb_cb(segs)->pkt_len = segs->len;
 			last_len = segs->len;
-			rc = qdisc_enqueue(segs, sch);
+			rc = qdisc_enqueue(segs, sch, to_free);
 			if (rc != NET_XMIT_SUCCESS) {
 				if (net_xmit_drop_count(rc))
 					qdisc_qstats_drop(sch);
@@ -615,8 +617,11 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 #endif
 		if (q->qdisc) {
-			int err = qdisc_enqueue(skb, q->qdisc);
+			struct sk_buff *to_free = NULL;
+			int err;
+			err = qdisc_enqueue(skb, q->qdisc, &to_free);
+			kfree_skb_list(to_free);
 			if (unlikely(err != NET_XMIT_SUCCESS)) {
 				if (net_xmit_drop_count(err)) {
 					qdisc_qstats_drop(sch);
...
@@ -134,7 +134,8 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size)
 	return false;
 }
-static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			     struct sk_buff **to_free)
 {
 	struct pie_sched_data *q = qdisc_priv(sch);
 	bool enqueue = false;
@@ -166,7 +167,7 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 out:
 	q->stats.dropped++;
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
...
@@ -88,7 +88,8 @@ struct plug_sched_data {
 	u32 pkts_to_release;
 };
-static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			struct sk_buff **to_free)
 {
 	struct plug_sched_data *q = qdisc_priv(sch);
@@ -98,7 +99,7 @@ static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return qdisc_enqueue_tail(skb, sch);
 	}
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 static struct sk_buff *plug_dequeue(struct Qdisc *sch)
...
@@ -67,7 +67,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 }
 static int
-prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct Qdisc *qdisc;
 	int ret;
@@ -83,7 +83,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 #endif
-	ret = qdisc_enqueue(skb, qdisc);
+	ret = qdisc_enqueue(skb, qdisc, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
 		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
...
@@ -1217,7 +1217,8 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
 	return agg;
 }
-static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
@@ -1240,11 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 				     qdisc_pkt_len(skb));
 		if (err) {
 			cl->qstats.drops++;
-			return qdisc_drop(skb, sch);
+			return qdisc_drop(skb, sch, to_free);
 		}
 	}
-	err = qdisc_enqueue(skb, cl->qdisc);
+	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
 		if (net_xmit_drop_count(err)) {
...
@@ -56,7 +56,8 @@ static inline int red_use_harddrop(struct red_sched_data *q)
 	return q->flags & TC_RED_HARDDROP;
 }
-static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
@@ -95,7 +96,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		break;
 	}
-	ret = qdisc_enqueue(skb, child);
+	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
@@ -106,7 +107,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 congestion_drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 }
...
@@ -275,7 +275,8 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
 	return false;
 }
-static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
@@ -397,7 +398,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 enqueue:
-	ret = qdisc_enqueue(skb, child);
+	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
 		increment_qlen(skb, q);
@@ -408,7 +409,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 other_drop:
 	if (ret & __NET_XMIT_BYPASS)
...
@@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q)
 }
 static int
-sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	unsigned int hash, dropped;
@@ -367,7 +367,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (x == SFQ_EMPTY_SLOT) {
 		x = q->dep[0].next; /* get a free slot */
 		if (x >= SFQ_MAX_FLOWS)
-			return qdisc_drop(skb, sch);
+			return qdisc_drop(skb, sch, to_free);
 		q->ht[hash] = x;
 		slot = &q->slots[x];
 		slot->hash = hash;
@@ -424,14 +424,14 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (slot->qlen >= q->maxdepth) {
 congestion_drop:
 		if (!sfq_headdrop(q))
-			return qdisc_drop(skb, sch);
+			return qdisc_drop(skb, sch, to_free);
 		/* We know we have at least one packet in queue */
 		head = slot_dequeue_head(slot);
 		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
 		sch->qstats.backlog -= delta;
 		slot->backlog -= delta;
-		qdisc_drop(head, sch);
+		qdisc_drop(head, sch, to_free);
 		slot_queue_add(slot, skb);
 		return NET_XMIT_CN;
...
@@ -155,7 +155,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
 /* GSO packet is too big, segment it so that tbf can transmit
  * each segment in time
  */
-static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *segs, *nskb;
@@ -166,7 +167,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 	if (IS_ERR_OR_NULL(segs))
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 	nb = 0;
 	while (segs) {
@@ -174,7 +175,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 		segs->next = NULL;
 		qdisc_skb_cb(segs)->pkt_len = segs->len;
 		len += segs->len;
-		ret = qdisc_enqueue(segs, q->qdisc);
+		ret = qdisc_enqueue(segs, q->qdisc, to_free);
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
 				qdisc_qstats_drop(sch);
@@ -190,17 +191,18 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
 }
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 	if (qdisc_pkt_len(skb) > q->max_size) {
 		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
-			return tbf_segment(skb, sch);
-		return qdisc_drop(skb, sch);
+			return tbf_segment(skb, sch, to_free);
+		return qdisc_drop(skb, sch, to_free);
 	}
-	ret = qdisc_enqueue(skb, q->qdisc);
+	ret = qdisc_enqueue(skb, q->qdisc, to_free);
 	if (ret != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret))
 			qdisc_qstats_drop(sch);
...
@@ -77,7 +77,7 @@ struct teql_sched_data {
 /* "teql*" qdisc routines */
 static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct teql_sched_data *q = qdisc_priv(sch);
@@ -87,7 +87,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return NET_XMIT_SUCCESS;
 	}
-	return qdisc_drop(skb, sch);
+	return qdisc_drop(skb, sch, to_free);
 }
 static struct sk_buff *
...