Commit edb09eb1 authored by Eric Dumazet, committed by David S. Miller

net: sched: do not acquire qdisc spinlock in qdisc/class stats dump

Large tc dumps (tc -s {qdisc|class} sh dev ethX) done by the Google BwE
host agent [1] are problematic at scale:

For each qdisc/class found in the dump, we currently lock the root qdisc
spinlock in order to get stats. Sampling stats every 5 seconds from
thousands of HTB classes is a challenge when the root qdisc spinlock is
under high pressure. Not only do the dumps take time, they also slow
down the fast path (enqueue/dequeue of packets) by 10% to 20% in some cases.

An audit of existing qdiscs showed that sch_fq_codel is the only qdisc
that might need the qdisc lock in fq_codel_dump_stats() and
fq_codel_dump_class_stats().

In v2 of this patch, I now use the Qdisc running seqcount to provide
consistent reads of packets/bytes counters, regardless of 32/64 bit arches.

I also changed rate estimators to use the same infrastructure
so that they no longer need to lock root qdisc lock.

[1]
http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43838.pdf

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Kevin Athey <kda@google.com>
Cc: Xiaotian Pei <xiaotian@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f9eb8aea
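The core mechanism here is the seqcount added to struct Qdisc by the parent
commit: the dequeue path (the only writer of the basic counters) bumps a
sequence counter around its critical section, and stats readers retry until
they observe a stable, even sequence. A toy userspace model of the idea, for
readers unfamiliar with seqcounts (illustrative only; the kernel's seqcount_t
in <linux/seqlock.h> adds the proper memory barriers, and reading the plain
counters concurrently like this is a simplification):

/* Toy model of the seqcount pattern. The writer makes the counter odd
 * while updating and even when done; readers retry until they observe
 * an unchanged even counter. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;
static uint64_t bytes, packets;         /* the protected counters */

static void writer_update(uint64_t len) /* e.g. called from dequeue() */
{
        atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
        bytes += len;
        packets++;
        atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
}

static void reader_snapshot(uint64_t *b, uint64_t *p)
{
        unsigned int s;

        do {
                while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
                        ;               /* writer in progress, retry */
                *b = bytes;
                *p = packets;
                atomic_thread_fence(memory_order_acquire);
        } while (atomic_load_explicit(&seq, memory_order_relaxed) != s);
}

int main(void)
{
        uint64_t b, p;

        writer_update(1500);
        reader_snapshot(&b, &p);
        printf("bytes=%llu packets=%llu\n",
               (unsigned long long)b, (unsigned long long)p);
        return 0;
}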
@@ -21,7 +21,7 @@ struct mystruct {
 	...
 };

-Update statistics:
+Update statistics, in dequeue() methods only, (while owning qdisc->running)
 mystruct->tstats.packet++;
 mystruct->qstats.backlog += skb->pkt_len;
...
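Per the updated rule above, byte/packet counters may only be bumped from
dequeue(), where the qdisc owns qdisc->running; that is what makes the
lockless read side safe. A minimal sketch of a conforming dequeue method for
a hypothetical qdisc (qdisc_bstats_update() is the stock helper for this
update):

/* Sketch: update bstats only in dequeue(), where this qdisc owns
 * qdisc->running, so seqcount-based readers see a consistent pair. */
static struct sk_buff *my_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = __skb_dequeue(&sch->q);

        if (skb)
                qdisc_bstats_update(sch, skb); /* bytes += pkt len; packets++ */
        return skb;
}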
@@ -33,10 +33,12 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
                                 spinlock_t *lock, struct gnet_dump *d,
                                 int padattr);
-int gnet_stats_copy_basic(struct gnet_dump *d,
+int gnet_stats_copy_basic(const seqcount_t *running,
+                          struct gnet_dump *d,
                           struct gnet_stats_basic_cpu __percpu *cpu,
                           struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+void __gnet_stats_copy_basic(const seqcount_t *running,
+                             struct gnet_stats_basic_packed *bstats,
                              struct gnet_stats_basic_cpu __percpu *cpu,
                              struct gnet_stats_basic_packed *b);
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
@@ -52,13 +54,15 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
                       struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                       struct gnet_stats_rate_est64 *rate_est,
-                      spinlock_t *stats_lock, struct nlattr *opt);
+                      spinlock_t *stats_lock,
+                      seqcount_t *running, struct nlattr *opt);
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
                         struct gnet_stats_rate_est64 *rate_est);
 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
                           struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                           struct gnet_stats_rate_est64 *rate_est,
-                          spinlock_t *stats_lock, struct nlattr *opt);
+                          spinlock_t *stats_lock,
+                          seqcount_t *running, struct nlattr *opt);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
                           const struct gnet_stats_rate_est64 *rate_est);
 #endif
...
@@ -314,6 +314,14 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
        return qdisc_lock(root);
 }

+static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+{
+       struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+       ASSERT_RTNL();
+       return &root->running;
+}
+
 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
 {
        return qdisc->dev_queue->dev;
...
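The new helper mirrors qdisc_root_sleeping_lock() but returns the root
qdisc's running seqcount; the ASSERT_RTNL() documents that the root pointer
is only stable under RTNL. The matching write side comes from the parent
commit f9eb8aea ("net_sched: transform qdisc running bit into a seqcount"),
roughly as follows (sketch reconstructed from that commit, not part of this
patch):

/* Write side: the dequeue path owns qdisc->running between
 * qdisc_run_begin() and qdisc_run_end(), so stats readers can retry on
 * the seqcount instead of taking the root qdisc spinlock. */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        raw_write_seqcount_begin(&qdisc->running);
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        write_seqcount_end(&qdisc->running);
}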
@@ -84,6 +84,7 @@ struct gen_estimator
        struct gnet_stats_basic_packed  *bstats;
        struct gnet_stats_rate_est64    *rate_est;
        spinlock_t              *stats_lock;
+       seqcount_t              *running;
        int                     ewma_log;
        u32                     last_packets;
        unsigned long           avpps;
@@ -121,26 +122,28 @@ static void est_timer(unsigned long arg)
                unsigned long rate;
                u64 brate;

-               spin_lock(e->stats_lock);
+               if (e->stats_lock)
+                       spin_lock(e->stats_lock);
                read_lock(&est_lock);
                if (e->bstats == NULL)
                        goto skip;

-               __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);
+               __gnet_stats_copy_basic(e->running, &b, e->cpu_bstats, e->bstats);

                brate = (b.bytes - e->last_bytes)<<(7 - idx);
                e->last_bytes = b.bytes;
                e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
-               e->rate_est->bps = (e->avbps+0xF)>>5;
+               WRITE_ONCE(e->rate_est->bps, (e->avbps + 0xF) >> 5);

                rate = b.packets - e->last_packets;
                rate <<= (7 - idx);
                e->last_packets = b.packets;
                e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
-               e->rate_est->pps = (e->avpps + 0xF) >> 5;
+               WRITE_ONCE(e->rate_est->pps, (e->avpps + 0xF) >> 5);
 skip:
                read_unlock(&est_lock);
-               spin_unlock(e->stats_lock);
+               if (e->stats_lock)
+                       spin_unlock(e->stats_lock);
        }

        if (!list_empty(&elist[idx].list))
@@ -194,6 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  * @cpu_bstats: bstats per cpu
  * @rate_est: rate estimator statistics
  * @stats_lock: statistics lock
+ * @running: qdisc running seqcount
  * @opt: rate estimator configuration TLV
  *
  * Creates a new rate estimator with &bstats as source and &rate_est
@@ -209,6 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
                      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                      struct gnet_stats_rate_est64 *rate_est,
                      spinlock_t *stats_lock,
+                     seqcount_t *running,
                      struct nlattr *opt)
 {
        struct gen_estimator *est;
@@ -226,12 +231,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        if (est == NULL)
                return -ENOBUFS;

-       __gnet_stats_copy_basic(&b, cpu_bstats, bstats);
+       __gnet_stats_copy_basic(running, &b, cpu_bstats, bstats);

        idx = parm->interval + 2;
        est->bstats = bstats;
        est->rate_est = rate_est;
        est->stats_lock = stats_lock;
+       est->running = running;
        est->ewma_log = parm->ewma_log;
        est->last_bytes = b.bytes;
        est->avbps = rate_est->bps<<5;
@@ -291,6 +297,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
  * @cpu_bstats: bstats per cpu
  * @rate_est: rate estimator statistics
  * @stats_lock: statistics lock
+ * @running: qdisc running seqcount (might be NULL)
  * @opt: rate estimator configuration TLV
  *
  * Replaces the configuration of a rate estimator by calling
@@ -301,10 +308,11 @@ EXPORT_SYMBOL(gen_kill_estimator);
 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
                          struct gnet_stats_basic_cpu __percpu *cpu_bstats,
                          struct gnet_stats_rate_est64 *rate_est,
-                         spinlock_t *stats_lock, struct nlattr *opt)
+                         spinlock_t *stats_lock,
+                         seqcount_t *running, struct nlattr *opt)
 {
        gen_kill_estimator(bstats, rate_est);
-       return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
+       return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
 }
 EXPORT_SYMBOL(gen_replace_estimator);
...
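Two details in est_timer() are worth noting: stats_lock may now be NULL
(qdisc estimators pass the running seqcount instead and no longer need a lock
here), and the computed bps/pps are published with WRITE_ONCE() because dump
paths may read them without any lock. A reader would pair with those stores
roughly like this (illustrative sketch; the in-tree consumer is
gnet_stats_copy_rate_est()):

/* Illustrative lockless sampling of the estimator output; READ_ONCE()
 * pairs with the WRITE_ONCE() in est_timer() to avoid torn accesses. */
static void sample_rate_est(const struct gnet_stats_rate_est64 *re,
                            u64 *bps, u64 *pps)
{
        *bps = READ_ONCE(re->bps);
        *pps = READ_ONCE(re->pps);
}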
@@ -32,10 +32,11 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
        return 0;

 nla_put_failure:
+       if (d->lock)
+               spin_unlock_bh(d->lock);
        kfree(d->xstats);
        d->xstats = NULL;
        d->xstats_len = 0;
-       spin_unlock_bh(d->lock);
        return -1;
 }
@@ -65,15 +66,16 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
 {
        memset(d, 0, sizeof(*d));

-       spin_lock_bh(lock);
-       d->lock = lock;
        if (type)
                d->tail = (struct nlattr *)skb_tail_pointer(skb);
        d->skb = skb;
        d->compat_tc_stats = tc_stats_type;
        d->compat_xstats = xstats_type;
        d->padattr = padattr;
+       if (lock) {
+               d->lock = lock;
+               spin_lock_bh(lock);
+       }
        if (d->tail)
                return gnet_stats_copy(d, type, NULL, 0, padattr);
@@ -126,16 +128,23 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
 }

 void
-__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+__gnet_stats_copy_basic(const seqcount_t *running,
+                       struct gnet_stats_basic_packed *bstats,
                        struct gnet_stats_basic_cpu __percpu *cpu,
                        struct gnet_stats_basic_packed *b)
 {
+       unsigned int seq;
+
        if (cpu) {
                __gnet_stats_copy_basic_cpu(bstats, cpu);
-       } else {
+               return;
+       }
+       do {
+               if (running)
+                       seq = read_seqcount_begin(running);
                bstats->bytes = b->bytes;
                bstats->packets = b->packets;
-       }
+       } while (running && read_seqcount_retry(running, seq));
 }
 EXPORT_SYMBOL(__gnet_stats_copy_basic);
@@ -152,13 +161,14 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_basic(struct gnet_dump *d,
+gnet_stats_copy_basic(const seqcount_t *running,
+                     struct gnet_dump *d,
                      struct gnet_stats_basic_cpu __percpu *cpu,
                      struct gnet_stats_basic_packed *b)
 {
        struct gnet_stats_basic_packed bstats = {0};

-       __gnet_stats_copy_basic(&bstats, cpu, b);
+       __gnet_stats_copy_basic(running, &bstats, cpu, b);

        if (d->compat_tc_stats) {
                d->tc_stats.bytes = bstats.bytes;
@@ -328,8 +338,9 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
        return 0;

 err_out:
+       if (d->lock)
+               spin_unlock_bh(d->lock);
        d->xstats_len = 0;
-       spin_unlock_bh(d->lock);
        return -1;
 }
 EXPORT_SYMBOL(gnet_stats_copy_app);
@@ -363,10 +374,11 @@ gnet_stats_finish_copy(struct gnet_dump *d)
                        return -1;
        }

+       if (d->lock)
+               spin_unlock_bh(d->lock);
        kfree(d->xstats);
        d->xstats = NULL;
        d->xstats_len = 0;
-       spin_unlock_bh(d->lock);
        return 0;
 }
 EXPORT_SYMBOL(gnet_stats_finish_copy);
...
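Taken together, these gen_stats changes give struct gnet_dump an unlocked
mode: pass a NULL lock to gnet_stats_start_copy_compat() and d->lock stays
NULL, so none of the copy/finish helpers touch a spinlock. The qdisc dump
path below uses exactly that; a condensed sketch of the resulting call
sequence (names per the tc_fill_qdisc() hunk further down):

/* Condensed sketch of the lockless dump sequence: no root qdisc
 * spinlock is taken; consistency of the basic counters comes from the
 * per-qdisc running seqcount. */
static int dump_qdisc_stats_sketch(struct sk_buff *skb, struct Qdisc *q)
{
        struct gnet_dump d;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
                                         TCA_XSTATS, NULL /* no lock */,
                                         &d, TCA_PAD) < 0)
                return -1;
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
                                  &d, q->cpu_bstats, &q->bstats) < 0)
                return -1;
        return gnet_stats_finish_copy(&d);
}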
@@ -137,7 +137,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
        cfg.est.ewma_log = info->ewma_log;

        ret = gen_new_estimator(&est->bstats, NULL, &est->rstats,
-                               &est->lock, &cfg.opt);
+                               &est->lock, NULL, &cfg.opt);
        if (ret < 0)
                goto err2;
...
@@ -287,7 +287,7 @@ int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
        if (est) {
                err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
                                        &p->tcfc_rate_est,
-                                       &p->tcfc_lock, est);
+                                       &p->tcfc_lock, NULL, est);
                if (err) {
                        free_percpu(p->cpu_qstats);
                        goto err2;
@@ -671,7 +671,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
        if (err < 0)
                goto errout;

-       if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
+       if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
                                     &p->tcfc_rate_est) < 0 ||
            gnet_stats_copy_queue(&d, p->cpu_qstats,
...
@@ -185,7 +185,8 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
        if (est) {
                err = gen_replace_estimator(&police->tcf_bstats, NULL,
                                            &police->tcf_rate_est,
-                                           &police->tcf_lock, est);
+                                           &police->tcf_lock,
+                                           NULL, est);
                if (err)
                        goto failure_unlock;
        } else if (tb[TCA_POLICE_AVRATE] &&
...
@@ -982,7 +982,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                        rcu_assign_pointer(sch->stab, stab);
                }
                if (tca[TCA_RATE]) {
-                       spinlock_t *root_lock;
+                       seqcount_t *running;

                        err = -EOPNOTSUPP;
                        if (sch->flags & TCQ_F_MQROOT)
@@ -991,14 +991,15 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                        if ((sch->parent != TC_H_ROOT) &&
                            !(sch->flags & TCQ_F_INGRESS) &&
                            (!p || !(p->flags & TCQ_F_MQROOT)))
-                               root_lock = qdisc_root_sleeping_lock(sch);
+                               running = qdisc_root_sleeping_running(sch);
                        else
-                               root_lock = qdisc_lock(sch);
+                               running = &sch->running;

                        err = gen_new_estimator(&sch->bstats,
                                                sch->cpu_bstats,
                                                &sch->rate_est,
-                                               root_lock,
+                                               NULL,
+                                               running,
                                                tca[TCA_RATE]);
                        if (err)
                                goto err_out4;
@@ -1061,7 +1062,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
                gen_replace_estimator(&sch->bstats,
                                      sch->cpu_bstats,
                                      &sch->rate_est,
-                                     qdisc_root_sleeping_lock(sch),
+                                     NULL,
+                                     qdisc_root_sleeping_running(sch),
                                      tca[TCA_RATE]);
        }
 out:
@@ -1369,8 +1371,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-                                        qdisc_root_sleeping_lock(q), &d,
-                                        TCA_PAD) < 0)
+                                        NULL, &d, TCA_PAD) < 0)
                goto nla_put_failure;

        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1381,7 +1382,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                cpu_qstats = q->cpu_qstats;
        }

-       if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
+                                 &d, cpu_bstats, &q->bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
            gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
                goto nla_put_failure;
@@ -1684,8 +1686,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                goto nla_put_failure;

        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-                                        qdisc_root_sleeping_lock(q), &d,
-                                        TCA_PAD) < 0)
+                                        NULL, &d, TCA_PAD) < 0)
                goto nla_put_failure;

        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
...
@@ -637,7 +637,8 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct atm_flow_data *flow = (struct atm_flow_data *)arg;

-       if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                 d, NULL, &flow->bstats) < 0 ||
            gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
                return -1;
...
@@ -1600,7 +1600,8 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;

-       if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                 d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
                return -1;
@@ -1755,7 +1756,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
-                                                   qdisc_root_sleeping_lock(sch),
+                                                   NULL,
+                                                   qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err) {
                                qdisc_put_rtab(rtab);
@@ -1848,7 +1850,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (tca[TCA_RATE]) {
                err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                       qdisc_root_sleeping_lock(sch),
+                                       NULL,
+                                       qdisc_root_sleeping_running(sch),
                                        tca[TCA_RATE]);
                if (err) {
                        kfree(cl);
...
@@ -91,7 +91,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
-                                                   qdisc_root_sleeping_lock(sch),
+                                                   NULL,
+                                                   qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err)
                                return err;
@@ -119,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        if (tca[TCA_RATE]) {
                err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                           qdisc_root_sleeping_lock(sch),
+                                           NULL,
+                                           qdisc_root_sleeping_running(sch),
                                            tca[TCA_RATE]);
                if (err) {
                        qdisc_destroy(cl->qdisc);
@@ -279,7 +281,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (qlen)
                xstats.deficit = cl->deficit;

-       if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                 d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
                return -1;
...
@@ -566,11 +566,13 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
        st.qdisc_stats.memory_usage = q->memory_usage;
        st.qdisc_stats.drop_overmemory = q->drop_overmemory;

+       sch_tree_lock(sch);
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;
+       sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
 }
@@ -624,7 +626,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
-               const struct sk_buff *skb = flow->head;
+               const struct sk_buff *skb;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
@@ -642,9 +644,14 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
-               while (skb) {
-                       qs.qlen++;
-                       skb = skb->next;
+               if (flow->head) {
+                       sch_tree_lock(sch);
+                       skb = flow->head;
+                       while (skb) {
+                               qs.qlen++;
+                               skb = skb->next;
+                       }
+                       sch_tree_unlock(sch);
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
...
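fq_codel is the one qdisc that still needs serialization in its dump
handlers: walking q->new_flows/q->old_flows or a flow's skb list races with
concurrent enqueue/dequeue. The patch therefore takes sch_tree_lock() only
around those traversals, and for class stats only when the flow is
non-empty, keeping the common case lock-free. The shape of that pattern,
condensed from the hunk above:

/* Cheap lockless precheck, then take the tree lock only around the
 * traversal that can race with the datapath. */
if (flow->head) {
        sch_tree_lock(sch);
        for (skb = flow->head; skb; skb = skb->next)
                qs.qlen++;
        sch_tree_unlock(sch);
}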
@@ -1015,11 +1015,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                cur_time = psched_get_time();

                if (tca[TCA_RATE]) {
-                       spinlock_t *lock = qdisc_root_sleeping_lock(sch);
-
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
-                                                   lock,
+                                                   NULL,
+                                                   qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err)
                                return err;
@@ -1068,7 +1067,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        if (tca[TCA_RATE]) {
                err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                       qdisc_root_sleeping_lock(sch),
+                                       NULL,
+                                       qdisc_root_sleeping_running(sch),
                                        tca[TCA_RATE]);
                if (err) {
                        kfree(cl);
@@ -1373,7 +1373,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        xstats.work = cl->cl_total;
        xstats.rtwork = cl->cl_cumul;

-       if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
                return -1;
...
@@ -1141,7 +1141,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
        cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
        cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);

-       if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                 d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
@@ -1395,7 +1396,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                if (htb_rate_est || tca[TCA_RATE]) {
                        err = gen_new_estimator(&cl->bstats, NULL,
                                                &cl->rate_est,
-                                               qdisc_root_sleeping_lock(sch),
+                                               NULL,
+                                               qdisc_root_sleeping_running(sch),
                                                tca[TCA_RATE] ? : &est.nla);
                        if (err) {
                                kfree(cl);
@@ -1457,11 +1459,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                parent->children++;
        } else {
                if (tca[TCA_RATE]) {
-                       spinlock_t *lock = qdisc_root_sleeping_lock(sch);
-
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
-                                                   lock,
+                                                   NULL,
+                                                   qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err)
                                return err;
...
@@ -199,7 +199,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

        sch = dev_queue->qdisc_sleeping;
-       if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+       if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
            gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
                return -1;
        return 0;
...
@@ -342,7 +342,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                 * hold here is the look on dev_queue->qdisc_sleeping
                 * also acquired below.
                 */
-               spin_unlock_bh(d->lock);
+               if (d->lock)
+                       spin_unlock_bh(d->lock);
                for (i = tc.offset; i < tc.offset + tc.count; i++) {
                        struct netdev_queue *q = netdev_get_tx_queue(dev, i);
@@ -359,15 +360,17 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
                /* Reclaim root sleeping lock before completing stats */
-               spin_lock_bh(d->lock);
-               if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
+               if (d->lock)
+                       spin_lock_bh(d->lock);
+               if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
                    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
                        return -1;
        } else {
                struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

                sch = dev_queue->qdisc_sleeping;
-               if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+               if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                         d, NULL, &sch->bstats) < 0 ||
                    gnet_stats_copy_queue(d, NULL,
                                          &sch->qstats, sch->q.qlen) < 0)
                        return -1;
...
@@ -356,7 +356,8 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        struct Qdisc *cl_q;

        cl_q = q->queues[cl - 1];
-       if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                 d, NULL, &cl_q->bstats) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
                return -1;
...
@@ -319,7 +319,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        struct Qdisc *cl_q;

        cl_q = q->queues[cl - 1];
-       if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                 d, NULL, &cl_q->bstats) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
                return -1;
...
@@ -460,7 +460,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
-                                                   qdisc_root_sleeping_lock(sch),
+                                                   NULL,
+                                                   qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err)
                                return err;
@@ -486,7 +487,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        if (tca[TCA_RATE]) {
                err = gen_new_estimator(&cl->bstats, NULL,
                                        &cl->rate_est,
-                                       qdisc_root_sleeping_lock(sch),
+                                       NULL,
+                                       qdisc_root_sleeping_running(sch),
                                        tca[TCA_RATE]);
                if (err)
                        goto destroy_class;
@@ -663,7 +665,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        xstats.weight = cl->agg->class_weight;
        xstats.lmax = cl->agg->lmax;

-       if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
+       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+                                 d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL,
                                  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
...