Commit f56940da, authored by Ahmed S. Darwish, committed by David S. Miller

net: sched: Use _bstats_update/set() instead of raw writes

The Qdisc::running sequence counter, used to protect Qdisc::bstats reads
from parallel writes, is in the process of being removed. Qdisc::bstats
read/writes will synchronize using an internal u64_stats sync point
instead.

Modify all bstats writes to use _bstats_update(). This ensures that
the internal u64_stats sync point is always acquired and released as
appropriate.
Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 67c9e627
...@@ -126,6 +126,7 @@ EXPORT_SYMBOL(gnet_stats_basic_packed_init); ...@@ -126,6 +126,7 @@ EXPORT_SYMBOL(gnet_stats_basic_packed_init);
static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats, static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu) struct gnet_stats_basic_cpu __percpu *cpu)
{ {
u64 t_bytes = 0, t_packets = 0;
int i; int i;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
...@@ -139,9 +140,10 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats, ...@@ -139,9 +140,10 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
packets = bcpu->bstats.packets; packets = bcpu->bstats.packets;
} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
bstats->bytes += bytes; t_bytes += bytes;
bstats->packets += packets; t_packets += packets;
} }
_bstats_update(bstats, t_bytes, t_packets);
} }
void gnet_stats_add_basic(const seqcount_t *running, void gnet_stats_add_basic(const seqcount_t *running,
...@@ -164,8 +166,7 @@ void gnet_stats_add_basic(const seqcount_t *running, ...@@ -164,8 +166,7 @@ void gnet_stats_add_basic(const seqcount_t *running,
packets = b->packets; packets = b->packets;
} while (running && read_seqcount_retry(running, seq)); } while (running && read_seqcount_retry(running, seq));
bstats->bytes += bytes; _bstats_update(bstats, bytes, packets);
bstats->packets += packets;
} }
EXPORT_SYMBOL(gnet_stats_add_basic); EXPORT_SYMBOL(gnet_stats_add_basic);
......
...@@ -565,8 +565,7 @@ cbq_update(struct cbq_sched_data *q) ...@@ -565,8 +565,7 @@ cbq_update(struct cbq_sched_data *q)
long avgidle = cl->avgidle; long avgidle = cl->avgidle;
long idle; long idle;
cl->bstats.packets++; _bstats_update(&cl->bstats, len, 1);
cl->bstats.bytes += len;
/* /*
* (now - last) is total time between packet right edges. * (now - last) is total time between packet right edges.
......
...@@ -353,6 +353,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch) ...@@ -353,6 +353,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
{ {
struct gred_sched *table = qdisc_priv(sch); struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_qopt_offload *hw_stats; struct tc_gred_qopt_offload *hw_stats;
u64 bytes = 0, packets = 0;
unsigned int i; unsigned int i;
int ret; int ret;
...@@ -381,15 +382,15 @@ static int gred_offload_dump_stats(struct Qdisc *sch) ...@@ -381,15 +382,15 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes; table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog; table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
_bstats_update(&sch->bstats, bytes += hw_stats->stats.bstats[i].bytes;
hw_stats->stats.bstats[i].bytes, packets += hw_stats->stats.bstats[i].packets;
hw_stats->stats.bstats[i].packets);
sch->qstats.qlen += hw_stats->stats.qstats[i].qlen; sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
sch->qstats.backlog += hw_stats->stats.qstats[i].backlog; sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
sch->qstats.drops += hw_stats->stats.qstats[i].drops; sch->qstats.drops += hw_stats->stats.qstats[i].drops;
sch->qstats.requeues += hw_stats->stats.qstats[i].requeues; sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits; sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
} }
_bstats_update(&sch->bstats, bytes, packets);
kfree(hw_stats); kfree(hw_stats);
return ret; return ret;
......
...@@ -1308,6 +1308,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, ...@@ -1308,6 +1308,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
static void htb_offload_aggregate_stats(struct htb_sched *q, static void htb_offload_aggregate_stats(struct htb_sched *q,
struct htb_class *cl) struct htb_class *cl)
{ {
u64 bytes = 0, packets = 0;
struct htb_class *c; struct htb_class *c;
unsigned int i; unsigned int i;
...@@ -1323,14 +1324,15 @@ static void htb_offload_aggregate_stats(struct htb_sched *q, ...@@ -1323,14 +1324,15 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
if (p != cl) if (p != cl)
continue; continue;
cl->bstats.bytes += c->bstats_bias.bytes; bytes += c->bstats_bias.bytes;
cl->bstats.packets += c->bstats_bias.packets; packets += c->bstats_bias.packets;
if (c->level == 0) { if (c->level == 0) {
cl->bstats.bytes += c->leaf.q->bstats.bytes; bytes += c->leaf.q->bstats.bytes;
cl->bstats.packets += c->leaf.q->bstats.packets; packets += c->leaf.q->bstats.packets;
} }
} }
} }
_bstats_update(&cl->bstats, bytes, packets);
} }
static int static int
...@@ -1358,8 +1360,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) ...@@ -1358,8 +1360,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
cl->bstats = cl->leaf.q->bstats; cl->bstats = cl->leaf.q->bstats;
else else
gnet_stats_basic_packed_init(&cl->bstats); gnet_stats_basic_packed_init(&cl->bstats);
cl->bstats.bytes += cl->bstats_bias.bytes; _bstats_update(&cl->bstats,
cl->bstats.packets += cl->bstats_bias.packets; cl->bstats_bias.bytes,
cl->bstats_bias.packets);
} else { } else {
htb_offload_aggregate_stats(q, cl); htb_offload_aggregate_stats(q, cl);
} }
...@@ -1578,8 +1581,9 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl, ...@@ -1578,8 +1581,9 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
WARN_ON(old != q); WARN_ON(old != q);
if (cl->parent) { if (cl->parent) {
cl->parent->bstats_bias.bytes += q->bstats.bytes; _bstats_update(&cl->parent->bstats_bias,
cl->parent->bstats_bias.packets += q->bstats.packets; q->bstats.bytes,
q->bstats.packets);
} }
offload_opt = (struct tc_htb_qopt_offload) { offload_opt = (struct tc_htb_qopt_offload) {
...@@ -1925,8 +1929,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, ...@@ -1925,8 +1929,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
htb_graft_helper(dev_queue, old_q); htb_graft_helper(dev_queue, old_q);
goto err_kill_estimator; goto err_kill_estimator;
} }
parent->bstats_bias.bytes += old_q->bstats.bytes; _bstats_update(&parent->bstats_bias,
parent->bstats_bias.packets += old_q->bstats.packets; old_q->bstats.bytes,
old_q->bstats.packets);
qdisc_put(old_q); qdisc_put(old_q);
} }
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
......
...@@ -1235,8 +1235,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, ...@@ -1235,8 +1235,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err; return err;
} }
cl->bstats.bytes += len; _bstats_update(&cl->bstats, len, gso_segs);
cl->bstats.packets += gso_segs;
sch->qstats.backlog += len; sch->qstats.backlog += len;
++sch->q.qlen; ++sch->q.qlen;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment