Commit 50dc9a85 authored by Ahmed S. Darwish, committed by David S. Miller

net: sched: Merge Qdisc::bstats and Qdisc::cpu_bstats data types

The only factor differentiating per-CPU bstats data type (struct
gnet_stats_basic_cpu) from the packed non-per-CPU one (struct
gnet_stats_basic_packed) was a u64_stats sync point inside the former.
The two data types are now equivalent: earlier commits added a u64_stats
sync point to the latter.

Combine both data types into "struct gnet_stats_basic_sync". This
eliminates redundancy and simplifies the bstats read/write APIs.

Use u64_stats_t for bstats "packets" and "bytes" data types. On 64-bit
architectures, u64_stats sync points do not use sequence counter
protection.
Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f56940da
...@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle, ...@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
static void static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new, nfp_abm_stats_calculate(struct nfp_alink_stats *new,
struct nfp_alink_stats *old, struct nfp_alink_stats *old,
struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_sync *bstats,
struct gnet_stats_queue *qstats) struct gnet_stats_queue *qstats)
{ {
_bstats_update(bstats, new->tx_bytes - old->tx_bytes, _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
......
...@@ -30,13 +30,13 @@ struct tc_action { ...@@ -30,13 +30,13 @@ struct tc_action {
atomic_t tcfa_bindcnt; atomic_t tcfa_bindcnt;
int tcfa_action; int tcfa_action;
struct tcf_t tcfa_tm; struct tcf_t tcfa_tm;
struct gnet_stats_basic_packed tcfa_bstats; struct gnet_stats_basic_sync tcfa_bstats;
struct gnet_stats_basic_packed tcfa_bstats_hw; struct gnet_stats_basic_sync tcfa_bstats_hw;
struct gnet_stats_queue tcfa_qstats; struct gnet_stats_queue tcfa_qstats;
struct net_rate_estimator __rcu *tcfa_rate_est; struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock; spinlock_t tcfa_lock;
struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw; struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats; struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie; struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain; struct tcf_chain __rcu *goto_chain;
...@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a, ...@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb) struct sk_buff *skb)
{ {
if (likely(a->cpu_bstats)) { if (likely(a->cpu_bstats)) {
bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb); bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
return; return;
} }
spin_lock(&a->tcfa_lock); spin_lock(&a->tcfa_lock);
......
...@@ -7,15 +7,17 @@ ...@@ -7,15 +7,17 @@
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/pkt_sched.h> #include <linux/pkt_sched.h>
/* Note: this used to be in include/uapi/linux/gen_stats.h */ /* Throughput stats.
struct gnet_stats_basic_packed { * Must be initialized beforehand with gnet_stats_basic_sync_init().
__u64 bytes; *
__u64 packets; * If no reads can ever occur parallel to writes (e.g. stack-allocated
struct u64_stats_sync syncp; * bstats), then the internal stat values can be written to and read
}; * from directly. Otherwise, use _bstats_set/update() for writes and
* gnet_stats_add_basic() for reads.
struct gnet_stats_basic_cpu { */
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync {
u64_stats_t bytes;
u64_stats_t packets;
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64)); } __aligned(2 * sizeof(u64));
...@@ -35,7 +37,7 @@ struct gnet_dump { ...@@ -35,7 +37,7 @@ struct gnet_dump {
struct tc_stats tc_stats; struct tc_stats tc_stats;
}; };
void gnet_stats_basic_packed_init(struct gnet_stats_basic_packed *b); void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
struct gnet_dump *d, int padattr); struct gnet_dump *d, int padattr);
...@@ -46,16 +48,16 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, ...@@ -46,16 +48,16 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
int gnet_stats_copy_basic(const seqcount_t *running, int gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_dump *d, struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_sync __percpu *cpu,
struct gnet_stats_basic_packed *b); struct gnet_stats_basic_sync *b);
void gnet_stats_add_basic(const seqcount_t *running, void gnet_stats_add_basic(const seqcount_t *running,
struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_sync __percpu *cpu,
struct gnet_stats_basic_packed *b); struct gnet_stats_basic_sync *b);
int gnet_stats_copy_basic_hw(const seqcount_t *running, int gnet_stats_copy_basic_hw(const seqcount_t *running,
struct gnet_dump *d, struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_sync __percpu *cpu,
struct gnet_stats_basic_packed *b); struct gnet_stats_basic_sync *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d, int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator __rcu **ptr); struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d, int gnet_stats_copy_queue(struct gnet_dump *d,
...@@ -68,14 +70,14 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); ...@@ -68,14 +70,14 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
int gnet_stats_finish_copy(struct gnet_dump *d); int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats, int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est, struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock, spinlock_t *lock,
seqcount_t *running, struct nlattr *opt); seqcount_t *running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr, struct net_rate_estimator __rcu **ptr,
spinlock_t *lock, spinlock_t *lock,
seqcount_t *running, struct nlattr *opt); seqcount_t *running, struct nlattr *opt);
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
struct xt_rateest { struct xt_rateest {
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */ /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
spinlock_t lock; spinlock_t lock;
......
...@@ -765,7 +765,7 @@ struct tc_cookie { ...@@ -765,7 +765,7 @@ struct tc_cookie {
}; };
struct tc_qopt_offload_stats { struct tc_qopt_offload_stats {
struct gnet_stats_basic_packed *bstats; struct gnet_stats_basic_sync *bstats;
struct gnet_stats_queue *qstats; struct gnet_stats_queue *qstats;
}; };
...@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params { ...@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params {
}; };
struct tc_gred_qopt_offload_stats { struct tc_gred_qopt_offload_stats {
struct gnet_stats_basic_packed bstats[MAX_DPs]; struct gnet_stats_basic_sync bstats[MAX_DPs];
struct gnet_stats_queue qstats[MAX_DPs]; struct gnet_stats_queue qstats[MAX_DPs];
struct red_stats *xstats[MAX_DPs]; struct red_stats *xstats[MAX_DPs];
}; };
......
...@@ -97,7 +97,7 @@ struct Qdisc { ...@@ -97,7 +97,7 @@ struct Qdisc {
struct netdev_queue *dev_queue; struct netdev_queue *dev_queue;
struct net_rate_estimator __rcu *rate_est; struct net_rate_estimator __rcu *rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats; struct gnet_stats_queue __percpu *cpu_qstats;
int pad; int pad;
refcount_t refcnt; refcount_t refcnt;
...@@ -107,7 +107,7 @@ struct Qdisc { ...@@ -107,7 +107,7 @@ struct Qdisc {
*/ */
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q; struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
seqcount_t running; seqcount_t running;
struct gnet_stats_queue qstats; struct gnet_stats_queue qstats;
unsigned long state; unsigned long state;
...@@ -849,16 +849,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, ...@@ -849,16 +849,16 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return sch->enqueue(skb, sch, to_free); return sch->enqueue(skb, sch, to_free);
} }
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats, static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
__u64 bytes, __u32 packets) __u64 bytes, __u32 packets)
{ {
u64_stats_update_begin(&bstats->syncp); u64_stats_update_begin(&bstats->syncp);
bstats->bytes += bytes; u64_stats_add(&bstats->bytes, bytes);
bstats->packets += packets; u64_stats_add(&bstats->packets, packets);
u64_stats_update_end(&bstats->syncp); u64_stats_update_end(&bstats->syncp);
} }
static inline void bstats_update(struct gnet_stats_basic_packed *bstats, static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb) const struct sk_buff *skb)
{ {
_bstats_update(bstats, _bstats_update(bstats,
...@@ -866,26 +866,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats, ...@@ -866,26 +866,10 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1); skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
} }
static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
__u64 bytes, __u32 packets)
{
u64_stats_update_begin(&bstats->syncp);
_bstats_update(&bstats->bstats, bytes, packets);
u64_stats_update_end(&bstats->syncp);
}
static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
const struct sk_buff *skb)
{
u64_stats_update_begin(&bstats->syncp);
bstats_update(&bstats->bstats, skb);
u64_stats_update_end(&bstats->syncp);
}
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch, static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
const struct sk_buff *skb) const struct sk_buff *skb)
{ {
bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb); bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
} }
static inline void qdisc_bstats_update(struct Qdisc *sch, static inline void qdisc_bstats_update(struct Qdisc *sch,
...@@ -1317,7 +1301,7 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64); ...@@ -1317,7 +1301,7 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
struct mini_Qdisc { struct mini_Qdisc {
struct tcf_proto *filter_list; struct tcf_proto *filter_list;
struct tcf_block *block; struct tcf_block *block;
struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_basic_sync __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats; struct gnet_stats_queue __percpu *cpu_qstats;
struct rcu_head rcu; struct rcu_head rcu;
}; };
...@@ -1325,7 +1309,7 @@ struct mini_Qdisc { ...@@ -1325,7 +1309,7 @@ struct mini_Qdisc {
static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq, static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
const struct sk_buff *skb) const struct sk_buff *skb)
{ {
bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb); bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
} }
static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq) static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
......
...@@ -40,10 +40,10 @@ ...@@ -40,10 +40,10 @@
*/ */
struct net_rate_estimator { struct net_rate_estimator {
struct gnet_stats_basic_packed *bstats; struct gnet_stats_basic_sync *bstats;
spinlock_t *stats_lock; spinlock_t *stats_lock;
seqcount_t *running; seqcount_t *running;
struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_basic_sync __percpu *cpu_bstats;
u8 ewma_log; u8 ewma_log;
u8 intvl_log; /* period : (250ms << intvl_log) */ u8 intvl_log; /* period : (250ms << intvl_log) */
...@@ -60,9 +60,9 @@ struct net_rate_estimator { ...@@ -60,9 +60,9 @@ struct net_rate_estimator {
}; };
static void est_fetch_counters(struct net_rate_estimator *e, static void est_fetch_counters(struct net_rate_estimator *e,
struct gnet_stats_basic_packed *b) struct gnet_stats_basic_sync *b)
{ {
gnet_stats_basic_packed_init(b); gnet_stats_basic_sync_init(b);
if (e->stats_lock) if (e->stats_lock)
spin_lock(e->stats_lock); spin_lock(e->stats_lock);
...@@ -76,14 +76,18 @@ static void est_fetch_counters(struct net_rate_estimator *e, ...@@ -76,14 +76,18 @@ static void est_fetch_counters(struct net_rate_estimator *e,
static void est_timer(struct timer_list *t) static void est_timer(struct timer_list *t)
{ {
struct net_rate_estimator *est = from_timer(est, t, timer); struct net_rate_estimator *est = from_timer(est, t, timer);
struct gnet_stats_basic_packed b; struct gnet_stats_basic_sync b;
u64 b_bytes, b_packets;
u64 rate, brate; u64 rate, brate;
est_fetch_counters(est, &b); est_fetch_counters(est, &b);
brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log); b_bytes = u64_stats_read(&b.bytes);
b_packets = u64_stats_read(&b.packets);
brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log);
brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log); brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
rate = (b.packets - est->last_packets) << (10 - est->intvl_log); rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log); rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
write_seqcount_begin(&est->seq); write_seqcount_begin(&est->seq);
...@@ -91,8 +95,8 @@ static void est_timer(struct timer_list *t) ...@@ -91,8 +95,8 @@ static void est_timer(struct timer_list *t)
est->avpps += rate; est->avpps += rate;
write_seqcount_end(&est->seq); write_seqcount_end(&est->seq);
est->last_bytes = b.bytes; est->last_bytes = b_bytes;
est->last_packets = b.packets; est->last_packets = b_packets;
est->next_jiffies += ((HZ/4) << est->intvl_log); est->next_jiffies += ((HZ/4) << est->intvl_log);
...@@ -121,8 +125,8 @@ static void est_timer(struct timer_list *t) ...@@ -121,8 +125,8 @@ static void est_timer(struct timer_list *t)
* Returns 0 on success or a negative error code. * Returns 0 on success or a negative error code.
* *
*/ */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats, int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est, struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock, spinlock_t *lock,
seqcount_t *running, seqcount_t *running,
...@@ -130,7 +134,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, ...@@ -130,7 +134,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
{ {
struct gnet_estimator *parm = nla_data(opt); struct gnet_estimator *parm = nla_data(opt);
struct net_rate_estimator *old, *est; struct net_rate_estimator *old, *est;
struct gnet_stats_basic_packed b; struct gnet_stats_basic_sync b;
int intvl_log; int intvl_log;
if (nla_len(opt) < sizeof(*parm)) if (nla_len(opt) < sizeof(*parm))
...@@ -164,8 +168,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, ...@@ -164,8 +168,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
est_fetch_counters(est, &b); est_fetch_counters(est, &b);
if (lock) if (lock)
local_bh_enable(); local_bh_enable();
est->last_bytes = b.bytes; est->last_bytes = u64_stats_read(&b.bytes);
est->last_packets = b.packets; est->last_packets = u64_stats_read(&b.packets);
if (lock) if (lock)
spin_lock_bh(lock); spin_lock_bh(lock);
...@@ -222,8 +226,8 @@ EXPORT_SYMBOL(gen_kill_estimator); ...@@ -222,8 +226,8 @@ EXPORT_SYMBOL(gen_kill_estimator);
* *
* Returns 0 on success or a negative error code. * Returns 0 on success or a negative error code.
*/ */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_basic_sync __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est, struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock, spinlock_t *lock,
seqcount_t *running, struct nlattr *opt) seqcount_t *running, struct nlattr *opt)
......
...@@ -115,29 +115,29 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, ...@@ -115,29 +115,29 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
EXPORT_SYMBOL(gnet_stats_start_copy); EXPORT_SYMBOL(gnet_stats_start_copy);
/* Must not be inlined, due to u64_stats seqcount_t lockdep key */ /* Must not be inlined, due to u64_stats seqcount_t lockdep key */
void gnet_stats_basic_packed_init(struct gnet_stats_basic_packed *b) void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
{ {
b->bytes = 0; u64_stats_set(&b->bytes, 0);
b->packets = 0; u64_stats_set(&b->packets, 0);
u64_stats_init(&b->syncp); u64_stats_init(&b->syncp);
} }
EXPORT_SYMBOL(gnet_stats_basic_packed_init); EXPORT_SYMBOL(gnet_stats_basic_sync_init);
static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats, static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_cpu __percpu *cpu) struct gnet_stats_basic_sync __percpu *cpu)
{ {
u64 t_bytes = 0, t_packets = 0; u64 t_bytes = 0, t_packets = 0;
int i; int i;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
unsigned int start; unsigned int start;
u64 bytes, packets; u64 bytes, packets;
do { do {
start = u64_stats_fetch_begin_irq(&bcpu->syncp); start = u64_stats_fetch_begin_irq(&bcpu->syncp);
bytes = bcpu->bstats.bytes; bytes = u64_stats_read(&bcpu->bytes);
packets = bcpu->bstats.packets; packets = u64_stats_read(&bcpu->packets);
} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start)); } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
t_bytes += bytes; t_bytes += bytes;
...@@ -147,9 +147,9 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats, ...@@ -147,9 +147,9 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
} }
void gnet_stats_add_basic(const seqcount_t *running, void gnet_stats_add_basic(const seqcount_t *running,
struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_sync *bstats,
struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_sync __percpu *cpu,
struct gnet_stats_basic_packed *b) struct gnet_stats_basic_sync *b)
{ {
unsigned int seq; unsigned int seq;
u64 bytes = 0; u64 bytes = 0;
...@@ -162,8 +162,8 @@ void gnet_stats_add_basic(const seqcount_t *running, ...@@ -162,8 +162,8 @@ void gnet_stats_add_basic(const seqcount_t *running,
do { do {
if (running) if (running)
seq = read_seqcount_begin(running); seq = read_seqcount_begin(running);
bytes = b->bytes; bytes = u64_stats_read(&b->bytes);
packets = b->packets; packets = u64_stats_read(&b->packets);
} while (running && read_seqcount_retry(running, seq)); } while (running && read_seqcount_retry(running, seq));
_bstats_update(bstats, bytes, packets); _bstats_update(bstats, bytes, packets);
...@@ -173,18 +173,22 @@ EXPORT_SYMBOL(gnet_stats_add_basic); ...@@ -173,18 +173,22 @@ EXPORT_SYMBOL(gnet_stats_add_basic);
static int static int
___gnet_stats_copy_basic(const seqcount_t *running, ___gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_dump *d, struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_sync __percpu *cpu,
struct gnet_stats_basic_packed *b, struct gnet_stats_basic_sync *b,
int type) int type)
{ {
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
u64 bstats_bytes, bstats_packets;
gnet_stats_basic_packed_init(&bstats); gnet_stats_basic_sync_init(&bstats);
gnet_stats_add_basic(running, &bstats, cpu, b); gnet_stats_add_basic(running, &bstats, cpu, b);
bstats_bytes = u64_stats_read(&bstats.bytes);
bstats_packets = u64_stats_read(&bstats.packets);
if (d->compat_tc_stats && type == TCA_STATS_BASIC) { if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
d->tc_stats.bytes = bstats.bytes; d->tc_stats.bytes = bstats_bytes;
d->tc_stats.packets = bstats.packets; d->tc_stats.packets = bstats_packets;
} }
if (d->tail) { if (d->tail) {
...@@ -192,14 +196,14 @@ ___gnet_stats_copy_basic(const seqcount_t *running, ...@@ -192,14 +196,14 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
int res; int res;
memset(&sb, 0, sizeof(sb)); memset(&sb, 0, sizeof(sb));
sb.bytes = bstats.bytes; sb.bytes = bstats_bytes;
sb.packets = bstats.packets; sb.packets = bstats_packets;
res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD); res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
if (res < 0 || sb.packets == bstats.packets) if (res < 0 || sb.packets == bstats_packets)
return res; return res;
/* emit 64bit stats only if needed */ /* emit 64bit stats only if needed */
return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats.packets, return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
sizeof(bstats.packets), TCA_STATS_PAD); sizeof(bstats_packets), TCA_STATS_PAD);
} }
return 0; return 0;
} }
...@@ -220,8 +224,8 @@ ___gnet_stats_copy_basic(const seqcount_t *running, ...@@ -220,8 +224,8 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
int int
gnet_stats_copy_basic(const seqcount_t *running, gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_dump *d, struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_sync __percpu *cpu,
struct gnet_stats_basic_packed *b) struct gnet_stats_basic_sync *b)
{ {
return ___gnet_stats_copy_basic(running, d, cpu, b, return ___gnet_stats_copy_basic(running, d, cpu, b,
TCA_STATS_BASIC); TCA_STATS_BASIC);
...@@ -244,8 +248,8 @@ EXPORT_SYMBOL(gnet_stats_copy_basic); ...@@ -244,8 +248,8 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
int int
gnet_stats_copy_basic_hw(const seqcount_t *running, gnet_stats_copy_basic_hw(const seqcount_t *running,
struct gnet_dump *d, struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_sync __percpu *cpu,
struct gnet_stats_basic_packed *b) struct gnet_stats_basic_sync *b)
{ {
return ___gnet_stats_copy_basic(running, d, cpu, b, return ___gnet_stats_copy_basic(running, d, cpu, b,
TCA_STATS_BASIC_HW); TCA_STATS_BASIC_HW);
......
...@@ -94,11 +94,11 @@ static unsigned int ...@@ -94,11 +94,11 @@ static unsigned int
xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par) xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
{ {
const struct xt_rateest_target_info *info = par->targinfo; const struct xt_rateest_target_info *info = par->targinfo;
struct gnet_stats_basic_packed *stats = &info->est->bstats; struct gnet_stats_basic_sync *stats = &info->est->bstats;
spin_lock_bh(&info->est->lock); spin_lock_bh(&info->est->lock);
stats->bytes += skb->len; u64_stats_add(&stats->bytes, skb->len);
stats->packets++; u64_stats_inc(&stats->packets);
spin_unlock_bh(&info->est->lock); spin_unlock_bh(&info->est->lock);
return XT_CONTINUE; return XT_CONTINUE;
...@@ -143,7 +143,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) ...@@ -143,7 +143,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
if (!est) if (!est)
goto err1; goto err1;
gnet_stats_basic_packed_init(&est->bstats); gnet_stats_basic_sync_init(&est->bstats);
strlcpy(est->name, info->name, sizeof(est->name)); strlcpy(est->name, info->name, sizeof(est->name));
spin_lock_init(&est->lock); spin_lock_init(&est->lock);
est->refcnt = 1; est->refcnt = 1;
......
...@@ -480,18 +480,18 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, ...@@ -480,18 +480,18 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
atomic_set(&p->tcfa_bindcnt, 1); atomic_set(&p->tcfa_bindcnt, 1);
if (cpustats) { if (cpustats) {
p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
if (!p->cpu_bstats) if (!p->cpu_bstats)
goto err1; goto err1;
p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
if (!p->cpu_bstats_hw) if (!p->cpu_bstats_hw)
goto err2; goto err2;
p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!p->cpu_qstats) if (!p->cpu_qstats)
goto err3; goto err3;
} }
gnet_stats_basic_packed_init(&p->tcfa_bstats); gnet_stats_basic_sync_init(&p->tcfa_bstats);
gnet_stats_basic_packed_init(&p->tcfa_bstats_hw); gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
spin_lock_init(&p->tcfa_lock); spin_lock_init(&p->tcfa_lock);
p->tcfa_index = index; p->tcfa_index = index;
p->tcfa_tm.install = jiffies; p->tcfa_tm.install = jiffies;
...@@ -1128,13 +1128,13 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, ...@@ -1128,13 +1128,13 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
u64 drops, bool hw) u64 drops, bool hw)
{ {
if (a->cpu_bstats) { if (a->cpu_bstats) {
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); _bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
this_cpu_ptr(a->cpu_qstats)->drops += drops; this_cpu_ptr(a->cpu_qstats)->drops += drops;
if (hw) if (hw)
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw), _bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
bytes, packets); bytes, packets);
return; return;
} }
......
...@@ -41,7 +41,7 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act, ...@@ -41,7 +41,7 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
int action, filter_res; int action, filter_res;
tcf_lastuse_update(&prog->tcf_tm); tcf_lastuse_update(&prog->tcf_tm);
bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb); bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
filter = rcu_dereference(prog->filter); filter = rcu_dereference(prog->filter);
if (at_ingress) { if (at_ingress) {
......
...@@ -718,7 +718,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, ...@@ -718,7 +718,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
u8 *tlv_data; u8 *tlv_data;
u16 metalen; u16 metalen;
bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb); bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
tcf_lastuse_update(&ife->tcf_tm); tcf_lastuse_update(&ife->tcf_tm);
if (skb_at_tc_ingress(skb)) if (skb_at_tc_ingress(skb))
...@@ -806,7 +806,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, ...@@ -806,7 +806,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
exceed_mtu = true; exceed_mtu = true;
} }
bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb); bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
tcf_lastuse_update(&ife->tcf_tm); tcf_lastuse_update(&ife->tcf_tm);
if (!metalen) { /* no metadata to send */ if (!metalen) { /* no metadata to send */
......
...@@ -59,7 +59,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -59,7 +59,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
int ret, mac_len; int ret, mac_len;
tcf_lastuse_update(&m->tcf_tm); tcf_lastuse_update(&m->tcf_tm);
bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb); bstats_update(this_cpu_ptr(m->common.cpu_bstats), skb);
/* Ensure 'data' points at mac_header prior calling mpls manipulating /* Ensure 'data' points at mac_header prior calling mpls manipulating
* functions. * functions.
......
...@@ -248,7 +248,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -248,7 +248,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
int ret; int ret;
tcf_lastuse_update(&police->tcf_tm); tcf_lastuse_update(&police->tcf_tm);
bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb); bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);
ret = READ_ONCE(police->tcf_action); ret = READ_ONCE(police->tcf_action);
p = rcu_dereference_bh(police->params); p = rcu_dereference_bh(police->params);
......
...@@ -163,7 +163,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -163,7 +163,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
int retval; int retval;
tcf_lastuse_update(&s->tcf_tm); tcf_lastuse_update(&s->tcf_tm);
bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb); bstats_update(this_cpu_ptr(s->common.cpu_bstats), skb);
retval = READ_ONCE(s->tcf_action); retval = READ_ONCE(s->tcf_action);
psample_group = rcu_dereference_bh(s->psample_group); psample_group = rcu_dereference_bh(s->psample_group);
......
...@@ -36,7 +36,8 @@ static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -36,7 +36,8 @@ static int tcf_simp_act(struct sk_buff *skb, const struct tc_action *a,
* then it would look like "hello_3" (without quotes) * then it would look like "hello_3" (without quotes)
*/ */
pr_info("simple: %s_%llu\n", pr_info("simple: %s_%llu\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets); (char *)d->tcfd_defdata,
u64_stats_read(&d->tcf_bstats.packets));
spin_unlock(&d->tcf_lock); spin_unlock(&d->tcf_lock);
return d->tcf_action; return d->tcf_action;
} }
......
...@@ -31,7 +31,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -31,7 +31,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
int action; int action;
tcf_lastuse_update(&d->tcf_tm); tcf_lastuse_update(&d->tcf_tm);
bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
params = rcu_dereference_bh(d->params); params = rcu_dereference_bh(d->params);
action = READ_ONCE(d->tcf_action); action = READ_ONCE(d->tcf_action);
......
...@@ -31,7 +31,7 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -31,7 +31,7 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
u64 flags; u64 flags;
tcf_lastuse_update(&d->tcf_tm); tcf_lastuse_update(&d->tcf_tm);
bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
action = READ_ONCE(d->tcf_action); action = READ_ONCE(d->tcf_action);
if (unlikely(action == TC_ACT_SHOT)) if (unlikely(action == TC_ACT_SHOT))
......
...@@ -885,7 +885,7 @@ static void qdisc_offload_graft_root(struct net_device *dev, ...@@ -885,7 +885,7 @@ static void qdisc_offload_graft_root(struct net_device *dev,
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 portid, u32 seq, u16 flags, int event) u32 portid, u32 seq, u16 flags, int event)
{ {
struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL; struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
struct gnet_stats_queue __percpu *cpu_qstats = NULL; struct gnet_stats_queue __percpu *cpu_qstats = NULL;
struct tcmsg *tcm; struct tcmsg *tcm;
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
......
...@@ -52,7 +52,7 @@ struct atm_flow_data { ...@@ -52,7 +52,7 @@ struct atm_flow_data {
struct atm_qdisc_data *parent; /* parent qdisc */ struct atm_qdisc_data *parent; /* parent qdisc */
struct socket *sock; /* for closing */ struct socket *sock; /* for closing */
int ref; /* reference count */ int ref; /* reference count */
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats; struct gnet_stats_queue qstats;
struct list_head list; struct list_head list;
struct atm_flow_data *excess; /* flow for excess traffic; struct atm_flow_data *excess; /* flow for excess traffic;
...@@ -548,7 +548,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt, ...@@ -548,7 +548,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
INIT_LIST_HEAD(&p->flows); INIT_LIST_HEAD(&p->flows);
INIT_LIST_HEAD(&p->link.list); INIT_LIST_HEAD(&p->link.list);
gnet_stats_basic_packed_init(&p->link.bstats); gnet_stats_basic_sync_init(&p->link.bstats);
list_add(&p->link.list, &p->flows); list_add(&p->link.list, &p->flows);
p->link.q = qdisc_create_dflt(sch->dev_queue, p->link.q = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, sch->handle, extack); &pfifo_qdisc_ops, sch->handle, extack);
......
...@@ -116,7 +116,7 @@ struct cbq_class { ...@@ -116,7 +116,7 @@ struct cbq_class {
long avgidle; long avgidle;
long deficit; /* Saved deficit for WRR */ long deficit; /* Saved deficit for WRR */
psched_time_t penalized; psched_time_t penalized;
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats; struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est; struct net_rate_estimator __rcu *rate_est;
struct tc_cbq_xstats xstats; struct tc_cbq_xstats xstats;
...@@ -1610,7 +1610,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t ...@@ -1610,7 +1610,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (cl == NULL) if (cl == NULL)
goto failure; goto failure;
gnet_stats_basic_packed_init(&cl->bstats); gnet_stats_basic_sync_init(&cl->bstats);
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) { if (err) {
kfree(cl); kfree(cl);
......
...@@ -19,7 +19,7 @@ struct drr_class { ...@@ -19,7 +19,7 @@ struct drr_class {
struct Qdisc_class_common common; struct Qdisc_class_common common;
unsigned int filter_cnt; unsigned int filter_cnt;
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats; struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est; struct net_rate_estimator __rcu *rate_est;
struct list_head alist; struct list_head alist;
...@@ -106,7 +106,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, ...@@ -106,7 +106,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL) if (cl == NULL)
return -ENOBUFS; return -ENOBUFS;
gnet_stats_basic_packed_init(&cl->bstats); gnet_stats_basic_sync_init(&cl->bstats);
cl->common.classid = classid; cl->common.classid = classid;
cl->quantum = quantum; cl->quantum = quantum;
cl->qdisc = qdisc_create_dflt(sch->dev_queue, cl->qdisc = qdisc_create_dflt(sch->dev_queue,
......
...@@ -41,7 +41,7 @@ struct ets_class { ...@@ -41,7 +41,7 @@ struct ets_class {
struct Qdisc *qdisc; struct Qdisc *qdisc;
u32 quantum; u32 quantum;
u32 deficit; u32 deficit;
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats; struct gnet_stats_queue qstats;
}; };
...@@ -689,7 +689,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, ...@@ -689,7 +689,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
q->classes[i].qdisc = NULL; q->classes[i].qdisc = NULL;
q->classes[i].quantum = 0; q->classes[i].quantum = 0;
q->classes[i].deficit = 0; q->classes[i].deficit = 0;
gnet_stats_basic_packed_init(&q->classes[i].bstats); gnet_stats_basic_sync_init(&q->classes[i].bstats);
memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats)); memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
} }
return 0; return 0;
......
...@@ -892,12 +892,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, ...@@ -892,12 +892,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
__skb_queue_head_init(&sch->gso_skb); __skb_queue_head_init(&sch->gso_skb);
__skb_queue_head_init(&sch->skb_bad_txq); __skb_queue_head_init(&sch->skb_bad_txq);
qdisc_skb_head_init(&sch->q); qdisc_skb_head_init(&sch->q);
gnet_stats_basic_packed_init(&sch->bstats); gnet_stats_basic_sync_init(&sch->bstats);
spin_lock_init(&sch->q.lock); spin_lock_init(&sch->q.lock);
if (ops->static_flags & TCQ_F_CPUSTATS) { if (ops->static_flags & TCQ_F_CPUSTATS) {
sch->cpu_bstats = sch->cpu_bstats =
netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
if (!sch->cpu_bstats) if (!sch->cpu_bstats)
goto errout1; goto errout1;
......
...@@ -366,7 +366,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch) ...@@ -366,7 +366,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
hw_stats->parent = sch->parent; hw_stats->parent = sch->parent;
for (i = 0; i < MAX_DPs; i++) { for (i = 0; i < MAX_DPs; i++) {
gnet_stats_basic_packed_init(&hw_stats->stats.bstats[i]); gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
if (table->tab[i]) if (table->tab[i])
hw_stats->stats.xstats[i] = &table->tab[i]->stats; hw_stats->stats.xstats[i] = &table->tab[i]->stats;
} }
...@@ -378,12 +378,12 @@ static int gred_offload_dump_stats(struct Qdisc *sch) ...@@ -378,12 +378,12 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
for (i = 0; i < MAX_DPs; i++) { for (i = 0; i < MAX_DPs; i++) {
if (!table->tab[i]) if (!table->tab[i])
continue; continue;
table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets; table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes; table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog; table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
bytes += hw_stats->stats.bstats[i].bytes; bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
packets += hw_stats->stats.bstats[i].packets; packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
sch->qstats.qlen += hw_stats->stats.qstats[i].qlen; sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
sch->qstats.backlog += hw_stats->stats.qstats[i].backlog; sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
sch->qstats.drops += hw_stats->stats.qstats[i].drops; sch->qstats.drops += hw_stats->stats.qstats[i].drops;
......
...@@ -111,7 +111,7 @@ enum hfsc_class_flags { ...@@ -111,7 +111,7 @@ enum hfsc_class_flags {
struct hfsc_class { struct hfsc_class {
struct Qdisc_class_common cl_common; struct Qdisc_class_common cl_common;
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats; struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est; struct net_rate_estimator __rcu *rate_est;
struct tcf_proto __rcu *filter_list; /* filter list */ struct tcf_proto __rcu *filter_list; /* filter list */
...@@ -1406,7 +1406,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt, ...@@ -1406,7 +1406,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
if (err) if (err)
return err; return err;
gnet_stats_basic_packed_init(&q->root.bstats); gnet_stats_basic_sync_init(&q->root.bstats);
q->root.cl_common.classid = sch->handle; q->root.cl_common.classid = sch->handle;
q->root.sched = q; q->root.sched = q;
q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
......
...@@ -113,8 +113,8 @@ struct htb_class { ...@@ -113,8 +113,8 @@ struct htb_class {
/* /*
* Written often fields * Written often fields
*/ */
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
struct gnet_stats_basic_packed bstats_bias; struct gnet_stats_basic_sync bstats_bias;
struct tc_htb_xstats xstats; /* our special stats */ struct tc_htb_xstats xstats; /* our special stats */
/* token bucket parameters */ /* token bucket parameters */
...@@ -1312,7 +1312,7 @@ static void htb_offload_aggregate_stats(struct htb_sched *q, ...@@ -1312,7 +1312,7 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
struct htb_class *c; struct htb_class *c;
unsigned int i; unsigned int i;
gnet_stats_basic_packed_init(&cl->bstats); gnet_stats_basic_sync_init(&cl->bstats);
for (i = 0; i < q->clhash.hashsize; i++) { for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) { hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
...@@ -1324,11 +1324,11 @@ static void htb_offload_aggregate_stats(struct htb_sched *q, ...@@ -1324,11 +1324,11 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
if (p != cl) if (p != cl)
continue; continue;
bytes += c->bstats_bias.bytes; bytes += u64_stats_read(&c->bstats_bias.bytes);
packets += c->bstats_bias.packets; packets += u64_stats_read(&c->bstats_bias.packets);
if (c->level == 0) { if (c->level == 0) {
bytes += c->leaf.q->bstats.bytes; bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
packets += c->leaf.q->bstats.packets; packets += u64_stats_read(&c->leaf.q->bstats.packets);
} }
} }
} }
...@@ -1359,10 +1359,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) ...@@ -1359,10 +1359,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
if (cl->leaf.q) if (cl->leaf.q)
cl->bstats = cl->leaf.q->bstats; cl->bstats = cl->leaf.q->bstats;
else else
gnet_stats_basic_packed_init(&cl->bstats); gnet_stats_basic_sync_init(&cl->bstats);
_bstats_update(&cl->bstats, _bstats_update(&cl->bstats,
cl->bstats_bias.bytes, u64_stats_read(&cl->bstats_bias.bytes),
cl->bstats_bias.packets); u64_stats_read(&cl->bstats_bias.packets));
} else { } else {
htb_offload_aggregate_stats(q, cl); htb_offload_aggregate_stats(q, cl);
} }
...@@ -1582,8 +1582,8 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl, ...@@ -1582,8 +1582,8 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
if (cl->parent) { if (cl->parent) {
_bstats_update(&cl->parent->bstats_bias, _bstats_update(&cl->parent->bstats_bias,
q->bstats.bytes, u64_stats_read(&q->bstats.bytes),
q->bstats.packets); u64_stats_read(&q->bstats.packets));
} }
offload_opt = (struct tc_htb_qopt_offload) { offload_opt = (struct tc_htb_qopt_offload) {
...@@ -1853,8 +1853,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, ...@@ -1853,8 +1853,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!cl) if (!cl)
goto failure; goto failure;
gnet_stats_basic_packed_init(&cl->bstats); gnet_stats_basic_sync_init(&cl->bstats);
gnet_stats_basic_packed_init(&cl->bstats_bias); gnet_stats_basic_sync_init(&cl->bstats_bias);
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) { if (err) {
...@@ -1930,8 +1930,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, ...@@ -1930,8 +1930,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
goto err_kill_estimator; goto err_kill_estimator;
} }
_bstats_update(&parent->bstats_bias, _bstats_update(&parent->bstats_bias,
old_q->bstats.bytes, u64_stats_read(&old_q->bstats.bytes),
old_q->bstats.packets); u64_stats_read(&old_q->bstats.packets));
qdisc_put(old_q); qdisc_put(old_q);
} }
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
......
...@@ -132,7 +132,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -132,7 +132,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
unsigned int ntx; unsigned int ntx;
sch->q.qlen = 0; sch->q.qlen = 0;
gnet_stats_basic_packed_init(&sch->bstats); gnet_stats_basic_sync_init(&sch->bstats);
memset(&sch->qstats, 0, sizeof(sch->qstats)); memset(&sch->qstats, 0, sizeof(sch->qstats));
/* MQ supports lockless qdiscs. However, statistics accounting needs /* MQ supports lockless qdiscs. However, statistics accounting needs
......
...@@ -390,7 +390,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -390,7 +390,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
unsigned int ntx, tc; unsigned int ntx, tc;
sch->q.qlen = 0; sch->q.qlen = 0;
gnet_stats_basic_packed_init(&sch->bstats); gnet_stats_basic_sync_init(&sch->bstats);
memset(&sch->qstats, 0, sizeof(sch->qstats)); memset(&sch->qstats, 0, sizeof(sch->qstats));
/* MQ supports lockless qdiscs. However, statistics accounting needs /* MQ supports lockless qdiscs. However, statistics accounting needs
...@@ -500,11 +500,11 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, ...@@ -500,11 +500,11 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
int i; int i;
__u32 qlen; __u32 qlen;
struct gnet_stats_queue qstats = {0}; struct gnet_stats_queue qstats = {0};
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
struct net_device *dev = qdisc_dev(sch); struct net_device *dev = qdisc_dev(sch);
struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK]; struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];
gnet_stats_basic_packed_init(&bstats); gnet_stats_basic_sync_init(&bstats);
/* Drop lock here it will be reclaimed before touching /* Drop lock here it will be reclaimed before touching
* statistics this is required because the d->lock we * statistics this is required because the d->lock we
* hold here is the look on dev_queue->qdisc_sleeping * hold here is the look on dev_queue->qdisc_sleeping
......
...@@ -131,7 +131,7 @@ struct qfq_class { ...@@ -131,7 +131,7 @@ struct qfq_class {
unsigned int filter_cnt; unsigned int filter_cnt;
struct gnet_stats_basic_packed bstats; struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats; struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est; struct net_rate_estimator __rcu *rate_est;
struct Qdisc *qdisc; struct Qdisc *qdisc;
...@@ -465,7 +465,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, ...@@ -465,7 +465,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL) if (cl == NULL)
return -ENOBUFS; return -ENOBUFS;
gnet_stats_basic_packed_init(&cl->bstats); gnet_stats_basic_sync_init(&cl->bstats);
cl->common.classid = classid; cl->common.classid = classid;
cl->deficit = lmax; cl->deficit = lmax;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment