Commit 5343a7f8 authored by Eric Dumazet, committed by David S. Miller

net_sched: htb: do not mix 1ns and 64ns time units

commit 56b765b7 ("htb: improved accuracy at high rates") added another
regression for low rates, because it mixes 1ns and 64ns time units.

So the maximum delay (mbuffer) was not 60 seconds, but 937 ms.
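Where the 937 ms figure comes from: one psched tick is 2^6 = 64 ns, so PSCHED_TICKS_PER_SEC is 10^9 >> 6 = 15,625,000. Once the tick-based mbuffer is compared against 1ns quantities, it is silently reinterpreted as nanoseconds. A minimal userspace sketch of the bad math, with the kernel constants reproduced here purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* reproduced from include/net/pkt_sched.h: one psched tick = 64 ns */
#define PSCHED_SHIFT		6
#define PSCHED_TICKS_PER_SEC	(1000000000ULL >> PSCHED_SHIFT)	/* 15625000 */

int main(void)
{
	/* mbuffer as computed pre-patch, in 64ns ticks */
	uint64_t mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 937500000 */

	/* ...but the ns-based token arithmetic treated it as nanoseconds */
	printf("intended 60 s, effective %.1f ms\n", mbuffer / 1e6);	/* 937.5 */
	return 0;
}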

Let's convert all time fields to 1ns, as 64bit arches are becoming the
norm.
Reported-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5e71d9d7
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -109,7 +109,7 @@ struct htb_class {
 	} un;
 	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
 	struct rb_node pq_node;	/* node for event queue */
-	psched_time_t pq_key;
+	s64	pq_key;

 	int prio_activity;	/* for which prios are we active */
 	enum htb_cmode cmode;	/* current mode of the class */
@@ -122,9 +122,9 @@ struct htb_class {
 	struct psched_ratecfg rate;
 	struct psched_ratecfg ceil;
 	s64	buffer, cbuffer;	/* token bucket depth/rate */
-	psched_tdiff_t	mbuffer;	/* max wait time */
+	s64	mbuffer;		/* max wait time */
 	s64	tokens, ctokens;	/* current number of tokens */
-	psched_time_t	t_c;		/* checkpoint time */
+	s64	t_c;			/* checkpoint time */
 };

 struct htb_sched {
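For reference, the tick<->ns helpers this patch leans on, as they were defined in include/net/pkt_sched.h at the time (the 64ns tick makes both conversions cheap shifts):

#define PSCHED_SHIFT		6
#define PSCHED_TICKS2NS(x)	((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)	((x) >> PSCHED_SHIFT)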
@@ -141,7 +141,7 @@ struct htb_sched {
 	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

 	/* time of nearest event per level (row) */
-	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
+	s64	near_ev_cache[TC_HTB_MAXDEPTH];

 	int defcls;	/* class where unclassified flows go to */
@@ -149,7 +149,7 @@ struct htb_sched {
 	struct tcf_proto *filter_list;

 	int rate2quantum;	/* quant = rate / rate2quantum */
-	psched_time_t now;	/* cached dequeue time */
+	s64	now;	/* cached dequeue time */
 	struct qdisc_watchdog watchdog;

 	/* non shaped skbs; let them go directly thru */
@@ -664,7 +664,7 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  * next pending event (0 for no event in pq, q->now for too many events).
  * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static psched_time_t htb_do_events(struct htb_sched *q, int level,
-				   unsigned long start)
+static s64 htb_do_events(struct htb_sched *q, int level,
+			 unsigned long start)
 {
 	/* don't run for longer than 2 jiffies; 2 is used instead of
@@ -857,7 +857,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
-	psched_time_t next_event;
+	s64 next_event;
 	unsigned long start_at;

 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
@@ -880,7 +880,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
 		/* common case optimization - skip event handler quickly */
 		int m;
-		psched_time_t event;
+		s64 event;

 		if (q->now >= q->near_ev_cache[level]) {
 			event = htb_do_events(q, level, start_at);
@@ -1117,8 +1117,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 	if (!cl->level && cl->un.leaf.q)
 		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
-	cl->xstats.tokens = cl->tokens;
-	cl->xstats.ctokens = cl->ctokens;
+	cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
+	cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);

 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
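Note the conversion direction in this hunk: tokens are now tracked internally in nanoseconds, but the dump ABI still speaks 64ns ticks, so the values are shifted back with PSCHED_NS2TICKS before being copied to userspace. The exported structure, quoted from include/uapi/linux/pkt_sched.h:

struct tc_htb_xstats {
	__u32 lends;
	__u32 borrows;
	__u32 giants;	/* too big packets (rate will not be accurate) */
	__u32 tokens;
	__u32 ctokens;
};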
@@ -1200,7 +1200,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
 	parent->tokens = parent->buffer;
 	parent->ctokens = parent->cbuffer;
-	parent->t_c = psched_get_time();
+	parent->t_c = ktime_to_ns(ktime_get());
 	parent->cmode = HTB_CAN_SEND;
 }
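Why ktime_to_ns(ktime_get()) is the natural 1ns replacement: the old psched_get_time() read the very same clock, only shifted down into 64ns ticks. As defined in include/net/pkt_sched.h of this era:

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_to_ns(ktime_get()));
}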
@@ -1417,8 +1417,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	/* set class to be in HTB_CAN_SEND state */
 	cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
 	cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
-	cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
-	cl->t_c = psched_get_time();
+	cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
+	cl->t_c = ktime_to_ns(ktime_get());
 	cl->cmode = HTB_CAN_SEND;

 	/* attach to the hash list and parent's family */
...
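A small but necessary detail in the hunk above: the ULL suffix. With NSEC_PER_SEC defined as 1000000000L, a plain 60 * NSEC_PER_SEC would be evaluated in long arithmetic and overflow on 32-bit arches (6 * 10^10 does not fit in 32 bits). A hypothetical userspace illustration of the safe form:

#include <stdio.h>
#define NSEC_PER_SEC	1000000000L	/* as in include/linux/time.h */

int main(void)
{
	/* 60ULL promotes the multiply to unsigned long long, so the
	 * result is correct even where long is only 32 bits wide */
	unsigned long long mbuffer = 60ULL * NSEC_PER_SEC;
	printf("%llu\n", mbuffer);	/* 60000000000 */
	return 0;
}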