Commit e69f73bf authored by David S. Miller's avatar David S. Miller

Merge branch 'remove-qdisc-throttle'

Eric Dumazet says:

====================
net_sched: remove qdisc_is_throttled()

HTB, CBQ and HFSC pay a very high cost updating the qdisc 'throttled'
status that nothing but CBQ seems to use.

CBQ usage is flaky anyway, since no qdisc ->enqueue() updates the
'throttled' qdisc status.

This looks like some 'optimization' that actually cost more than code
without the optimization, and might cause latency issues with CBQ.

In my tests, I could achieve an 8 % performance increase in TCP_RR
workload through HTB qdisc, in presence of throttled classes,
and 5 % without throttled classes.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7a04c012 45f50bed
...@@ -67,12 +67,12 @@ struct qdisc_watchdog { ...@@ -67,12 +67,12 @@ struct qdisc_watchdog {
}; };
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle); void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
psched_time_t expires) psched_time_t expires)
{ {
qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true); qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
} }
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd); void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
......
...@@ -26,7 +26,6 @@ struct qdisc_rate_table { ...@@ -26,7 +26,6 @@ struct qdisc_rate_table {
enum qdisc_state_t { enum qdisc_state_t {
__QDISC_STATE_SCHED, __QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED, __QDISC_STATE_DEACTIVATED,
__QDISC_STATE_THROTTLED,
}; };
struct qdisc_size_table { struct qdisc_size_table {
...@@ -125,21 +124,6 @@ static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq) ...@@ -125,21 +124,6 @@ static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
#endif #endif
} }
static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}
static inline void qdisc_throttled(struct Qdisc *qdisc)
{
set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
struct Qdisc_class_ops { struct Qdisc_class_ops {
/* Child qdisc manipulation */ /* Child qdisc manipulation */
struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
......
...@@ -583,7 +583,6 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) ...@@ -583,7 +583,6 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
timer); timer);
rcu_read_lock(); rcu_read_lock();
qdisc_unthrottled(wd->qdisc);
__netif_schedule(qdisc_root(wd->qdisc)); __netif_schedule(qdisc_root(wd->qdisc));
rcu_read_unlock(); rcu_read_unlock();
...@@ -598,15 +597,12 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) ...@@ -598,15 +597,12 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
} }
EXPORT_SYMBOL(qdisc_watchdog_init); EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle) void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{ {
if (test_bit(__QDISC_STATE_DEACTIVATED, if (test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(wd->qdisc)->state)) &qdisc_root_sleeping(wd->qdisc)->state))
return; return;
if (throttle)
qdisc_throttled(wd->qdisc);
if (wd->last_expires == expires) if (wd->last_expires == expires)
return; return;
...@@ -620,7 +616,6 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule_ns); ...@@ -620,7 +616,6 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{ {
hrtimer_cancel(&wd->timer); hrtimer_cancel(&wd->timer);
qdisc_unthrottled(wd->qdisc);
} }
EXPORT_SYMBOL(qdisc_watchdog_cancel); EXPORT_SYMBOL(qdisc_watchdog_cancel);
......
...@@ -345,7 +345,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) ...@@ -345,7 +345,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{ {
int toplevel = q->toplevel; int toplevel = q->toplevel;
if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) { if (toplevel > cl->level) {
psched_time_t now = psched_get_time(); psched_time_t now = psched_get_time();
do { do {
...@@ -513,7 +513,6 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) ...@@ -513,7 +513,6 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED); hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
} }
qdisc_unthrottled(sch);
__netif_schedule(qdisc_root(sch)); __netif_schedule(qdisc_root(sch));
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
...@@ -819,7 +818,6 @@ cbq_dequeue(struct Qdisc *sch) ...@@ -819,7 +818,6 @@ cbq_dequeue(struct Qdisc *sch)
if (skb) { if (skb) {
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
qdisc_unthrottled(sch);
return skb; return skb;
} }
......
...@@ -445,8 +445,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch) ...@@ -445,8 +445,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
if (!head->first) { if (!head->first) {
if (q->time_next_delayed_flow != ~0ULL) if (q->time_next_delayed_flow != ~0ULL)
qdisc_watchdog_schedule_ns(&q->watchdog, qdisc_watchdog_schedule_ns(&q->watchdog,
q->time_next_delayed_flow, q->time_next_delayed_flow);
false);
return NULL; return NULL;
} }
} }
......
...@@ -1664,7 +1664,6 @@ hfsc_dequeue(struct Qdisc *sch) ...@@ -1664,7 +1664,6 @@ hfsc_dequeue(struct Qdisc *sch)
set_passive(cl); set_passive(cl);
} }
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--; sch->q.qlen--;
......
...@@ -889,7 +889,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) ...@@ -889,7 +889,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
if (skb != NULL) { if (skb != NULL) {
ok: ok:
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
qdisc_unthrottled(sch);
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--; sch->q.qlen--;
return skb; return skb;
...@@ -929,7 +928,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch) ...@@ -929,7 +928,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
} }
qdisc_qstats_overlimit(sch); qdisc_qstats_overlimit(sch);
if (likely(next_event > q->now)) if (likely(next_event > q->now))
qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true); qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
else else
schedule_work(&q->work); schedule_work(&q->work);
fin: fin:
......
...@@ -582,15 +582,11 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) ...@@ -582,15 +582,11 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
struct sk_buff *skb; struct sk_buff *skb;
struct rb_node *p; struct rb_node *p;
if (qdisc_is_throttled(sch))
return NULL;
tfifo_dequeue: tfifo_dequeue:
skb = __skb_dequeue(&sch->q); skb = __skb_dequeue(&sch->q);
if (skb) { if (skb) {
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
deliver: deliver:
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
return skb; return skb;
} }
......
...@@ -64,6 +64,8 @@ struct plug_sched_data { ...@@ -64,6 +64,8 @@ struct plug_sched_data {
*/ */
bool unplug_indefinite; bool unplug_indefinite;
bool throttled;
/* Queue Limit in bytes */ /* Queue Limit in bytes */
u32 limit; u32 limit;
...@@ -103,7 +105,7 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch) ...@@ -103,7 +105,7 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
{ {
struct plug_sched_data *q = qdisc_priv(sch); struct plug_sched_data *q = qdisc_priv(sch);
if (qdisc_is_throttled(sch)) if (q->throttled)
return NULL; return NULL;
if (!q->unplug_indefinite) { if (!q->unplug_indefinite) {
...@@ -111,7 +113,7 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch) ...@@ -111,7 +113,7 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
/* No more packets to dequeue. Block the queue /* No more packets to dequeue. Block the queue
* and wait for the next release command. * and wait for the next release command.
*/ */
qdisc_throttled(sch); q->throttled = true;
return NULL; return NULL;
} }
q->pkts_to_release--; q->pkts_to_release--;
...@@ -141,7 +143,7 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt) ...@@ -141,7 +143,7 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
q->limit = ctl->limit; q->limit = ctl->limit;
} }
qdisc_throttled(sch); q->throttled = true;
return 0; return 0;
} }
...@@ -173,7 +175,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -173,7 +175,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
q->pkts_last_epoch = q->pkts_current_epoch; q->pkts_last_epoch = q->pkts_current_epoch;
q->pkts_current_epoch = 0; q->pkts_current_epoch = 0;
if (q->unplug_indefinite) if (q->unplug_indefinite)
qdisc_throttled(sch); q->throttled = true;
q->unplug_indefinite = false; q->unplug_indefinite = false;
break; break;
case TCQ_PLUG_RELEASE_ONE: case TCQ_PLUG_RELEASE_ONE:
...@@ -182,7 +184,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -182,7 +184,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
*/ */
q->pkts_to_release += q->pkts_last_epoch; q->pkts_to_release += q->pkts_last_epoch;
q->pkts_last_epoch = 0; q->pkts_last_epoch = 0;
qdisc_unthrottled(sch); q->throttled = false;
netif_schedule_queue(sch->dev_queue); netif_schedule_queue(sch->dev_queue);
break; break;
case TCQ_PLUG_RELEASE_INDEFINITE: case TCQ_PLUG_RELEASE_INDEFINITE:
...@@ -190,7 +192,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt) ...@@ -190,7 +192,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
q->pkts_to_release = 0; q->pkts_to_release = 0;
q->pkts_last_epoch = 0; q->pkts_last_epoch = 0;
q->pkts_current_epoch = 0; q->pkts_current_epoch = 0;
qdisc_unthrottled(sch); q->throttled = false;
netif_schedule_queue(sch->dev_queue); netif_schedule_queue(sch->dev_queue);
break; break;
case TCQ_PLUG_LIMIT: case TCQ_PLUG_LIMIT:
......
...@@ -254,14 +254,12 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) ...@@ -254,14 +254,12 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
q->ptokens = ptoks; q->ptokens = ptoks;
qdisc_qstats_backlog_dec(sch, skb); qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--; sch->q.qlen--;
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
return skb; return skb;
} }
qdisc_watchdog_schedule_ns(&q->watchdog, qdisc_watchdog_schedule_ns(&q->watchdog,
now + max_t(long, -toks, -ptoks), now + max_t(long, -toks, -ptoks));
true);
/* Maybe we have a shorter packet in the queue, /* Maybe we have a shorter packet in the queue,
which can be sent now. It sounds cool, which can be sent now. It sounds cool,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment