Commit 008830bc authored by Eric Dumazet, committed by David S. Miller

net_sched: fq_codel: cache skb->truesize into skb->cb

Now we defer skb drops, it makes sense to keep a copy
of skb->truesize in struct codel_skb_cb to avoid one
cache line miss per dropped skb in fq_codel_drop(),
to reduce latencies a bit further.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 520ac30f
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ /* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
struct codel_skb_cb { struct codel_skb_cb {
codel_time_t enqueue_time; codel_time_t enqueue_time;
unsigned int mem_usage;
}; };
static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
......
...@@ -172,7 +172,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets, ...@@ -172,7 +172,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
do { do {
skb = dequeue_head(flow); skb = dequeue_head(flow);
len += qdisc_pkt_len(skb); len += qdisc_pkt_len(skb);
mem += skb->truesize; mem += get_codel_cb(skb)->mem_usage;
__qdisc_drop(skb, to_free); __qdisc_drop(skb, to_free);
} while (++i < max_packets && len < threshold); } while (++i < max_packets && len < threshold);
...@@ -216,7 +216,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, ...@@ -216,7 +216,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
flow->deficit = q->quantum; flow->deficit = q->quantum;
flow->dropped = 0; flow->dropped = 0;
} }
q->memory_usage += skb->truesize; get_codel_cb(skb)->mem_usage = skb->truesize;
q->memory_usage += get_codel_cb(skb)->mem_usage;
memory_limited = q->memory_usage > q->memory_limit; memory_limited = q->memory_usage > q->memory_limit;
if (++sch->q.qlen <= sch->limit && !memory_limited) if (++sch->q.qlen <= sch->limit && !memory_limited)
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
...@@ -267,7 +268,7 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx) ...@@ -267,7 +268,7 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
if (flow->head) { if (flow->head) {
skb = dequeue_head(flow); skb = dequeue_head(flow);
q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
q->memory_usage -= skb->truesize; q->memory_usage -= get_codel_cb(skb)->mem_usage;
sch->q.qlen--; sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb); sch->qstats.backlog -= qdisc_pkt_len(skb);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment