Commit f4f64050 authored by John Fastabend, committed by David S. Miller

net: sched: make cls_u32 per cpu

This uses per cpu counters in cls_u32 in preparation
to convert over to rcu.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8332904a
...@@ -55,10 +55,12 @@ struct tc_u_knode { ...@@ -55,10 +55,12 @@ struct tc_u_knode {
struct tcf_result res; struct tcf_result res;
struct tc_u_hnode *ht_down; struct tc_u_hnode *ht_down;
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt *pf; struct tc_u32_pcnt __percpu *pf;
#endif #endif
#ifdef CONFIG_CLS_U32_MARK #ifdef CONFIG_CLS_U32_MARK
struct tc_u32_mark mark; u32 val;
u32 mask;
u32 __percpu *pcpu_success;
#endif #endif
struct tc_u32_sel sel; struct tc_u32_sel sel;
}; };
...@@ -115,16 +117,16 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct ...@@ -115,16 +117,16 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct
struct tc_u32_key *key = n->sel.keys; struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
n->pf->rcnt += 1; __this_cpu_inc(n->pf->rcnt);
j = 0; j = 0;
#endif #endif
#ifdef CONFIG_CLS_U32_MARK #ifdef CONFIG_CLS_U32_MARK
if ((skb->mark & n->mark.mask) != n->mark.val) { if ((skb->mark & n->mask) != n->val) {
n = n->next; n = n->next;
goto next_knode; goto next_knode;
} else { } else {
n->mark.success++; __this_cpu_inc(*n->pcpu_success);
} }
#endif #endif
...@@ -143,7 +145,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct ...@@ -143,7 +145,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct
goto next_knode; goto next_knode;
} }
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
n->pf->kcnts[j] += 1; __this_cpu_inc(n->pf->kcnts[j]);
j++; j++;
#endif #endif
} }
...@@ -159,7 +161,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct ...@@ -159,7 +161,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct
} }
#endif #endif
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
n->pf->rhit += 1; __this_cpu_inc(n->pf->rhit);
#endif #endif
r = tcf_exts_exec(skb, &n->exts, res); r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) { if (r < 0) {
...@@ -342,7 +344,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n) ...@@ -342,7 +344,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
if (n->ht_down) if (n->ht_down)
n->ht_down->refcnt--; n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
kfree(n->pf); free_percpu(n->pf);
#endif #endif
kfree(n); kfree(n);
return 0; return 0;
...@@ -564,6 +566,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ...@@ -564,6 +566,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
struct nlattr *tb[TCA_U32_MAX + 1]; struct nlattr *tb[TCA_U32_MAX + 1];
u32 htid; u32 htid;
int err; int err;
#ifdef CONFIG_CLS_U32_PERF
size_t size;
#endif
if (opt == NULL) if (opt == NULL)
return handle ? -EINVAL : 0; return handle ? -EINVAL : 0;
...@@ -642,8 +647,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ...@@ -642,8 +647,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -ENOBUFS; return -ENOBUFS;
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
if (n->pf == NULL) { n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
if (!n->pf) {
kfree(n); kfree(n);
return -ENOBUFS; return -ENOBUFS;
} }
...@@ -656,12 +662,14 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ...@@ -656,12 +662,14 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE); tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
#ifdef CONFIG_CLS_U32_MARK #ifdef CONFIG_CLS_U32_MARK
n->pcpu_success = alloc_percpu(u32);
if (tb[TCA_U32_MARK]) { if (tb[TCA_U32_MARK]) {
struct tc_u32_mark *mark; struct tc_u32_mark *mark;
mark = nla_data(tb[TCA_U32_MARK]); mark = nla_data(tb[TCA_U32_MARK]);
memcpy(&n->mark, mark, sizeof(struct tc_u32_mark)); n->val = mark->val;
n->mark.success = 0; n->mask = mark->mask;
} }
#endif #endif
...@@ -745,6 +753,11 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, ...@@ -745,6 +753,11 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor)) if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
goto nla_put_failure; goto nla_put_failure;
} else { } else {
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt *gpf;
#endif
int cpu;
if (nla_put(skb, TCA_U32_SEL, if (nla_put(skb, TCA_U32_SEL,
sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
&n->sel)) &n->sel))
...@@ -762,9 +775,20 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, ...@@ -762,9 +775,20 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure; goto nla_put_failure;
#ifdef CONFIG_CLS_U32_MARK #ifdef CONFIG_CLS_U32_MARK
if ((n->mark.val || n->mark.mask) && if ((n->val || n->mask)) {
nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark)) struct tc_u32_mark mark = {.val = n->val,
goto nla_put_failure; .mask = n->mask,
.success = 0};
for_each_possible_cpu(cpu) {
__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpu);
mark.success += cnt;
}
if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
goto nla_put_failure;
}
#endif #endif
if (tcf_exts_dump(skb, &n->exts) < 0) if (tcf_exts_dump(skb, &n->exts) < 0)
...@@ -779,10 +803,29 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, ...@@ -779,10 +803,29 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
} }
#endif #endif
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
n->sel.nkeys * sizeof(u64),
GFP_KERNEL);
if (!gpf)
goto nla_put_failure;
for_each_possible_cpu(cpu) {
int i;
struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
gpf->rcnt += pf->rcnt;
gpf->rhit += pf->rhit;
for (i = 0; i < n->sel.nkeys; i++)
gpf->kcnts[i] += pf->kcnts[i];
}
if (nla_put(skb, TCA_U32_PCNT, if (nla_put(skb, TCA_U32_PCNT,
sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
n->pf)) gpf)) {
kfree(gpf);
goto nla_put_failure; goto nla_put_failure;
}
kfree(gpf);
#endif #endif
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment