Commit e7614370 authored by Cong Wang, committed by David S. Miller

net_sched: use idr to allocate u32 filter handles

Instead of calling u32_lookup_ht() in a loop to find
an unused handle, just switch to the idr API to allocate
new handles. u32 filters are special as the handle
could contain a hash table id and a key id, so we
need two IDRs to allocate each of them.

Cc: Chris Mi <chrism@mellanox.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1d8134fe
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
#include <net/act_api.h> #include <net/act_api.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/idr.h>
struct tc_u_knode { struct tc_u_knode {
struct tc_u_knode __rcu *next; struct tc_u_knode __rcu *next;
...@@ -82,6 +83,7 @@ struct tc_u_hnode { ...@@ -82,6 +83,7 @@ struct tc_u_hnode {
struct tc_u_common *tp_c; struct tc_u_common *tp_c;
int refcnt; int refcnt;
unsigned int divisor; unsigned int divisor;
struct idr handle_idr;
struct rcu_head rcu; struct rcu_head rcu;
/* The 'ht' field MUST be the last field in structure to allow for /* The 'ht' field MUST be the last field in structure to allow for
* more entries allocated at end of structure. * more entries allocated at end of structure.
...@@ -93,7 +95,7 @@ struct tc_u_common { ...@@ -93,7 +95,7 @@ struct tc_u_common {
struct tc_u_hnode __rcu *hlist; struct tc_u_hnode __rcu *hlist;
struct Qdisc *q; struct Qdisc *q;
int refcnt; int refcnt;
u32 hgenerator; struct idr handle_idr;
struct hlist_node hnode; struct hlist_node hnode;
struct rcu_head rcu; struct rcu_head rcu;
}; };
...@@ -311,19 +313,19 @@ static void *u32_get(struct tcf_proto *tp, u32 handle) ...@@ -311,19 +313,19 @@ static void *u32_get(struct tcf_proto *tp, u32 handle)
return u32_lookup_key(ht, handle); return u32_lookup_key(ht, handle);
} }
static u32 gen_new_htid(struct tc_u_common *tp_c) static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{ {
int i = 0x800; unsigned long idr_index;
int err;
/* hgenerator only used inside rtnl lock it is safe to increment /* This is only used inside rtnl lock it is safe to increment
* without read _copy_ update semantics * without read _copy_ update semantics
*/ */
do { err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index,
if (++tp_c->hgenerator == 0x7FF) 1, 0x7FF, GFP_KERNEL);
tp_c->hgenerator = 1; if (err)
} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); return 0;
return (u32)(idr_index | 0x800) << 20;
return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
} }
static struct hlist_head *tc_u_common_hash; static struct hlist_head *tc_u_common_hash;
...@@ -366,8 +368,9 @@ static int u32_init(struct tcf_proto *tp) ...@@ -366,8 +368,9 @@ static int u32_init(struct tcf_proto *tp)
return -ENOBUFS; return -ENOBUFS;
root_ht->refcnt++; root_ht->refcnt++;
root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000; root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
root_ht->prio = tp->prio; root_ht->prio = tp->prio;
idr_init(&root_ht->handle_idr);
if (tp_c == NULL) { if (tp_c == NULL) {
tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL); tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
...@@ -377,6 +380,7 @@ static int u32_init(struct tcf_proto *tp) ...@@ -377,6 +380,7 @@ static int u32_init(struct tcf_proto *tp)
} }
tp_c->q = tp->q; tp_c->q = tp->q;
INIT_HLIST_NODE(&tp_c->hnode); INIT_HLIST_NODE(&tp_c->hnode);
idr_init(&tp_c->handle_idr);
h = tc_u_hash(tp); h = tc_u_hash(tp);
hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]); hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
...@@ -565,6 +569,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) ...@@ -565,6 +569,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
rtnl_dereference(n->next)); rtnl_dereference(n->next));
tcf_unbind_filter(tp, &n->res); tcf_unbind_filter(tp, &n->res);
u32_remove_hw_knode(tp, n->handle); u32_remove_hw_knode(tp, n->handle);
idr_remove_ext(&ht->handle_idr, n->handle);
call_rcu(&n->rcu, u32_delete_key_freepf_rcu); call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
} }
} }
...@@ -586,6 +591,8 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) ...@@ -586,6 +591,8 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
hn = &phn->next, phn = rtnl_dereference(*hn)) { hn = &phn->next, phn = rtnl_dereference(*hn)) {
if (phn == ht) { if (phn == ht) {
u32_clear_hw_hnode(tp, ht); u32_clear_hw_hnode(tp, ht);
idr_destroy(&ht->handle_idr);
idr_remove_ext(&tp_c->handle_idr, ht->handle);
RCU_INIT_POINTER(*hn, ht->next); RCU_INIT_POINTER(*hn, ht->next);
kfree_rcu(ht, rcu); kfree_rcu(ht, rcu);
return 0; return 0;
...@@ -633,6 +640,7 @@ static void u32_destroy(struct tcf_proto *tp) ...@@ -633,6 +640,7 @@ static void u32_destroy(struct tcf_proto *tp)
kfree_rcu(ht, rcu); kfree_rcu(ht, rcu);
} }
idr_destroy(&tp_c->handle_idr);
kfree(tp_c); kfree(tp_c);
} }
...@@ -701,27 +709,21 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last) ...@@ -701,27 +709,21 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
return ret; return ret;
} }
/* Allocate a fresh key id within hash table @ht for handle base @htid.
 *
 * Preferred range is [htid|0x800, htid|0xFFF]; if that is exhausted the
 * low range (htid+1 .. htid|0xFFF] is tried as a fallback, matching the
 * old bitmap-based search order. If both attempts fail, the maximum
 * handle value is returned as a last resort.
 *
 * NOTE(review): on double failure this returns @max even though that id
 * may already be in use — same behaviour as the committed patch; confirm
 * callers tolerate the collision.
 */
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	unsigned long new_index;
	u32 lo = htid;			/* exclusive lower bound for fallback */
	u32 hi = htid | 0x800;		/* preferred starting point */
	u32 max = htid | 0xFFF;		/* largest valid key id for this htid */
	int ret;

	/* idr_alloc_ext() treats the end bound as exclusive, hence max + 1. */
	ret = idr_alloc_ext(&ht->handle_idr, NULL, &new_index,
			    hi, max + 1, GFP_KERNEL);
	if (ret) {
		ret = idr_alloc_ext(&ht->handle_idr, NULL, &new_index,
				    lo + 1, max + 1, GFP_KERNEL);
		if (ret)
			return max;
	}

	return (u32)new_index;
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
...@@ -806,6 +808,7 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c, ...@@ -806,6 +808,7 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
if (pins->handle == n->handle) if (pins->handle == n->handle)
break; break;
idr_replace_ext(&ht->handle_idr, n, n->handle);
RCU_INIT_POINTER(n->next, pins->next); RCU_INIT_POINTER(n->next, pins->next);
rcu_assign_pointer(*ins, n); rcu_assign_pointer(*ins, n);
} }
...@@ -937,22 +940,33 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ...@@ -937,22 +940,33 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL; return -EINVAL;
if (TC_U32_KEY(handle)) if (TC_U32_KEY(handle))
return -EINVAL; return -EINVAL;
if (handle == 0) {
handle = gen_new_htid(tp->data);
if (handle == 0)
return -ENOMEM;
}
ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL); ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
if (ht == NULL) if (ht == NULL)
return -ENOBUFS; return -ENOBUFS;
if (handle == 0) {
handle = gen_new_htid(tp->data, ht);
if (handle == 0) {
kfree(ht);
return -ENOMEM;
}
} else {
err = idr_alloc_ext(&tp_c->handle_idr, ht, NULL,
handle, handle + 1, GFP_KERNEL);
if (err) {
kfree(ht);
return err;
}
}
ht->tp_c = tp_c; ht->tp_c = tp_c;
ht->refcnt = 1; ht->refcnt = 1;
ht->divisor = divisor; ht->divisor = divisor;
ht->handle = handle; ht->handle = handle;
ht->prio = tp->prio; ht->prio = tp->prio;
idr_init(&ht->handle_idr);
err = u32_replace_hw_hnode(tp, ht, flags); err = u32_replace_hw_hnode(tp, ht, flags);
if (err) { if (err) {
idr_remove_ext(&tp_c->handle_idr, handle);
kfree(ht); kfree(ht);
return err; return err;
} }
...@@ -986,24 +1000,33 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ...@@ -986,24 +1000,33 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid)) if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
return -EINVAL; return -EINVAL;
handle = htid | TC_U32_NODE(handle); handle = htid | TC_U32_NODE(handle);
err = idr_alloc_ext(&ht->handle_idr, NULL, NULL,
handle, handle + 1,
GFP_KERNEL);
if (err)
return err;
} else } else
handle = gen_new_kid(ht, htid); handle = gen_new_kid(ht, htid);
if (tb[TCA_U32_SEL] == NULL) if (tb[TCA_U32_SEL] == NULL) {
return -EINVAL; err = -EINVAL;
goto erridr;
}
s = nla_data(tb[TCA_U32_SEL]); s = nla_data(tb[TCA_U32_SEL]);
n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
if (n == NULL) if (n == NULL) {
return -ENOBUFS; err = -ENOBUFS;
goto erridr;
}
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64); size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt)); n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
if (!n->pf) { if (!n->pf) {
kfree(n); err = -ENOBUFS;
return -ENOBUFS; goto errfree;
} }
#endif #endif
...@@ -1066,9 +1089,12 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ...@@ -1066,9 +1089,12 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
errout: errout:
tcf_exts_destroy(&n->exts); tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF #ifdef CONFIG_CLS_U32_PERF
errfree:
free_percpu(n->pf); free_percpu(n->pf);
#endif #endif
kfree(n); kfree(n);
erridr:
idr_remove_ext(&ht->handle_idr, handle);
return err; return err;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment