Commit 5b3501fa authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

netfilter: nf_conntrack: per netns nf_conntrack_cachep

nf_conntrack_cachep is currently shared by all netns instances, but
because of SLAB_DESTROY_BY_RCU special semantics, this is wrong.

If we use a shared slab cache, one object can instantly fly from one
hash table (netns ONE) to another one (netns TWO), and a concurrent
reader (doing a lookup in netns ONE, 'finding' an object of netns TWO)
can be fooled without notice, because no RCU grace period has to be
observed between object freeing and its reuse.

We don't have this problem with UDP/TCP slab caches because TCP/UDP
hashtables are global to the machine (and each object has a pointer to
its netns).

If we use per netns conntrack hash tables, we also *must* use per netns
conntrack slab caches, to guarantee an object can not escape from one
namespace to another one.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
[Patrick: added unique slab name allocation]
Cc: stable@kernel.org
Signed-off-by: Patrick McHardy <kaber@trash.net>
parent 9edd7ca0
...@@ -11,6 +11,7 @@ struct nf_conntrack_ecache; ...@@ -11,6 +11,7 @@ struct nf_conntrack_ecache;
struct netns_ct { struct netns_ct {
atomic_t count; atomic_t count;
unsigned int expect_count; unsigned int expect_count;
struct kmem_cache *nf_conntrack_cachep;
struct hlist_nulls_head *hash; struct hlist_nulls_head *hash;
struct hlist_head *expect_hash; struct hlist_head *expect_hash;
struct hlist_nulls_head unconfirmed; struct hlist_nulls_head unconfirmed;
...@@ -28,5 +29,6 @@ struct netns_ct { ...@@ -28,5 +29,6 @@ struct netns_ct {
#endif #endif
int hash_vmalloc; int hash_vmalloc;
int expect_vmalloc; int expect_vmalloc;
char *slabname;
}; };
#endif #endif
...@@ -63,8 +63,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max); ...@@ -63,8 +63,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
struct nf_conn nf_conntrack_untracked __read_mostly; struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked); EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
static struct kmem_cache *nf_conntrack_cachep __read_mostly;
static int nf_conntrack_hash_rnd_initted; static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd; static unsigned int nf_conntrack_hash_rnd;
...@@ -572,7 +570,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, ...@@ -572,7 +570,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
* Do not use kmem_cache_zalloc(), as this cache uses * Do not use kmem_cache_zalloc(), as this cache uses
* SLAB_DESTROY_BY_RCU. * SLAB_DESTROY_BY_RCU.
*/ */
ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
if (ct == NULL) { if (ct == NULL) {
pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
atomic_dec(&net->ct.count); atomic_dec(&net->ct.count);
...@@ -611,7 +609,7 @@ void nf_conntrack_free(struct nf_conn *ct) ...@@ -611,7 +609,7 @@ void nf_conntrack_free(struct nf_conn *ct)
nf_ct_ext_destroy(ct); nf_ct_ext_destroy(ct);
atomic_dec(&net->ct.count); atomic_dec(&net->ct.count);
nf_ct_ext_free(ct); nf_ct_ext_free(ct);
kmem_cache_free(nf_conntrack_cachep, ct); kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
} }
EXPORT_SYMBOL_GPL(nf_conntrack_free); EXPORT_SYMBOL_GPL(nf_conntrack_free);
...@@ -1119,7 +1117,6 @@ static void nf_conntrack_cleanup_init_net(void) ...@@ -1119,7 +1117,6 @@ static void nf_conntrack_cleanup_init_net(void)
nf_conntrack_helper_fini(); nf_conntrack_helper_fini();
nf_conntrack_proto_fini(); nf_conntrack_proto_fini();
kmem_cache_destroy(nf_conntrack_cachep);
} }
static void nf_conntrack_cleanup_net(struct net *net) static void nf_conntrack_cleanup_net(struct net *net)
...@@ -1137,6 +1134,8 @@ static void nf_conntrack_cleanup_net(struct net *net) ...@@ -1137,6 +1134,8 @@ static void nf_conntrack_cleanup_net(struct net *net)
nf_conntrack_ecache_fini(net); nf_conntrack_ecache_fini(net);
nf_conntrack_acct_fini(net); nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net); nf_conntrack_expect_fini(net);
kmem_cache_destroy(net->ct.nf_conntrack_cachep);
kfree(net->ct.slabname);
free_percpu(net->ct.stat); free_percpu(net->ct.stat);
} }
...@@ -1272,15 +1271,6 @@ static int nf_conntrack_init_init_net(void) ...@@ -1272,15 +1271,6 @@ static int nf_conntrack_init_init_net(void)
NF_CONNTRACK_VERSION, nf_conntrack_htable_size, NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
nf_conntrack_max); nf_conntrack_max);
nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
sizeof(struct nf_conn),
0, SLAB_DESTROY_BY_RCU, NULL);
if (!nf_conntrack_cachep) {
printk(KERN_ERR "Unable to create nf_conn slab cache\n");
ret = -ENOMEM;
goto err_cache;
}
ret = nf_conntrack_proto_init(); ret = nf_conntrack_proto_init();
if (ret < 0) if (ret < 0)
goto err_proto; goto err_proto;
...@@ -1302,8 +1292,6 @@ static int nf_conntrack_init_init_net(void) ...@@ -1302,8 +1292,6 @@ static int nf_conntrack_init_init_net(void)
err_helper: err_helper:
nf_conntrack_proto_fini(); nf_conntrack_proto_fini();
err_proto: err_proto:
kmem_cache_destroy(nf_conntrack_cachep);
err_cache:
return ret; return ret;
} }
...@@ -1325,6 +1313,21 @@ static int nf_conntrack_init_net(struct net *net) ...@@ -1325,6 +1313,21 @@ static int nf_conntrack_init_net(struct net *net)
ret = -ENOMEM; ret = -ENOMEM;
goto err_stat; goto err_stat;
} }
net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
if (!net->ct.slabname) {
ret = -ENOMEM;
goto err_slabname;
}
net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
sizeof(struct nf_conn), 0,
SLAB_DESTROY_BY_RCU, NULL);
if (!net->ct.nf_conntrack_cachep) {
printk(KERN_ERR "Unable to create nf_conn slab cache\n");
ret = -ENOMEM;
goto err_cache;
}
net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
&net->ct.hash_vmalloc, 1); &net->ct.hash_vmalloc, 1);
if (!net->ct.hash) { if (!net->ct.hash) {
...@@ -1352,6 +1355,10 @@ static int nf_conntrack_init_net(struct net *net) ...@@ -1352,6 +1355,10 @@ static int nf_conntrack_init_net(struct net *net)
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
nf_conntrack_htable_size); nf_conntrack_htable_size);
err_hash: err_hash:
kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
kfree(net->ct.slabname);
err_slabname:
free_percpu(net->ct.stat); free_percpu(net->ct.stat);
err_stat: err_stat:
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment