Commit 242922a0 authored by Florian Westphal, committed by Pablo Neira Ayuso

netfilter: conntrack: simplify early_drop

We don't need to acquire the bucket lock during early drop; we can
use lockless traversal just like ____nf_conntrack_find.

The timer deletion serves as the synchronization point: if another cpu
attempts to evict the same entry, only one will succeed in deleting the
timer (see the sketch below the commit metadata).
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 8786a971
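
To make the timer-deletion claim concrete, here is a minimal userspace C sketch, an analogy rather than kernel code: the hypothetical timer_pending flag stands in for the entry's timeout timer, and atomic_exchange() plays the role del_timer() plays in the patch, handing the old value to exactly one winner. Two racing "cpus" (threads) both attempt the eviction; only one accounts a drop.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the conntrack entry's timer:
 * 1 = timer armed, 0 = already claimed or fired. */
static atomic_int timer_pending = 1;
static atomic_int drops;

/* Mimics "del_timer(&tmp->timeout) && nf_ct_delete(tmp, 0, 0)":
 * only the caller that flips the flag from 1 to 0 owns the kill;
 * the loser just drops its reference and moves to the next entry. */
static void *evict(void *unused)
{
	(void)unused;
	if (atomic_exchange(&timer_pending, 0) == 1)
		atomic_fetch_add(&drops, 1);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, evict, NULL);
	pthread_create(&b, NULL, evict, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("drops = %d (always 1, never 2)\n", atomic_load(&drops));
	return 0;
}

Compile with cc -std=c11 -pthread; regardless of interleaving, drops ends up as 1, which is why the patch needs no bucket lock for mutual exclusion.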
include/net/netfilter/nf_conntrack.h
@@ -301,6 +301,7 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
 #define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
+#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
 
 #define MODULE_ALIAS_NFCT_HELPER(helper) \
 	MODULE_ALIAS("nfct-helper-" helper)
net/netfilter/nf_conntrack_core.c
@@ -834,67 +834,66 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static unsigned int early_drop_list(struct net *net,
+				    struct hlist_nulls_head *head)
 {
-	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
-	struct nf_conn *tmp;
 	struct hlist_nulls_node *n;
-	unsigned int i, hash, sequence;
-	struct nf_conn *ct = NULL;
-	spinlock_t *lockp;
-	bool ret = false;
-
-	i = 0;
+	unsigned int drops = 0;
+	struct nf_conn *tmp;
 
-	local_bh_disable();
-restart:
-	sequence = read_seqcount_begin(&nf_conntrack_generation);
-	for (; i < NF_CT_EVICTION_RANGE; i++) {
-		hash = scale_hash(_hash++);
-		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
-		nf_conntrack_lock(lockp);
-		if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
-			spin_unlock(lockp);
-			goto restart;
-		}
-		hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
-					       hnnode) {
-			tmp = nf_ct_tuplehash_to_ctrack(h);
-
-			if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
-			    !net_eq(nf_ct_net(tmp), net) ||
-			    nf_ct_is_dying(tmp))
-				continue;
+	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
+		tmp = nf_ct_tuplehash_to_ctrack(h);
 
-			if (atomic_inc_not_zero(&tmp->ct_general.use)) {
-				ct = tmp;
-				break;
-			}
-		}
+		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+		    !net_eq(nf_ct_net(tmp), net) ||
+		    nf_ct_is_dying(tmp))
+			continue;
 
-		spin_unlock(lockp);
-		if (ct)
-			break;
-	}
+		if (!atomic_inc_not_zero(&tmp->ct_general.use))
+			continue;
 
-	local_bh_enable();
+		/* kill only if still in same netns -- might have moved due to
+		 * SLAB_DESTROY_BY_RCU rules.
+		 *
+		 * We steal the timer reference.  If that fails timer has
+		 * already fired or someone else deleted it. Just drop ref
+		 * and move to next entry.
+		 */
+		if (net_eq(nf_ct_net(tmp), net) &&
+		    nf_ct_is_confirmed(tmp) &&
+		    del_timer(&tmp->timeout) &&
+		    nf_ct_delete(tmp, 0, 0))
+			drops++;
 
-	if (!ct)
-		return false;
+		nf_ct_put(tmp);
+	}
 
-	/* kill only if in same netns -- might have moved due to
-	 * SLAB_DESTROY_BY_RCU rules
-	 */
-	if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout)) {
-		if (nf_ct_delete(ct, 0, 0)) {
-			NF_CT_STAT_INC_ATOMIC(net, early_drop);
-			ret = true;
+	return drops;
+}
+
+static noinline int early_drop(struct net *net, unsigned int _hash)
+{
+	unsigned int i;
+
+	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
+		struct hlist_nulls_head *ct_hash;
+		unsigned hash, sequence, drops;
+
+		do {
+			sequence = read_seqcount_begin(&nf_conntrack_generation);
+			hash = scale_hash(_hash++);
+			ct_hash = nf_conntrack_hash;
+		} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+		drops = early_drop_list(net, &ct_hash[hash]);
+		if (drops) {
+			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
+			return true;
 		}
 	}
 
-	nf_ct_put(ct);
-	return ret;
+	return false;
 }
 
 static struct nf_conn *
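
One more piece of the lockless scheme deserves a note: the do/while loop in the new early_drop() snapshots the hash table pointer under nf_conntrack_generation, so a concurrent resize cannot leave it walking a stale bucket array with a mismatched size. Below is a minimal userspace C sketch of that seqcount retry pattern; the names (generation, table, table_size, snapshot_table, resize_table) are illustrative, not kernel API.

#include <stdatomic.h>
#include <stddef.h>

static atomic_uint generation;      /* even: stable, odd: resize in progress */
static _Atomic(void *) table;       /* hash table base pointer */
static _Atomic size_t table_size;

/* Reader side: mirrors the do { ... } while (read_seqcount_retry(...))
 * loop in early_drop() -- take a consistent (pointer, size) snapshot. */
static void snapshot_table(void **tbl, size_t *size)
{
	unsigned int seq;

	do {
		/* read_seqcount_begin(): wait for an even (stable) value */
		while ((seq = atomic_load(&generation)) & 1)
			;	/* a resize is running; spin until done */
		*tbl = table;
		*size = table_size;
		/* read_seqcount_retry(): retry if a writer intervened */
	} while (atomic_load(&generation) != seq);
}

/* Writer side: what a hash resize would do around the pointer swap. */
static void resize_table(void *new_tbl, size_t new_size)
{
	atomic_fetch_add(&generation, 1);	/* begin: counter goes odd */
	table = new_tbl;
	table_size = new_size;
	atomic_fetch_add(&generation, 1);	/* end: counter even again */
}

The writer keeps the counter odd for the duration of the swap; a reader that sees an odd or changed value simply retries, which is cheap because resizes are rare.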