Commit 2954fe60 authored by Florian Westphal, committed by Pablo Neira Ayuso

netfilter: let reset rules clean out conntrack entries

iptables/nftables support responding to tcp packets with tcp resets.

The generated tcp reset packet passes through both output and postrouting
netfilter hooks, but conntrack will never see it because the generated
skb has its ->nfct pointer copied over from the packet that triggered the
reset rule.

If the reset rule is used for established connections, this
may result in the conntrack entry staying around for a very long
time (the default established timeout is 5 days).

One way to avoid this would be to not copy the nf_conn pointer
so that the reset packet passes through conntrack too.

The problem is that output rules might not have the same conntrack
zone setup as the prerouting ones, so it's possible that the
reset skb won't find the correct entry.  Generating a template
entry for the skb seems error-prone as well.

Add an explicit "closing" function that switches a confirmed
conntrack entry to closed state and wire this up for tcp.

If the entry isn't confirmed, no action is needed because
the conntrack entry will never be committed to the table.
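
For orientation, the call pattern this patch wires into the reject paths can be
sketched as follows. This is a simplified illustration, not code from the patch:
the wrapper name reject_and_close() is hypothetical, while the two calls mirror
the nf_send_reset()/nf_send_reset6() hunks further down.

#include <linux/netfilter.h>
#include <linux/skbuff.h>

/* Hypothetical wrapper, condensed from the reject-path hunks below. */
static void reject_and_close(struct sk_buff *nskb, const struct sk_buff *oldskb)
{
	/* reuse the conntrack entry of the packet that matched the reset rule */
	nf_ct_attach(nskb, oldskb);

	/* ask conntrack to flag that (confirmed) entry as closing, so it expires
	 * on the short close timeout instead of the multi-day established one */
	nf_ct_set_closing(skb_nfct(oldskb));
}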
Reported-by: Russell King <linux@armlinux.org.uk>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent e4d0fe71
@@ -437,11 +437,13 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 #include <linux/netfilter/nf_conntrack_zones_common.h>
 
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_set_closing(struct nf_conntrack *nfct);
 struct nf_conntrack_tuple;
 bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
 		       const struct sk_buff *skb);
 #else
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
 struct nf_conntrack_tuple;
 static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
 				      const struct sk_buff *skb)
@@ -459,6 +461,7 @@ struct nf_ct_hook {
 	bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
 			      const struct sk_buff *);
 	void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
+	void (*set_closing)(struct nf_conntrack *nfct);
 };
 
 extern const struct nf_ct_hook __rcu *nf_ct_hook;
......
@@ -125,6 +125,12 @@ struct nf_conn {
 	union nf_conntrack_proto proto;
 };
 
+static inline struct nf_conn *
+nf_ct_to_nf_conn(const struct nf_conntrack *nfct)
+{
+	return container_of(nfct, struct nf_conn, ct_general);
+}
+
 static inline struct nf_conn *
 nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
 {
@@ -175,6 +181,8 @@ nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
 void nf_ct_destroy(struct nf_conntrack *nfct);
 
+void nf_conntrack_tcp_set_closing(struct nf_conn *ct);
+
 /* decrement reference count on a conntrack */
 static inline void nf_ct_put(struct nf_conn *ct)
 {
......
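
To illustrate the new nf_ct_to_nf_conn() helper added above: it converts the bare
struct nf_conntrack pointer carried by an skb back into the surrounding struct
nf_conn. The sketch below is hypothetical (the skb_to_nf_conn() name is made up);
in the patch itself the conversion is done by the conntrack core's set_closing
hook shown further down.

#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>

/* Hypothetical example: map an skb's raw nfct pointer back to its nf_conn. */
static struct nf_conn *skb_to_nf_conn(const struct sk_buff *skb)
{
	struct nf_conntrack *nfct = skb_nfct(skb);

	return nfct ? nf_ct_to_nf_conn(nfct) : NULL;
}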
@@ -280,6 +280,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
 		goto free_nskb;
 
 	nf_ct_attach(nskb, oldskb);
+	nf_ct_set_closing(skb_nfct(oldskb));
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	/* If we use ip_local_out for bridged traffic, the MAC source on
......
@@ -345,6 +345,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
 	nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
 
 	nf_ct_attach(nskb, oldskb);
+	nf_ct_set_closing(skb_nfct(oldskb));
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	/* If we use ip6_local_out for bridged traffic, the MAC source on
......
@@ -702,6 +702,22 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
 }
 EXPORT_SYMBOL(nf_conntrack_destroy);
 
+void nf_ct_set_closing(struct nf_conntrack *nfct)
+{
+	const struct nf_ct_hook *ct_hook;
+
+	if (!nfct)
+		return;
+
+	rcu_read_lock();
+	ct_hook = rcu_dereference(nf_ct_hook);
+	if (ct_hook)
+		ct_hook->set_closing(nfct);
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_ct_set_closing);
+
 bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
 			 const struct sk_buff *skb)
 {
......
@@ -2747,11 +2747,23 @@ int nf_conntrack_init_start(void)
 	return ret;
 }
 
+static void nf_conntrack_set_closing(struct nf_conntrack *nfct)
+{
+	struct nf_conn *ct = nf_ct_to_nf_conn(nfct);
+
+	switch (nf_ct_protonum(ct)) {
+	case IPPROTO_TCP:
+		nf_conntrack_tcp_set_closing(ct);
+		break;
+	}
+}
+
 static const struct nf_ct_hook nf_conntrack_hook = {
 	.update		= nf_conntrack_update,
 	.destroy	= nf_ct_destroy,
 	.get_tuple_skb	= nf_conntrack_get_tuple_skb,
 	.attach		= nf_conntrack_attach,
+	.set_closing	= nf_conntrack_set_closing,
 };
 
 void nf_conntrack_init_end(void)
......
@@ -911,6 +911,41 @@ static bool tcp_can_early_drop(const struct nf_conn *ct)
 	return false;
 }
 
+void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
+{
+	enum tcp_conntrack old_state;
+	const unsigned int *timeouts;
+	u32 timeout;
+
+	if (!nf_ct_is_confirmed(ct))
+		return;
+
+	spin_lock_bh(&ct->lock);
+	old_state = ct->proto.tcp.state;
+	ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
+
+	if (old_state == TCP_CONNTRACK_CLOSE ||
+	    test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
+		spin_unlock_bh(&ct->lock);
+		return;
+	}
+
+	timeouts = nf_ct_timeout_lookup(ct);
+	if (!timeouts) {
+		const struct nf_tcp_net *tn;
+
+		tn = nf_tcp_pernet(nf_ct_net(ct));
+		timeouts = tn->timeouts;
+	}
+
+	timeout = timeouts[TCP_CONNTRACK_CLOSE];
+	WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
+
+	spin_unlock_bh(&ct->lock);
+
+	nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+}
+
 static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
 {
 	state->td_end = 0;
......