Commit 203f2e78 authored by Florian Westphal, committed by Pablo Neira Ayuso

netfilter: nat: remove l4proto->unique_tuple

fold remaining users (icmp, icmpv6, gre) into nf_nat_l4proto_unique_tuple.
The static-save of old incarnation of resolved key in gre and icmp is
removed as well, just use the prandom based offset like the others.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 716b23c1
...@@ -27,17 +27,6 @@ struct nf_nat_l4proto { ...@@ -27,17 +27,6 @@ struct nf_nat_l4proto {
const union nf_conntrack_man_proto *min, const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max); const union nf_conntrack_man_proto *max);
/* Alter the per-proto part of the tuple (depending on
* maniptype), to give a unique tuple in the given range if
* possible. Per-protocol part of tuple is initialized to the
* incoming packet.
*/
void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
struct nf_conntrack_tuple *tuple,
const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct);
int (*nlattr_to_range)(struct nlattr *tb[], int (*nlattr_to_range)(struct nlattr *tb[],
struct nf_nat_range2 *range); struct nf_nat_range2 *range);
}; };
......
...@@ -37,49 +37,6 @@ MODULE_LICENSE("GPL"); ...@@ -37,49 +37,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
/* generate unique tuple ... */
/* Alter the GRE key part of the tuple to obtain a mapping that is not
 * already taken, staying inside the requested range if one was given.
 *
 * Only PPTP-tracked GRE connections (those with a master conntrack)
 * are rewritten; plain GRE carries no key we could NAT.
 */
static void
gre_unique_tuple(const struct nf_nat_l3proto *l3proto,
		 struct nf_conntrack_tuple *tuple,
		 const struct nf_nat_range2 *range,
		 enum nf_nat_manip_type maniptype,
		 const struct nf_conn *ct)
{
	/* Persists across calls so consecutive connections start probing
	 * from different keys; shared state, not lock-protected.
	 */
	static u_int16_t key;
	__be16 *keyptr;
	unsigned int min, i, range_size;

	/* If there is no master conntrack we are not PPTP,
	 * do not change tuples.
	 */
	if (!ct->master)
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		keyptr = &tuple->src.u.gre.key;
	else
		keyptr = &tuple->dst.u.gre.key;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
		/* No explicit range: any non-zero 16-bit key will do. */
		pr_debug("%p: NATing GRE PPTP\n", ct);
		min = 1;
		range_size = 0xffff;
	} else {
		min = ntohs(range->min_proto.gre.key);
		range_size = ntohs(range->max_proto.gre.key) - min + 1;
	}

	pr_debug("min = %u, range_size = %u\n", min, range_size);

	/* Probe each candidate key once; stop early on a free tuple.
	 * The loop always returns — on success, or after range_size
	 * attempts with the last candidate left in place — so nothing
	 * may follow it (previous trailing pr_debug/return were dead code).
	 */
	for (i = 0; ; ++key) {
		*keyptr = htons(min + key % range_size);
		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
			return;
	}
}
/* manipulate a GRE packet according to maniptype */ /* manipulate a GRE packet according to maniptype */
static bool static bool
gre_manip_pkt(struct sk_buff *skb, gre_manip_pkt(struct sk_buff *skb,
...@@ -124,7 +81,6 @@ static const struct nf_nat_l4proto gre = { ...@@ -124,7 +81,6 @@ static const struct nf_nat_l4proto gre = {
.l4proto = IPPROTO_GRE, .l4proto = IPPROTO_GRE,
.manip_pkt = gre_manip_pkt, .manip_pkt = gre_manip_pkt,
.in_range = nf_nat_l4proto_in_range, .in_range = nf_nat_l4proto_in_range,
.unique_tuple = gre_unique_tuple,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK) #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range, .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif #endif
......
...@@ -27,32 +27,6 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple, ...@@ -27,32 +27,6 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
} }
/* Pick an ICMP id inside the requested range (or anywhere in the
 * 16-bit space when no range was given) that yields a tuple not
 * already in use. Per-protocol part of the tuple was initialized
 * from the incoming packet.
 */
static void
icmp_unique_tuple(const struct nf_nat_l3proto *l3proto,
		  struct nf_conntrack_tuple *tuple,
		  const struct nf_nat_range2 *range,
		  enum nf_nat_manip_type maniptype,
		  const struct nf_conn *ct)
{
	/* Rolls over between calls so new flows probe different ids;
	 * shared state, not lock-protected.
	 */
	static u_int16_t id;
	unsigned int range_size;
	unsigned int i;

	range_size = ntohs(range->max_proto.icmp.id) -
		     ntohs(range->min_proto.icmp.id) + 1;

	/* If no range specified, use the whole id space. */
	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
		range_size = 0xFFFF;

	/* Always terminates via return: either a free tuple was found or
	 * range_size candidates were tried (the last one is kept). The
	 * former trailing `return;` after this loop was unreachable and
	 * has been dropped.
	 */
	for (i = 0; ; ++id) {
		tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
					     (id % range_size));
		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
			return;
	}
}
static bool static bool
icmp_manip_pkt(struct sk_buff *skb, icmp_manip_pkt(struct sk_buff *skb,
const struct nf_nat_l3proto *l3proto, const struct nf_nat_l3proto *l3proto,
...@@ -76,7 +50,6 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmp = { ...@@ -76,7 +50,6 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmp = {
.l4proto = IPPROTO_ICMP, .l4proto = IPPROTO_ICMP,
.manip_pkt = icmp_manip_pkt, .manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range, .in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK) #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range, .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif #endif
......
...@@ -29,31 +29,6 @@ icmpv6_in_range(const struct nf_conntrack_tuple *tuple, ...@@ -29,31 +29,6 @@ icmpv6_in_range(const struct nf_conntrack_tuple *tuple,
ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
} }
/* Select an ICMPv6 identifier for the tuple that is not already in use,
 * restricted to the configured range when one was supplied; otherwise
 * the whole 16-bit id space is eligible.
 */
static void
icmpv6_unique_tuple(const struct nf_nat_l3proto *l3proto,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range,
		    enum nf_nat_manip_type maniptype,
		    const struct nf_conn *ct)
{
	/* Carried over between invocations so the probe start varies. */
	static u16 ident;
	unsigned int base = ntohs(range->min_proto.icmp.id);
	unsigned int span;
	unsigned int tries = 0;

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)
		span = ntohs(range->max_proto.icmp.id) - base + 1;
	else
		span = 0xffff;

	for (;;) {
		tuple->src.u.icmp.id = htons(base + ident % span);
		if (++tries == span || !nf_nat_used_tuple(tuple, ct))
			return;
		++ident;
	}
}
static bool static bool
icmpv6_manip_pkt(struct sk_buff *skb, icmpv6_manip_pkt(struct sk_buff *skb,
const struct nf_nat_l3proto *l3proto, const struct nf_nat_l3proto *l3proto,
...@@ -83,7 +58,6 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = { ...@@ -83,7 +58,6 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = {
.l4proto = IPPROTO_ICMPV6, .l4proto = IPPROTO_ICMPV6,
.manip_pkt = icmpv6_manip_pkt, .manip_pkt = icmpv6_manip_pkt,
.in_range = icmpv6_in_range, .in_range = icmpv6_in_range,
.unique_tuple = icmpv6_unique_tuple,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK) #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range, .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif #endif
......
...@@ -310,20 +310,65 @@ find_best_ips_proto(const struct nf_conntrack_zone *zone, ...@@ -310,20 +310,65 @@ find_best_ips_proto(const struct nf_conntrack_zone *zone,
} }
} }
/* Alter the per-proto part of the tuple (depending on maniptype), to
* give a unique tuple in the given range if possible.
*
* Per-protocol part of tuple is initialized to the incoming packet.
*/
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range2 *range, const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype, enum nf_nat_manip_type maniptype,
const struct nf_conn *ct) const struct nf_conn *ct)
{ {
unsigned int range_size, min, max, i, attempts; unsigned int range_size, min, max, i, attempts;
__be16 *portptr; __be16 *keyptr;
u16 off; u16 off;
static const unsigned int max_attempts = 128; static const unsigned int max_attempts = 128;
switch (tuple->dst.protonum) {
case IPPROTO_ICMP: /* fallthrough */
case IPPROTO_ICMPV6:
/* id is same for either direction... */
keyptr = &tuple->src.u.icmp.id;
min = range->min_proto.icmp.id;
range_size = ntohs(range->max_proto.icmp.id) -
ntohs(range->min_proto.icmp.id) + 1;
goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
case IPPROTO_GRE:
/* If there is no master conntrack we are not PPTP,
do not change tuples */
if (!ct->master)
return;
if (maniptype == NF_NAT_MANIP_SRC)
keyptr = &tuple->src.u.gre.key;
else
keyptr = &tuple->dst.u.gre.key;
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
min = 1;
range_size = 65535;
} else {
min = ntohs(range->min_proto.gre.key);
range_size = ntohs(range->max_proto.gre.key) - min + 1;
}
goto find_free_id;
#endif
case IPPROTO_UDP: /* fallthrough */
case IPPROTO_UDPLITE: /* fallthrough */
case IPPROTO_TCP: /* fallthrough */
case IPPROTO_SCTP: /* fallthrough */
case IPPROTO_DCCP: /* fallthrough */
if (maniptype == NF_NAT_MANIP_SRC) if (maniptype == NF_NAT_MANIP_SRC)
portptr = &tuple->src.u.all; keyptr = &tuple->src.u.all;
else else
portptr = &tuple->dst.u.all; keyptr = &tuple->dst.u.all;
break;
default:
return;
}
/* If no range specified... */ /* If no range specified... */
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) { if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
...@@ -331,9 +376,9 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, ...@@ -331,9 +376,9 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
if (maniptype == NF_NAT_MANIP_DST) if (maniptype == NF_NAT_MANIP_DST)
return; return;
if (ntohs(*portptr) < 1024) { if (ntohs(*keyptr) < 1024) {
/* Loose convention: >> 512 is credential passing */ /* Loose convention: >> 512 is credential passing */
if (ntohs(*portptr) < 512) { if (ntohs(*keyptr) < 512) {
min = 1; min = 1;
range_size = 511 - min + 1; range_size = 511 - min + 1;
} else { } else {
...@@ -352,8 +397,9 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, ...@@ -352,8 +397,9 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
range_size = max - min + 1; range_size = max - min + 1;
} }
find_free_id:
if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
off = (ntohs(*portptr) - ntohs(range->base_proto.all)); off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
else else
off = prandom_u32(); off = prandom_u32();
...@@ -369,7 +415,7 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, ...@@ -369,7 +415,7 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
*/ */
another_round: another_round:
for (i = 0; i < attempts; i++, off++) { for (i = 0; i < attempts; i++, off++) {
*portptr = htons(min + off % range_size); *keyptr = htons(min + off % range_size);
if (!nf_nat_used_tuple(tuple, ct)) if (!nf_nat_used_tuple(tuple, ct))
return; return;
} }
...@@ -454,9 +500,6 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple, ...@@ -454,9 +500,6 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
} }
/* Last chance: get protocol to try to obtain unique tuple. */ /* Last chance: get protocol to try to obtain unique tuple. */
if (l4proto->unique_tuple)
l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
else
nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct); nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
out: out:
rcu_read_unlock(); rcu_read_unlock();
......
...@@ -25,18 +25,6 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple, ...@@ -25,18 +25,6 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
return true; return true;
} }
/* No per-protocol field we know how to rewrite: leave the tuple
 * untouched. If it is not unique as-is, there is nothing to frob.
 */
static void unknown_unique_tuple(const struct nf_nat_l3proto *l3proto,
				 struct nf_conntrack_tuple *tuple,
				 const struct nf_nat_range2 *range,
				 enum nf_nat_manip_type maniptype,
				 const struct nf_conn *ct)
{
}
static bool static bool
unknown_manip_pkt(struct sk_buff *skb, unknown_manip_pkt(struct sk_buff *skb,
const struct nf_nat_l3proto *l3proto, const struct nf_nat_l3proto *l3proto,
...@@ -50,5 +38,4 @@ unknown_manip_pkt(struct sk_buff *skb, ...@@ -50,5 +38,4 @@ unknown_manip_pkt(struct sk_buff *skb,
const struct nf_nat_l4proto nf_nat_l4proto_unknown = { const struct nf_nat_l4proto nf_nat_l4proto_unknown = {
.manip_pkt = unknown_manip_pkt, .manip_pkt = unknown_manip_pkt,
.in_range = unknown_in_range, .in_range = unknown_in_range,
.unique_tuple = unknown_unique_tuple,
}; };
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment