Commit e3a88f9c authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
netfilter/ipvs fixes for net

The following patchset contains fixes for netfilter/ipvs. This round of
fixes is larger than usual at this stage, specifically because of the
nf_tables bridge reject fixes that I would like to see in 3.18. The
patches are:

1) Fix a null-pointer dereference that may occur when logging
   errors. This problem was introduced by 4a4739d5 ("ipvs: Pull
   out crosses_local_route_boundary logic") in v3.17-rc5.

2) Update the hook mask of the nf_tables bridge filter chain type so we
   can also filter out packets from the bridge prerouting and postrouting
   hooks. This fixes 36d2af59 ("netfilter: nf_tables: allow to filter from
   prerouting and postrouting"), which needs this chunk to work.

3) Two patches to refactor common code to forge the IPv4 and IPv6
   reject packets from the bridge. These are required by the nf_tables
   reject bridge fix.

4) Fix nft_reject_bridge by avoiding the use of the IP stack to reject
   packets from the bridge. The idea is to forge the reject packets and
   inject them to the original port via br_deliver() which is now
   exported for that purpose.

5) Restrict nft_reject_bridge to the bridge prerouting and input hooks.
   The original skbuff may be cloned after prerouting, when the bridge
   stack needs to flood it to several bridge ports, so by then it is too
   late to reject the traffic.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents de11b0e8 127917c2
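For context, a minimal nft ruleset exercising the new bridge reject path might look like the sketch below. This is illustrative only: the table and chain names, the priority and the port number are arbitrary, and the exact rule syntax depends on the nft userspace version in use.

  nft add table bridge filter
  nft add chain bridge filter prerouting '{ type filter hook prerouting priority 0; }'
  nft add rule bridge filter prerouting tcp dport 25 reject with tcp reset

An address-family-neutral variant would use "reject with icmpx type port-unreachable", which corresponds to NFT_REJECT_ICMPX_UNREACH in the code below.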
#ifndef _IPV4_NF_REJECT_H
#define _IPV4_NF_REJECT_H

#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/icmp.h>

static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
@@ -10,4 +12,12 @@ static inline void nf_send_unreach(struct sk_buff *skb_in, int code)

void nf_send_reset(struct sk_buff *oldskb, int hook);

const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook);
struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
__be16 protocol, int ttl);
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth);

#endif /* _IPV4_NF_REJECT_H */
@@ -15,4 +15,14 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,

void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);

const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *otcph,
unsigned int *otcplen, int hook);
struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
__be16 protocol, int hoplimit);
void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen);

#endif /* _IPV6_NF_REJECT_H */
@@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_deliver);

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
...
@@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = {
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_BRIDGE,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_IN) |
(1 << NF_BR_FORWARD) |
(1 << NF_BR_LOCAL_OUT) |
(1 << NF_BR_POST_ROUTING),
};

static int __init nf_tables_bridge_init(void)
...
@@ -16,6 +16,238 @@
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/netfilter_bridge.h>
#include "../br_private.h"
static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
struct sk_buff *nskb)
{
struct ethhdr *eth;
eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
skb_reset_mac_header(nskb);
ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
eth->h_proto = eth_hdr(oldskb)->h_proto;
skb_pull(nskb, ETH_HLEN);
}
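/* Basic sanity checks on the IPv4 header of the packet being rejected. */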
static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
{
struct iphdr *iph;
u32 len;
if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
return 0;
iph = ip_hdr(oldskb);
if (iph->ihl < 5 || iph->version != 4)
return 0;
len = ntohs(iph->tot_len);
if (oldskb->len < len)
return 0;
else if (len < (iph->ihl*4))
return 0;
if (!pskb_may_pull(oldskb, iph->ihl*4))
return 0;
return 1;
}
static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
struct iphdr *niph;
const struct tcphdr *oth;
struct tcphdr _oth;
if (!nft_reject_iphdr_validate(oldskb))
return;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
if (!oth)
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
sysctl_ip_default_ttl);
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
niph->ttl = sysctl_ip_default_ttl;
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
u8 code)
{
struct sk_buff *nskb;
struct iphdr *niph;
struct icmphdr *icmph;
unsigned int len;
void *payload;
__wsum csum;
if (!nft_reject_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
return;
/* RFC says return as much as we can without exceeding 576 bytes. */
len = min_t(unsigned int, 536, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return;
if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
sysctl_ip_default_ttl);
skb_reset_transport_header(nskb);
icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
memset(icmph, 0, sizeof(*icmph));
icmph->type = ICMP_DEST_UNREACH;
icmph->code = code;
payload = skb_put(nskb, len);
memcpy(payload, skb_network_header(oldskb), len);
csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
icmph->checksum = csum_fold(csum);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
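/* Basic sanity checks on the IPv6 header of the packet being rejected. */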
static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
{
struct ipv6hdr *hdr;
u32 pkt_len;
if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
return 0;
hdr = ipv6_hdr(oldskb);
if (hdr->version != 6)
return 0;
pkt_len = ntohs(hdr->payload_len);
if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
return 0;
return 1;
}
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
const struct tcphdr *oth;
struct tcphdr _oth;
unsigned int otcplen;
struct ipv6hdr *nip6h;
if (!nft_reject_ip6hdr_validate(oldskb))
return;
oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
if (!oth)
return;
nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
net->ipv6.devconf_all->hop_limit);
nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static void nft_reject_br_send_v6_unreach(struct net *net,
struct sk_buff *oldskb, int hook,
u8 code)
{
struct sk_buff *nskb;
struct ipv6hdr *nip6h;
struct icmp6hdr *icmp6h;
unsigned int len;
void *payload;
if (!nft_reject_ip6hdr_validate(oldskb))
return;
/* Include "As much of invoking packet as possible without the ICMPv6
* packet exceeding the minimum IPv6 MTU" in the ICMP payload.
*/
len = min_t(unsigned int, 1220, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
net->ipv6.devconf_all->hop_limit);
skb_reset_transport_header(nskb);
icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
memset(icmp6h, 0, sizeof(*icmp6h));
icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
icmp6h->icmp6_code = code;
payload = skb_put(nskb, len);
memcpy(payload, skb_network_header(oldskb), len);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
icmp6h->icmp6_cksum =
csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
nskb->len - sizeof(struct ipv6hdr),
IPPROTO_ICMPV6,
csum_partial(icmp6h,
nskb->len - sizeof(struct ipv6hdr),
0));
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static void nft_reject_bridge_eval(const struct nft_expr *expr,
struct nft_data data[NFT_REG_MAX + 1],
@@ -23,18 +255,27 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
{
struct nft_reject *priv = nft_expr_priv(expr);
struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
if (is_broadcast_ether_addr(dest) ||
is_multicast_ether_addr(dest))
goto out;
switch (eth_hdr(pkt->skb)->h_proto) {
case htons(ETH_P_IP):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_br_send_v4_unreach(pkt->skb,
pkt->ops->hooknum,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_br_send_v4_tcp_reset(pkt->skb,
pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_br_send_v4_unreach(pkt->skb,
pkt->ops->hooknum,
nft_reject_icmp_code(priv->icmp_code));
break;
}
@@ -42,16 +283,18 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
case htons(ETH_P_IPV6):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_br_send_v6_unreach(net, pkt->skb,
pkt->ops->hooknum,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_br_send_v6_unreach(net, pkt->skb,
pkt->ops->hooknum,
nft_reject_icmpv6_code(priv->icmp_code));
break;
}
break;
@@ -59,15 +302,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
/* No explicit way to reject this protocol, drop it. */
break;
}
out:
data[NFT_REG_VERDICT].verdict = NF_DROP;
}
static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
{
struct nft_base_chain *basechain;
if (chain->flags & NFT_BASE_CHAIN) {
basechain = nft_base_chain(chain);
switch (basechain->ops[0].hooknum) {
case NF_BR_PRE_ROUTING:
case NF_BR_LOCAL_IN:
break;
default:
return -EOPNOTSUPP;
}
}
return 0;
}
static int nft_reject_bridge_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
int icmp_code, err;

err = nft_reject_bridge_validate_hooks(ctx->chain);
if (err < 0)
return err;

if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
@@ -116,6 +382,13 @@ static int nft_reject_bridge_dump(struct sk_buff *skb,
return -1;
}
static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_reject_bridge_validate_hooks(ctx->chain);
}
static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
@@ -123,6 +396,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
.eval = nft_reject_bridge_eval,
.init = nft_reject_bridge_init,
.dump = nft_reject_bridge_dump,
.validate = nft_reject_bridge_validate,
};

static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
...
@@ -12,43 +12,39 @@
#include <net/route.h>
#include <net/dst.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_reject.h>

const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook)
{
const struct tcphdr *oth;

/* IP header checks: fragment. */
if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
return NULL;

oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
sizeof(struct tcphdr), _oth);
if (oth == NULL)
return NULL;

/* No RST for RST. */
if (oth->rst)
return NULL;

/* Check checksum */
if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
return NULL;

return oth;
}
EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);

struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
__be16 protocol, int ttl)
{
struct iphdr *niph, *oiph = ip_hdr(oldskb);

skb_reset_network_header(nskb);
niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
@@ -57,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
niph->tos = 0;
niph->id = 0;
niph->frag_off = htons(IP_DF);
niph->protocol = protocol;
niph->check = 0;
niph->saddr = oiph->daddr;
niph->daddr = oiph->saddr;
niph->ttl = ttl;

nskb->protocol = htons(ETH_P_IP);

return niph;
}
EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);

void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth)
{
struct iphdr *niph = ip_hdr(nskb);
struct tcphdr *tcph;

skb_reset_transport_header(nskb);
tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
@@ -69,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
tcph->dest = oth->source;
tcph->doff = sizeof(struct tcphdr) / 4;

if (oth->ack) {
tcph->seq = oth->ack_seq;
} else {
tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
oldskb->len - ip_hdrlen(oldskb) -
(oth->doff << 2));
@@ -84,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (unsigned char *)tcph - nskb->head;
nskb->csum_offset = offsetof(struct tcphdr, check);
}
EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
/* Send RST reply */
void nf_send_reset(struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
const struct iphdr *oiph;
struct iphdr *niph;
const struct tcphdr *oth;
struct tcphdr _oth;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
if (!oth)
return;
if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
return;
oiph = ip_hdr(oldskb);
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return;
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set_noref(nskb, skb_dst(oldskb));

skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
ip4_dst_hoplimit(skb_dst(nskb)));
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);

if (ip_route_me_harder(nskb, RTN_UNSPEC))
goto free_nskb;

/* "Never happens" */
if (nskb->len > dst_mtu(skb_dst(nskb)))
goto free_nskb;
...
@@ -12,116 +12,102 @@
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/ipv6/nf_reject.h>

const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *otcph,
unsigned int *otcplen, int hook)
{
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
u8 proto;
__be16 frag_off;
int tcphoff;

proto = oip6h->nexthdr;
tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data),
&proto, &frag_off);

if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
pr_debug("Cannot get TCP header.\n");
return NULL;
}

*otcplen = oldskb->len - tcphoff;

/* IP header checks: fragment, too short. */
if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) {
pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n",
proto, *otcplen);
return NULL;
}

otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr),
otcph);
if (otcph == NULL)
return NULL;

/* No RST for RST. */
if (otcph->rst) {
pr_debug("RST is set\n");
return NULL;
}

/* Check checksum. */
if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
pr_debug("TCP checksum is invalid\n");
return NULL;
}

return otcph;
}
EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);

struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
__be16 protocol, int hoplimit)
{
struct ipv6hdr *ip6h;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
#define DEFAULT_TOS_VALUE 0x0U
const __u8 tclass = DEFAULT_TOS_VALUE;

skb_put(nskb, sizeof(struct ipv6hdr));
skb_reset_network_header(nskb);
ip6h = ipv6_hdr(nskb);
ip6_flow_hdr(ip6h, tclass, 0);
ip6h->hop_limit = hoplimit;
ip6h->nexthdr = protocol;
ip6h->saddr = oip6h->daddr;
ip6h->daddr = oip6h->saddr;

nskb->protocol = htons(ETH_P_IPV6);

return ip6h;
}
EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put);

void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen)
{
struct tcphdr *tcph;
int needs_ack;

skb_reset_transport_header(nskb);
tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
/* Truncate to length (no data) */
tcph->doff = sizeof(struct tcphdr)/4;
tcph->source = oth->dest;
tcph->dest = oth->source;

if (oth->ack) {
needs_ack = 0;
tcph->seq = oth->ack_seq;
tcph->ack_seq = 0;
} else {
needs_ack = 1;
tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
otcplen - (oth->doff<<2));
tcph->seq = 0;
}
@@ -139,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
sizeof(struct tcphdr), IPPROTO_TCP,
csum_partial(tcph,
sizeof(struct tcphdr), 0));
}
EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
struct tcphdr _otcph;
const struct tcphdr *otcph;
unsigned int otcplen, hh_len;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
struct ipv6hdr *ip6h;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
(!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
pr_debug("addr is not unicast.\n");
return;
}
otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
if (!otcph)
return;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
fl6.saddr = oip6h->daddr;
fl6.daddr = oip6h->saddr;
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
if (dst == NULL || dst->error) {
dst_release(dst);
return;
}
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
if (IS_ERR(dst))
return;
hh_len = (dst->dev->hard_header_len + 15)&~15;
nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
+ sizeof(struct tcphdr) + dst->trailer_len,
GFP_ATOMIC);
if (!nskb) {
net_dbg_ratelimited("cannot alloc skb\n");
dst_release(dst);
return;
}
skb_dst_set(nskb, dst);
skb_reserve(nskb, hh_len + dst->header_len);
ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
ip6_dst_hoplimit(dst));
nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
nf_ct_attach(nskb, oldskb);
...
@@ -316,7 +316,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
local))) {
IP_VS_DBG_RL("We are crossing local and non-local addresses"
" daddr=%pI4\n", &daddr);
goto err_put;
}
@@ -458,7 +458,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
local))) {
IP_VS_DBG_RL("We are crossing local and non-local addresses"
" daddr=%pI6\n", daddr);
goto err_put;
}
...