Commit e7b63ff1 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2015-10-30

1) The flow cache is bounded by the flow cache limit, which scales
   with the number of cpus, and by the xfrm garbage collector
   threshold, which is independent of the number of cpus. As a
   result, on systems with more than 16 cpus we hit the xfrm
   garbage collector limit and refuse new allocations, so new
   flows are dropped. On systems with 16 or fewer cpus we hit the
   flowcache limit instead; in that case we shrink the flow cache
   rather than refusing new flows.

   We increase the xfrm garbage collector threshold to INT_MAX
   to get the same behaviour, independent of the number of cpus.

2) Fix some unaligned accesses on sparc systems.
   From Sowmini Varadhan.

3) Fix some header checks in _decode_session4. We may call
   pskb_may_pull with a negative value that gets converted to an
   unsigned int. This can lead to incorrect policy lookups. We fix
   this by checking the data pointer position before we call
   pskb_may_pull.

4) Reload skb header pointers after calling pskb_may_pull
   in _decode_session4 as this may change the pointers into
   the packet.

5) Add a missing statistic counter on inner mode errors.

Please pull or let me know if there are problems.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a493bcf8 cb866e32
@@ -1216,7 +1216,8 @@ tag - INTEGER
 xfrm4_gc_thresh - INTEGER
 	The threshold at which we will start garbage collecting for IPv4
 	destination cache entries. At twice this value the system will
-	refuse new allocations.
+	refuse new allocations. The value must be set below the flowcache
+	limit (4096 * number of online cpus) to take effect.
 
 igmp_link_local_mcast_reports - BOOLEAN
 	Enable IGMP reports for link local multicast groups in the
@@ -1662,7 +1663,8 @@ ratelimit - INTEGER
 xfrm6_gc_thresh - INTEGER
 	The threshold at which we will start garbage collecting for IPv6
 	destination cache entries. At twice this value the system will
-	refuse new allocations.
+	refuse new allocations. The value must be set below the flowcache
+	limit (4096 * number of online cpus) to take effect.
 
 IPv6 Update by:
......
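The arithmetic behind item 1) of the pull request follows from the numbers in this hunk: the flowcache limit is 4096 entries per online cpu, while the previous .gc_thresh default of 32768 (see the xfrm4_dst_ops/xfrm6_dst_ops hunks further down) refuses allocations at twice that value, i.e. 65536 entries. The snippet below is only an illustrative userspace sketch, not kernel code; it prints which limit is reached first for a few cpu counts, and beyond 16 cpus the garbage collector threshold is hit first, which is exactly the behaviour the series removes by raising the threshold to INT_MAX.

/* Illustrative userspace sketch (not kernel code): compare the cpu-scaled
 * flowcache limit with the refuse point implied by the old gc threshold. */
#include <stdio.h>

int main(void)
{
	const long flowcache_per_cpu = 4096;	/* from the ip-sysctl.txt text above */
	const long old_gc_thresh = 32768;	/* previous .gc_thresh default in the diff */

	for (long cpus = 4; cpus <= 64; cpus *= 2) {
		long flowcache_limit = flowcache_per_cpu * cpus;
		long gc_refuse_point = 2 * old_gc_thresh; /* "at twice this value ... refuse" */

		printf("%2ld cpus: flowcache limit %6ld, gc refuse point %6ld -> %s hit first\n",
		       cpus, flowcache_limit, gc_refuse_point,
		       flowcache_limit <= gc_refuse_point ? "flowcache limit" : "gc threshold");
	}
	return 0;
}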
@@ -127,7 +127,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 		case IPPROTO_DCCP:
 			if (xprth + 4 < skb->data ||
 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
-				__be16 *ports = (__be16 *)xprth;
+				__be16 *ports;
+
+				xprth = skb_network_header(skb) + iph->ihl * 4;
+				ports = (__be16 *)xprth;
 
 				fl4->fl4_sport = ports[!!reverse];
 				fl4->fl4_dport = ports[!reverse];
@@ -135,8 +138,12 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 			break;
 
 		case IPPROTO_ICMP:
-			if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
-				u8 *icmp = xprth;
+			if (xprth + 2 < skb->data ||
+			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
+				u8 *icmp;
+
+				xprth = skb_network_header(skb) + iph->ihl * 4;
+				icmp = xprth;
 
 				fl4->fl4_icmp_type = icmp[0];
 				fl4->fl4_icmp_code = icmp[1];
@@ -144,33 +151,50 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 			break;
 
 		case IPPROTO_ESP:
-			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
-				__be32 *ehdr = (__be32 *)xprth;
+			if (xprth + 4 < skb->data ||
+			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
+				__be32 *ehdr;
+
+				xprth = skb_network_header(skb) + iph->ihl * 4;
+				ehdr = (__be32 *)xprth;
 
 				fl4->fl4_ipsec_spi = ehdr[0];
 			}
 			break;
 
 		case IPPROTO_AH:
-			if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
-				__be32 *ah_hdr = (__be32 *)xprth;
+			if (xprth + 8 < skb->data ||
+			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
+				__be32 *ah_hdr;
+
+				xprth = skb_network_header(skb) + iph->ihl * 4;
+				ah_hdr = (__be32 *)xprth;
 
 				fl4->fl4_ipsec_spi = ah_hdr[1];
 			}
 			break;
 
 		case IPPROTO_COMP:
-			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
-				__be16 *ipcomp_hdr = (__be16 *)xprth;
+			if (xprth + 4 < skb->data ||
+			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
+				__be16 *ipcomp_hdr;
+
+				xprth = skb_network_header(skb) + iph->ihl * 4;
+				ipcomp_hdr = (__be16 *)xprth;
 
 				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
 			}
 			break;
 
 		case IPPROTO_GRE:
-			if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
-				__be16 *greflags = (__be16 *)xprth;
-				__be32 *gre_hdr = (__be32 *)xprth;
+			if (xprth + 12 < skb->data ||
+			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
+				__be16 *greflags;
+				__be32 *gre_hdr;
+
+				xprth = skb_network_header(skb) + iph->ihl * 4;
+				greflags = (__be16 *)xprth;
+				gre_hdr = (__be32 *)xprth;
 
 				if (greflags[0] & GRE_KEY) {
 					if (greflags[0] & GRE_CSUM)
@@ -244,7 +268,7 @@ static struct dst_ops xfrm4_dst_ops = {
 	.destroy = xfrm4_dst_destroy,
 	.ifdown = xfrm4_dst_ifdown,
 	.local_out = __ip_local_out,
-	.gc_thresh = 32768,
+	.gc_thresh = INT_MAX,
 };
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
......
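Two things happen in the _decode_session4 hunks above: a bounds check (xprth + N < skb->data) is added before pskb_may_pull, and xprth is recomputed from skb_network_header() afterwards, because pskb_may_pull may reallocate the skb's linear data and invalidate previously cached pointers. The userspace sketch below only illustrates the first problem, the implicit conversion of a negative pointer difference into a huge unsigned length; the buffer, the pointer names and the may_pull stand-in are hypothetical, not taken from the kernel.

/* Minimal userspace sketch of the pitfall the added bounds checks avoid. */
#include <stdio.h>

/* Hypothetical stand-in with the same "unsigned length" shape as
 * pskb_may_pull(); it only reports what it was asked to do. */
static int may_pull(unsigned int len)
{
	printf("requested pull of %u bytes\n", len);
	return 0;
}

int main(void)
{
	char buf[64];
	char *data = buf + 32;	/* current parse position */
	char *hdr  = buf + 16;	/* header that already lies behind data */

	/* Unguarded: hdr + 4 - data is -12, which the unsigned parameter
	 * silently turns into a value of roughly 4 billion. */
	may_pull(hdr + 4 - data);

	/* Guarded, as the patch does: only request a pull when the header
	 * really lies beyond the already-pulled data. */
	if (hdr + 4 < data)
		printf("header already pulled, nothing to do\n");
	else
		may_pull(hdr + 4 - data);

	return 0;
}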
@@ -288,7 +288,7 @@ static struct dst_ops xfrm6_dst_ops = {
 	.destroy = xfrm6_dst_destroy,
 	.ifdown = xfrm6_dst_ifdown,
 	.local_out = __ip6_local_out,
-	.gc_thresh = 32768,
+	.gc_thresh = INT_MAX,
 };
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
......
@@ -330,8 +330,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 		if (x->sel.family == AF_UNSPEC) {
 			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
-			if (inner_mode == NULL)
+			if (inner_mode == NULL) {
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
 				goto drop;
+			}
 		}
 
 		if (inner_mode->input(x, skb)) {
......
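The new XFRM_INC_STATS call above makes inner-mode failures visible in the xfrm statistics. Assuming the counter is exported through /proc/net/xfrm_stat under the usual XfrmInStateModeError name (the proc name normally associated with LINUX_MIB_XFRMINSTATEMODEERROR), a minimal reader such as the sketch below can be used to watch it; the helper is illustrative only.

/* Illustrative userspace helper: print the mode-error counter line from
 * /proc/net/xfrm_stat, if present on the running kernel. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/net/xfrm_stat", "r");
	char line[128];

	if (!f) {
		perror("fopen /proc/net/xfrm_stat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "XfrmInStateModeError"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}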
@@ -31,6 +31,7 @@
 #if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #endif
+#include <asm/unaligned.h>
 
 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
 {
@@ -728,7 +729,9 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
 	memcpy(&p->sel, &x->sel, sizeof(p->sel));
 	memcpy(&p->lft, &x->lft, sizeof(p->lft));
 	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
-	memcpy(&p->stats, &x->stats, sizeof(p->stats));
+	put_unaligned(x->stats.replay_window, &p->stats.replay_window);
+	put_unaligned(x->stats.replay, &p->stats.replay);
+	put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
 	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
 	p->mode = x->props.mode;
 	p->replay_window = x->props.replay_window;
......
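The copy_to_user_state change above replaces a struct-sized memcpy of the stats block with per-field put_unaligned() stores from <asm/unaligned.h>, so the writes no longer assume natural alignment of the destination inside the netlink message; unaligned stores can trap on strict-alignment machines such as sparc, which is the problem item 2) of the pull request addresses. The userspace sketch below shows the general idea with a hypothetical put_unaligned_u32 helper built on memcpy; it illustrates the technique and is not the kernel implementation.

/* Userspace illustration of an unaligned-safe store. A direct dereference
 * of a misaligned pointer can trap on strict-alignment architectures; a
 * byte-wise copy via memcpy is always well defined. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a put_unaligned()-style helper. */
static void put_unaligned_u32(uint32_t val, void *addr)
{
	memcpy(addr, &val, sizeof(val));	/* no alignment assumed */
}

int main(void)
{
	unsigned char msg[16] = { 0 };
	void *misaligned = msg + 1;		/* deliberately not 4-byte aligned */

	/* A cast-and-assign through (uint32_t *)misaligned would be undefined
	 * behaviour and can fault on sparc; the memcpy-based store is safe. */
	put_unaligned_u32(42, misaligned);

	uint32_t readback;
	memcpy(&readback, misaligned, sizeof(readback));
	printf("stored and read back: %u\n", readback);
	return 0;
}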