Commit 32b583a0 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec

Steffen Klassert says:

====================
pull request (net): ipsec 2016-05-04

1) The flowcache can hit an OOM condition if too
   many entries are in the gc_list. Fix this by
   counting the entries in the gc_list and refusing
   new allocations if the count is too high.

2) The inner headers are invalid after an xfrm transformation,
   so reset the skb encapsulation field to ensure nobody tries
   to access the inner headers. Otherwise tunnel devices stacked
   on top of xfrm may build the outer headers based on wrong
   information.

3) Add pmtu handling to vti; we need it to report
   pmtu information for locally generated packets.

Please pull or let me know if there are problems.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5f8e4474 d6af1a31
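
For readers skimming the diff below, here is a minimal user-space sketch of the
back-pressure scheme described in item 1. All names (gc_backlog, queue_for_gc,
alloc_cache_entry, HIGH_WATERMARK) are hypothetical stand-ins, not the kernel
symbols; the actual change is in the flow cache hunks that follow. The idea: an
atomic counter tracks entries queued for deferred garbage collection, and new
allocations are refused while that backlog exceeds a watermark.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define HIGH_WATERMARK 4	/* hypothetical limit, stands in for fc->high_watermark */

static atomic_int gc_backlog;	/* entries waiting for deferred gc */

/* Queue an entry for garbage collection: the backlog grows. */
static void queue_for_gc(void)
{
	atomic_fetch_add(&gc_backlog, 1);
}

/* The gc worker reclaims one queued entry: the backlog shrinks. */
static void gc_reclaim_one(void)
{
	atomic_fetch_sub(&gc_backlog, 1);
}

/* Refuse a new cache entry while too much garbage is pending,
 * so the cache cannot allocate faster than the gc can free. */
static void *alloc_cache_entry(void)
{
	if (atomic_load(&gc_backlog) > HIGH_WATERMARK)
		return NULL;	/* mirrors the new -ENOBUFS path */
	return malloc(64);
}

int main(void)
{
	void *p;

	for (int i = 0; i < 6; i++)
		queue_for_gc();
	p = alloc_cache_entry();
	printf("backlog 6: %s\n", p ? "allocated" : "refused");
	free(p);

	for (int i = 0; i < 4; i++)
		gc_reclaim_one();
	p = alloc_cache_entry();
	printf("backlog 2: %s\n", p ? "allocated" : "refused");
	free(p);
	return 0;
}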
@@ -80,6 +80,7 @@ struct netns_xfrm {
 	struct flow_cache	flow_cache_global;
 	atomic_t		flow_cache_genid;
 	struct list_head	flow_cache_gc_list;
+	atomic_t		flow_cache_gc_count;
 	spinlock_t		flow_cache_gc_lock;
 	struct work_struct	flow_cache_gc_work;
 	struct work_struct	flow_cache_flush_work;
...
@@ -92,8 +92,11 @@ static void flow_cache_gc_task(struct work_struct *work)
 	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
 	spin_unlock_bh(&xfrm->flow_cache_gc_lock);
-	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
 		flow_entry_kill(fce, xfrm);
+		atomic_dec(&xfrm->flow_cache_gc_count);
+		WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+	}
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
@@ -101,6 +104,7 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
 				     struct netns_xfrm *xfrm)
 {
 	if (deleted) {
+		atomic_add(deleted, &xfrm->flow_cache_gc_count);
 		fcp->hash_count -= deleted;
 		spin_lock_bh(&xfrm->flow_cache_gc_lock);
 		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
@@ -232,6 +236,13 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 		if (fcp->hash_count > fc->high_watermark)
 			flow_cache_shrink(fc, fcp);
 
+		if (fcp->hash_count > 2 * fc->high_watermark ||
+		    atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+			atomic_inc(&net->xfrm.flow_cache_genid);
+			flo = ERR_PTR(-ENOBUFS);
+			goto ret_object;
+		}
+
 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->net = net;
@@ -446,6 +457,7 @@ int flow_cache_init(struct net *net)
 	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
 	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
 	mutex_init(&net->xfrm.flow_flush_sem);
+	atomic_set(&net->xfrm.flow_cache_gc_count, 0);
 
 	fc->hash_shift = 10;
 	fc->low_watermark = 2 * flow_cache_hash_size(fc);
...
@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *tdev;	/* Device to other host */
 	int err;
+	int mtu;
 
 	if (!dst) {
 		dev->stats.tx_carrier_errors++;
@@ -192,6 +193,23 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 		tunnel->err_count = 0;
 	}
 
+	mtu = dst_mtu(dst);
+	if (skb->len > mtu) {
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+		if (skb->protocol == htons(ETH_P_IP)) {
+			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+				  htonl(mtu));
+		} else {
+			if (mtu < IPV6_MIN_MTU)
+				mtu = IPV6_MIN_MTU;
+
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		}
+
+		dst_release(dst);
+		goto tx_error;
+	}
+
 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
 	skb_dst_set(skb, dst);
 	skb->dev = skb_dst(skb)->dev;
...
@@ -99,6 +99,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 		skb_dst_force(skb);
 
+		/* Inner headers are invalid now. */
+		skb->encapsulation = 0;
+
 		err = x->type->output(x, skb);
 		if (err == -EINPROGRESS)
 			goto out;
...
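
And a similarly hedged sketch of the MTU decision implemented in the vti_xmit()
hunk above: oversized IPv4 packets trigger an ICMP "fragmentation needed" error
carrying the path MTU, oversized IPv6 packets trigger "packet too big", and the
reported IPv6 MTU is clamped up to IPV6_MIN_MTU (1280). report_pmtu() and the
proto enum are hypothetical names used only for illustration, not kernel APIs.

#include <stdio.h>

#define IPV6_MIN_MTU 1280

enum proto { PROTO_IPV4, PROTO_IPV6 };

/* Return the MTU value that would be reported back to the sender,
 * or 0 if the packet fits and no error needs to be generated. */
static unsigned int report_pmtu(enum proto proto, unsigned int pkt_len,
				unsigned int path_mtu)
{
	if (pkt_len <= path_mtu)
		return 0;		/* fits: transmit normally */

	if (proto == PROTO_IPV4)
		return path_mtu;	/* ICMP_DEST_UNREACH/ICMP_FRAG_NEEDED */

	/* ICMPV6_PKT_TOOBIG never reports less than the IPv6 minimum MTU. */
	return path_mtu < IPV6_MIN_MTU ? IPV6_MIN_MTU : path_mtu;
}

int main(void)
{
	printf("%u\n", report_pmtu(PROTO_IPV4, 1500, 1400));	/* 1400 */
	printf("%u\n", report_pmtu(PROTO_IPV6, 1500, 1000));	/* 1280 */
	printf("%u\n", report_pmtu(PROTO_IPV6, 900, 1400));	/* 0 */
	return 0;
}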