Commit 3dca3f38 authored by Steffen Klassert

xfrm: Separate ESP handling from segmentation for GRO packets.

We change the ESP GSO handlers to only segment the packets.
The ESP handling and encryption are deferred to validate_xmit_xfrm(),
where this is done for non-GRO packets too. This makes the code
more robust and prepares for asynchronous crypto handling.
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
parent f39a5c01
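
In outline, the per-segment loop that esp4_gso_segment() and esp6_gso_segment() used to duplicate moves into validate_xmit_xfrm(): the GSO handlers now only mark the skb with XFRM_GSO_SEGMENT and segment it, while validate_xmit_xfrm() walks the resulting list, stamps sequence numbers and runs the ESP output path on each segment. The standalone sketch below mirrors just the sequence-number walk from that loop; struct seg and assign_seq() are hypothetical stand-ins for struct sk_buff and the kernel loop, not kernel API:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct sk_buff plus its xfrm offload
     * metadata; the real kernel types carry far more state. */
    struct seg {
            struct seg *next;
            int gso_segs;     /* >1 if this entry is still a GSO super-packet */
            uint32_t seq_low; /* assigned ESP sequence number (low 32 bits) */
    };

    /* Mirror of the loop validate_xmit_xfrm() now runs: each segment gets
     * the current sequence number, and the counter advances by the number
     * of packets that segment represents. */
    static void assign_seq(struct seg *head, uint32_t seq)
    {
            struct seg *s;

            for (s = head; s; s = s->next) {
                    s->seq_low = seq;
                    seq += s->gso_segs > 1 ? s->gso_segs : 1;
            }
    }

    int main(void)
    {
            struct seg c = { NULL, 1, 0 };
            struct seg b = { &c, 4, 0 };  /* still-GSO segment covering 4 packets */
            struct seg a = { &b, 1, 0 };
            struct seg *s;

            assign_seq(&a, 1000);

            for (s = &a; s; s = s->next)
                    printf("seq %u\n", (unsigned)s->seq_low);  /* 1000, 1001, 1005 */
            return 0;
    }

A list entry that is still a GSO super-packet advances the counter by its gso_segs count, exactly as the patch does with skb_shinfo(skb2)->gso_segs, so the ESP sequence space stays contiguous.
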
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1888,7 +1888,7 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 void __net_init xfrm_dev_init(void);

 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                        struct xfrm_user_offload *xuo);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1929,9 +1929,9 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
     }
 }
 #else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
-    return 0;
+    return skb;
 }

 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
...
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3083,9 +3083,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
             __skb_linearize(skb))
                 goto out_kfree_skb;

-        if (validate_xmit_xfrm(skb, features))
-            goto out_kfree_skb;
-
         /* If packet is not checksummed and device does not
          * support checksumming for this protocol, complete
          * checksumming here.
@@ -3102,6 +3099,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
         }
     }

+    skb = validate_xmit_xfrm(skb, features);
+
     return skb;

 out_kfree_skb:
...
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                         netdev_features_t features)
 {
-    __u32 seq;
-    int err = 0;
-    struct sk_buff *skb2;
     struct xfrm_state *x;
     struct ip_esp_hdr *esph;
     struct crypto_aead *aead;
-    struct sk_buff *segs = ERR_PTR(-EINVAL);
     netdev_features_t esp_features = features;
     struct xfrm_offload *xo = xfrm_offload(skb);

     if (!xo)
-        goto out;
-
-    seq = xo->seq.low;
+        return ERR_PTR(-EINVAL);

     x = skb->sp->xvec[skb->sp->len - 1];
     aead = x->data;
     esph = ip_esp_hdr(skb);

     if (esph->spi != x->id.spi)
-        goto out;
+        return ERR_PTR(-EINVAL);

     if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-        goto out;
+        return ERR_PTR(-EINVAL);

     __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

     skb->encap_hdr_csum = 1;

-    if (!(features & NETIF_F_HW_ESP))
+    if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+        (x->xso.dev != skb->dev))
         esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

-    segs = x->outer_mode->gso_segment(x, skb, esp_features);
-    if (IS_ERR_OR_NULL(segs))
-        goto out;
-
-    __skb_pull(skb, skb->data - skb_mac_header(skb));
-
-    skb2 = segs;
-    do {
-        struct sk_buff *nskb = skb2->next;
-
-        xo = xfrm_offload(skb2);
-        xo->flags |= XFRM_GSO_SEGMENT;
-        xo->seq.low = seq;
-        xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-        if(!(features & NETIF_F_HW_ESP))
-            xo->flags |= CRYPTO_FALLBACK;
-
-        x->outer_mode->xmit(x, skb2);
-
-        err = x->type_offload->xmit(x, skb2, esp_features);
-        if (err) {
-            kfree_skb_list(segs);
-            return ERR_PTR(err);
-        }
-
-        if (!skb_is_gso(skb2))
-            seq++;
-        else
-            seq += skb_shinfo(skb2)->gso_segs;
-
-        skb_push(skb2, skb2->mac_len);
-        skb2 = nskb;
-    } while (skb2);
+    xo->flags |= XFRM_GSO_SEGMENT;

-out:
-    return segs;
+    return x->outer_mode->gso_segment(x, skb, esp_features);
 }

 static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
     struct crypto_aead *aead;
     struct esp_info esp;
     bool hw_offload = true;
+    __u32 seq;

     esp.inplace = true;

@@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
         return esp.nfrags;
     }

+    seq = xo->seq.low;
+
     esph = esp.esph;
     esph->spi = x->id.spi;

     skb_push(skb, -skb_network_offset(skb));

     if (xo->flags & XFRM_GSO_SEGMENT) {
-        esph->seq_no = htonl(xo->seq.low);
-    } else {
-        ip_hdr(skb)->tot_len = htons(skb->len);
-        ip_send_check(ip_hdr(skb));
+        esph->seq_no = htonl(seq);
+
+        if (!skb_is_gso(skb))
+            xo->seq.low++;
+        else
+            xo->seq.low += skb_shinfo(skb)->gso_segs;
     }

+    esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+    ip_hdr(skb)->tot_len = htons(skb->len);
+    ip_send_check(ip_hdr(skb));
+
     if (hw_offload)
         return 0;

-    esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
     err = esp_output_tail(x, skb, &esp);
     if (err)
         return err;
...
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
     __skb_push(skb, skb->mac_len);
     return skb_mac_gso_segment(skb, features);
 }

 static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
     struct xfrm_offload *xo = xfrm_offload(skb);

-    if (xo->flags & XFRM_GSO_SEGMENT) {
-        skb->network_header = skb->network_header - x->props.header_len;
+    if (xo->flags & XFRM_GSO_SEGMENT)
         skb->transport_header = skb->network_header +
                                 sizeof(struct iphdr);
-    }

     skb_reset_mac_len(skb);
     pskb_pull(skb, skb->mac_len + x->props.header_len);
...
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
                                         netdev_features_t features)
 {
-    __u32 seq;
-    int err = 0;
-    struct sk_buff *skb2;
     struct xfrm_state *x;
     struct ip_esp_hdr *esph;
     struct crypto_aead *aead;
-    struct sk_buff *segs = ERR_PTR(-EINVAL);
     netdev_features_t esp_features = features;
     struct xfrm_offload *xo = xfrm_offload(skb);

     if (!xo)
-        goto out;
-
-    seq = xo->seq.low;
+        return ERR_PTR(-EINVAL);

     x = skb->sp->xvec[skb->sp->len - 1];
     aead = x->data;
     esph = ip_esp_hdr(skb);

     if (esph->spi != x->id.spi)
-        goto out;
+        return ERR_PTR(-EINVAL);

     if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-        goto out;
+        return ERR_PTR(-EINVAL);

     __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

     skb->encap_hdr_csum = 1;

-    if (!(features & NETIF_F_HW_ESP))
+    if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+        (x->xso.dev != skb->dev))
         esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

-    segs = x->outer_mode->gso_segment(x, skb, esp_features);
-    if (IS_ERR_OR_NULL(segs))
-        goto out;
-
-    __skb_pull(skb, skb->data - skb_mac_header(skb));
-
-    skb2 = segs;
-    do {
-        struct sk_buff *nskb = skb2->next;
-
-        xo = xfrm_offload(skb2);
-        xo->flags |= XFRM_GSO_SEGMENT;
-        xo->seq.low = seq;
-        xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-        if(!(features & NETIF_F_HW_ESP))
-            xo->flags |= CRYPTO_FALLBACK;
-
-        x->outer_mode->xmit(x, skb2);
-
-        err = x->type_offload->xmit(x, skb2, esp_features);
-        if (err) {
-            kfree_skb_list(segs);
-            return ERR_PTR(err);
-        }
-
-        if (!skb_is_gso(skb2))
-            seq++;
-        else
-            seq += skb_shinfo(skb2)->gso_segs;
-
-        skb_push(skb2, skb2->mac_len);
-        skb2 = nskb;
-    } while (skb2);
+    xo->flags |= XFRM_GSO_SEGMENT;

-out:
-    return segs;
+    return x->outer_mode->gso_segment(x, skb, esp_features);
 }

 static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)

 static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
 {
+    int len;
     int err;
     int alen;
     int blksize;
@@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
     struct crypto_aead *aead;
     struct esp_info esp;
     bool hw_offload = true;
+    __u32 seq;

     esp.inplace = true;

@@ -265,28 +228,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
         return esp.nfrags;
     }

+    seq = xo->seq.low;
+
     esph = ip_esp_hdr(skb);
     esph->spi = x->id.spi;

     skb_push(skb, -skb_network_offset(skb));

     if (xo->flags & XFRM_GSO_SEGMENT) {
-        esph->seq_no = htonl(xo->seq.low);
-    } else {
-        int len;
-
-        len = skb->len - sizeof(struct ipv6hdr);
-        if (len > IPV6_MAXPLEN)
-            len = 0;
+        esph->seq_no = htonl(seq);

-        ipv6_hdr(skb)->payload_len = htons(len);
+        if (!skb_is_gso(skb))
+            xo->seq.low++;
+        else
+            xo->seq.low += skb_shinfo(skb)->gso_segs;
     }

+    esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+    len = skb->len - sizeof(struct ipv6hdr);
+    if (len > IPV6_MAXPLEN)
+        len = 0;
+
+    ipv6_hdr(skb)->payload_len = htons(len);
+
     if (hw_offload)
         return 0;

-    esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
     err = esp6_output_tail(x, skb, &esp);
     if (err)
         return err;
...
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
     __skb_push(skb, skb->mac_len);
     return skb_mac_gso_segment(skb, features);
 }

 static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
     struct xfrm_offload *xo = xfrm_offload(skb);

-    if (xo->flags & XFRM_GSO_SEGMENT) {
-        skb->network_header = skb->network_header - x->props.header_len;
+    if (xo->flags & XFRM_GSO_SEGMENT)
         skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
-    }

     skb_reset_mac_len(skb);
     pskb_pull(skb, skb->mac_len + x->props.header_len);
...
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -23,32 +23,99 @@
 #include <linux/notifier.h>

 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
     int err;
+    __u32 seq;
     struct xfrm_state *x;
+    struct sk_buff *skb2;
+    netdev_features_t esp_features = features;
     struct xfrm_offload *xo = xfrm_offload(skb);

-    if (skb_is_gso(skb))
-        return 0;
+    if (!xo)
+        return skb;

-    if (xo) {
-        x = skb->sp->xvec[skb->sp->len - 1];
-        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-            return 0;
+    if (!(features & NETIF_F_HW_ESP))
+        esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+    x = skb->sp->xvec[skb->sp->len - 1];
+    if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+        return skb;
+
+    if (skb_is_gso(skb)) {
+        struct net_device *dev = skb->dev;
+
+        if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+            struct sk_buff *segs;
+
+            /* Packet got rerouted, fixup features and segment it. */
+            esp_features = esp_features & ~(NETIF_F_HW_ESP
+                                            | NETIF_F_GSO_ESP);
+
+            segs = skb_gso_segment(skb, esp_features);
+            if (IS_ERR(segs)) {
+                XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                kfree_skb(skb);
+                return NULL;
+            } else {
+                consume_skb(skb);
+                skb = segs;
+            }
+        } else {
+            return skb;
+        }
+    }

+    if (!skb->next) {
         x->outer_mode->xmit(x, skb);

-        err = x->type_offload->xmit(x, skb, features);
+        err = x->type_offload->xmit(x, skb, esp_features);
         if (err) {
             XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-            return err;
+            kfree_skb(skb);
+            return NULL;
         }

         skb_push(skb, skb->data - skb_mac_header(skb));
+
+        return skb;
     }

-    return 0;
+    skb2 = skb;
+    seq = xo->seq.low;
+
+    do {
+        struct sk_buff *nskb = skb2->next;
+
+        xo = xfrm_offload(skb2);
+        xo->flags |= XFRM_GSO_SEGMENT;
+        xo->seq.low = seq;
+        xo->seq.hi = xfrm_replay_seqhi(x, seq);
+
+        if(!(features & NETIF_F_HW_ESP))
+            xo->flags |= CRYPTO_FALLBACK;
+
+        x->outer_mode->xmit(x, skb2);
+
+        err = x->type_offload->xmit(x, skb2, esp_features);
+        if (err) {
+            XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+            skb2->next = nskb;
+            kfree_skb_list(skb2);
+            return NULL;
+        }
+
+        if (!skb_is_gso(skb2))
+            seq++;
+        else
+            seq += skb_shinfo(skb2)->gso_segs;
+
+        skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+        skb2 = nskb;
+    } while (skb2);
+
+    return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
...
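
A note on the esp.seqno lines in the hunks above: with extended sequence numbers, the 64-bit value handed to the crypto layer is composed from two 32-bit counters; the xo->seq.hi half comes from xfrm_replay_seqhi() in the loop above, which accounts for wraparound of the low half. A quick standalone check of that composition (plain C; the cpu_to_be64() byte swap is omitted):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t seq = 5;      /* low 32 bits of the ESP sequence number */
            uint32_t seq_hi = 1;   /* high half: low counter has wrapped once */

            /* Same arithmetic as cpu_to_be64(seq + ((u64)xo->seq.hi << 32)),
             * minus the endianness conversion. */
            uint64_t esn = (uint64_t)seq + ((uint64_t)seq_hi << 32);

            printf("0x%016llx\n", (unsigned long long)esn);  /* 0x0000000100000005 */
            return 0;
    }
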