Commit 9f30e5c5 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2017-12-22

1) Separate ESP handling from segmentation for GRO packets.
   This unifies the IPsec GSO and non-GSO codepaths.

2) Add asynchronous callbacks for xfrm on layer 2. This
   adds the necessary infrastructure to core networking.

3) Allow the layer 2 IPsec GSO codepath to be used for software
   crypto; all the necessary infrastructure is now in place.

4) Also allow IPsec GSO with software crypto for local sockets.

5) Don't require a synchronous crypto fallback for IPsec offloading;
   it is no longer needed.

6) Check for xdo_dev_state_free and only call it if implemented.
   From Shannon Nelson.

7) Check for the required add and delete functions when a driver
   registers xfrmdev_ops. From Shannon Nelson. (A driver-side
   sketch follows below.)

8) Define xfrmdev_ops only with offload config.
   From Shannon Nelson.

9) Update the xfrm stats documentation.
   From Shannon Nelson.

Please pull or let me know if there are problems.
====================
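To make items 6)-8) concrete, here is a minimal driver-side sketch (not part
of this pull request; the foo_* names are hypothetical, while xfrmdev_ops, the
xdo_* callback signatures and NETIF_F_HW_ESP come from the headers changed
below). After this series, xdo_dev_state_add and xdo_dev_state_delete are
mandatory for any device advertising NETIF_F_HW_ESP, while xdo_dev_state_free
stays optional and is only called if implemented:

#ifdef CONFIG_XFRM_OFFLOAD
#include <linux/netdevice.h>
#include <net/xfrm.h>

/* Program the SA into the (hypothetical) device; returning 0 accepts it. */
static int foo_xdo_dev_state_add(struct xfrm_state *x)
{
        return 0;
}

/* Remove the SA from the device. */
static void foo_xdo_dev_state_delete(struct xfrm_state *x)
{
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
        .xdo_dev_state_add    = foo_xdo_dev_state_add,
        .xdo_dev_state_delete = foo_xdo_dev_state_delete,
        /* .xdo_dev_state_free is optional (patch 6): the core now checks
         * for it before calling.
         */
};

static void foo_setup_xfrm(struct net_device *dev)
{
        /* Without both mandatory callbacks above, xfrm_api_check() in the
         * xfrm_device.c diff below would reject this with NOTIFY_BAD.
         */
        dev->xfrmdev_ops = &foo_xfrmdev_ops;
        dev->features |= NETIF_F_HW_ESP;
        dev->hw_features |= NETIF_F_HW_ESP;
}
#endif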
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 04f629f7 1a4bb1d1
--- a/Documentation/networking/xfrm_proc.txt
+++ b/Documentation/networking/xfrm_proc.txt
@@ -5,13 +5,15 @@ Masahide NAKAMURA <nakam@linux-ipv6.org>
 Transformation Statistics
 -------------------------
-xfrm_proc is a statistics shown factor dropped by transformation
-for developer.
-It is a counter designed from current transformation source code
-and defined like linux private MIB.
+The xfrm_proc code is a set of statistics showing numbers of packets
+dropped by the transformation code and why. These counters are defined
+as part of the linux private MIB. These counters can be viewed in
+/proc/net/xfrm_stat.
 
-Inbound statistics
-~~~~~~~~~~~~~~~~~~
+Inbound errors
+~~~~~~~~~~~~~~
 XfrmInError:
         All errors which is not matched others
 XfrmInBufferError:
@@ -46,6 +48,10 @@ XfrmInPolBlock:
         Policy discards
 XfrmInPolError:
         Policy error
+XfrmAcquireError:
+        State hasn't been fully acquired before use
+XfrmFwdHdrError:
+        Forward routing of a packet is not allowed
 
 Outbound errors
 ~~~~~~~~~~~~~~~
@@ -72,3 +78,5 @@ XfrmOutPolDead:
         Policy is dead
 XfrmOutPolError:
         Policy error
+XfrmOutStateInvalid:
+        State is invalid, perhaps expired
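As an illustration only (userspace, not part of this series): the counters
documented above are plain "name value" lines, so dumping them takes a few
lines of C, equivalent to cat /proc/net/xfrm_stat:

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/net/xfrm_stat", "r");

        if (!f) {
                perror("/proc/net/xfrm_stat");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* e.g. "XfrmInError    0" */
        fclose(f);
        return 0;
}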
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1726,7 +1726,7 @@ struct net_device {
         const struct ndisc_ops *ndisc_ops;
 #endif
 
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
         const struct xfrmdev_ops *xfrmdev_ops;
 #endif
 
@@ -2793,7 +2793,9 @@ struct softnet_data {
         struct Qdisc            *output_queue;
         struct Qdisc            **output_queue_tailp;
         struct sk_buff          *completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+        struct sk_buff_head     xfrm_backlog;
+#endif
 #ifdef CONFIG_RPS
         /* input_queue_head should be written by cpu owning this struct,
          * and only read by other cpus. Worth using a cache line.
@@ -3325,7 +3327,7 @@ int dev_get_phys_port_id(struct net_device *dev,
 int dev_get_phys_port_name(struct net_device *dev,
                            char *name, size_t len);
 int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                     struct netdev_queue *txq, int *ret);
...
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1051,6 +1051,7 @@ struct xfrm_offload {
 #define XFRM_GSO_SEGMENT        16
 #define XFRM_GRO                32
 #define XFRM_ESP_NO_TRAILER     64
+#define XFRM_DEV_RESUME         128
 
         __u32                   status;
 #define CRYPTO_SUCCESS                          1
@@ -1874,21 +1875,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
 {
         return skb->sp->xvec[skb->sp->len - 1];
 }
+#endif
 
 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
         struct sec_path *sp = skb->sp;
 
         if (!sp || !sp->olen || sp->len != sp->olen)
                 return NULL;
 
         return &sp->ovec[sp->olen - 1];
-}
+#else
+        return NULL;
 #endif
+}
 
 void __net_init xfrm_dev_init(void);
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                        struct xfrm_user_offload *xuo);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1902,6 +1910,8 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
                 return false;
 
         xdst = (struct xfrm_dst *) dst;
+        if (!x->xso.offload_handle && !xdst->child->xfrm)
+                return true;
         if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
             !xdst->child->xfrm)
                 return true;
@@ -1923,15 +1933,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
         struct net_device *dev = xso->dev;
 
         if (dev && dev->xfrmdev_ops) {
-                dev->xfrmdev_ops->xdo_dev_state_free(x);
+                if (dev->xfrmdev_ops->xdo_dev_state_free)
+                        dev->xfrmdev_ops->xdo_dev_state_free(x);
                 xso->dev = NULL;
                 dev_put(dev);
         }
 }
 #else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
 {
-        return 0;
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+        return skb;
 }
 
 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
...
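A note on the header changes above: xfrm_offload() is now defined in all
configurations (returning NULL without CONFIG_XFRM), and the new
xfrm_dev_resume()/xfrm_dev_backlog()/validate_xmit_xfrm() entry points have
no-op stubs when CONFIG_XFRM_OFFLOAD is unset, so core networking can call
them without #ifdefs. A minimal sketch of the resulting calling convention
(the example_* wrapper is hypothetical; the semantics follow the stubs above):

#include <linux/netdevice.h>
#include <net/xfrm.h>

static struct sk_buff *example_tx_validate(struct sk_buff *skb,
                                           netdev_features_t features,
                                           bool *again)
{
        /* No IPsec offload state attached: nothing to do. */
        if (!xfrm_offload(skb))
                return skb;

        /* May return the skb unchanged, set *again when the per-cpu
         * xfrm backlog must drain first, or return NULL once the packet
         * has been handed to async crypto (resumed via xfrm_dev_resume()).
         */
        return validate_xmit_xfrm(skb, features, again);
}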
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3059,7 +3059,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_csum_hwoffload_help);
 
-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
         netdev_features_t features;
 
@@ -3083,9 +3083,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
             __skb_linearize(skb))
                         goto out_kfree_skb;
 
-        if (validate_xmit_xfrm(skb, features))
-                goto out_kfree_skb;
-
         /* If packet is not checksummed and device does not
          * support checksumming for this protocol, complete
          * checksumming here.
@@ -3102,6 +3099,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
                 }
         }
 
+        skb = validate_xmit_xfrm(skb, features, again);
+
         return skb;
 
 out_kfree_skb:
@@ -3111,7 +3110,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
         return NULL;
 }
 
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
         struct sk_buff *next, *head = NULL, *tail;
 
@@ -3122,7 +3121,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
                 /* in case skb wont be segmented, point to itself */
                 skb->prev = skb;
 
-                skb = validate_xmit_skb(skb, dev);
+                skb = validate_xmit_skb(skb, dev, again);
                 if (!skb)
                         continue;
 
@@ -3449,6 +3448,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
         struct netdev_queue *txq;
         struct Qdisc *q;
         int rc = -ENOMEM;
+        bool again = false;
 
         skb_reset_mac_header(skb);
 
@@ -3510,7 +3510,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
                                      XMIT_RECURSION_LIMIT))
                         goto recursion_alert;
 
-                skb = validate_xmit_skb(skb, dev);
+                skb = validate_xmit_skb(skb, dev, &again);
                 if (!skb)
                         goto out;
 
@@ -4194,6 +4194,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
                         spin_unlock(root_lock);
                 }
         }
+
+        xfrm_dev_backlog(sd);
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -8875,6 +8877,9 @@ static int __init net_dev_init(void)
 
                 skb_queue_head_init(&sd->input_pkt_queue);
                 skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+                skb_queue_head_init(&sd->xfrm_backlog);
+#endif
                 INIT_LIST_HEAD(&sd->poll_list);
                 sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
...
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
         struct sk_buff *skb = base->data;
+        struct xfrm_offload *xo = xfrm_offload(skb);
         void *tmp;
-        struct dst_entry *dst = skb_dst(skb);
-        struct xfrm_state *x = dst->xfrm;
+        struct xfrm_state *x;
+
+        if (xo && (xo->flags & XFRM_DEV_RESUME))
+                x = skb->sp->xvec[skb->sp->len - 1];
+        else
+                x = skb_dst(skb)->xfrm;
 
         tmp = ESP_SKB_CB(skb)->tmp;
         esp_ssg_unref(x, tmp);
         kfree(tmp);
-        xfrm_output_resume(skb, err);
+
+        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+                if (err) {
+                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                        kfree_skb(skb);
+                        return;
+                }
+
+                skb_push(skb, skb->data - skb_mac_header(skb));
+                secpath_reset(skb);
+                xfrm_dev_resume(skb);
+        } else {
+                xfrm_output_resume(skb, err);
+        }
 }
 
 /* Move ESP header back into place. */
@@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x)
         char aead_name[CRYPTO_MAX_ALG_NAME];
         struct crypto_aead *aead;
         int err;
-        u32 mask = 0;
 
         err = -ENAMETOOLONG;
         if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                      x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                 goto error;
 
-        if (x->xso.offload_handle)
-                mask |= CRYPTO_ALG_ASYNC;
-
-        aead = crypto_alloc_aead(aead_name, 0, mask);
+        aead = crypto_alloc_aead(aead_name, 0, 0);
         err = PTR_ERR(aead);
         if (IS_ERR(aead))
                 goto error;
@@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x)
         char authenc_name[CRYPTO_MAX_ALG_NAME];
         unsigned int keylen;
         int err;
-        u32 mask = 0;
 
         err = -EINVAL;
         if (!x->ealg)
@@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x)
                         goto error;
         }
 
-        if (x->xso.offload_handle)
-                mask |= CRYPTO_ALG_ASYNC;
-
-        aead = crypto_alloc_aead(authenc_name, 0, mask);
+        aead = crypto_alloc_aead(authenc_name, 0, 0);
         err = PTR_ERR(aead);
         if (IS_ERR(aead))
                 goto error;
...
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                         netdev_features_t features)
 {
-        __u32 seq;
-        int err = 0;
-        struct sk_buff *skb2;
         struct xfrm_state *x;
         struct ip_esp_hdr *esph;
         struct crypto_aead *aead;
-        struct sk_buff *segs = ERR_PTR(-EINVAL);
         netdev_features_t esp_features = features;
         struct xfrm_offload *xo = xfrm_offload(skb);
 
         if (!xo)
-                goto out;
-
-        seq = xo->seq.low;
+                return ERR_PTR(-EINVAL);
 
         x = skb->sp->xvec[skb->sp->len - 1];
         aead = x->data;
         esph = ip_esp_hdr(skb);
 
         if (esph->spi != x->id.spi)
-                goto out;
+                return ERR_PTR(-EINVAL);
 
         if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-                goto out;
+                return ERR_PTR(-EINVAL);
 
         __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
         skb->encap_hdr_csum = 1;
 
-        if (!(features & NETIF_F_HW_ESP))
+        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+            (x->xso.dev != skb->dev))
                 esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-        segs = x->outer_mode->gso_segment(x, skb, esp_features);
-        if (IS_ERR_OR_NULL(segs))
-                goto out;
-
-        __skb_pull(skb, skb->data - skb_mac_header(skb));
-
-        skb2 = segs;
-        do {
-                struct sk_buff *nskb = skb2->next;
-
-                xo = xfrm_offload(skb2);
-                xo->flags |= XFRM_GSO_SEGMENT;
-                xo->seq.low = seq;
-                xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-                if(!(features & NETIF_F_HW_ESP))
-                        xo->flags |= CRYPTO_FALLBACK;
-
-                x->outer_mode->xmit(x, skb2);
-
-                err = x->type_offload->xmit(x, skb2, esp_features);
-                if (err) {
-                        kfree_skb_list(segs);
-                        return ERR_PTR(err);
-                }
-
-                if (!skb_is_gso(skb2))
-                        seq++;
-                else
-                        seq += skb_shinfo(skb2)->gso_segs;
-
-                skb_push(skb2, skb2->mac_len);
-                skb2 = nskb;
-        } while (skb2);
+        xo->flags |= XFRM_GSO_SEGMENT;
 
-out:
-        return segs;
+        return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
         struct crypto_aead *aead;
         struct esp_info esp;
         bool hw_offload = true;
+        __u32 seq;
 
         esp.inplace = true;
 
@@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
                 return esp.nfrags;
         }
 
+        seq = xo->seq.low;
+
         esph = esp.esph;
         esph->spi = x->id.spi;
 
         skb_push(skb, -skb_network_offset(skb));
 
         if (xo->flags & XFRM_GSO_SEGMENT) {
-                esph->seq_no = htonl(xo->seq.low);
-        } else {
-                ip_hdr(skb)->tot_len = htons(skb->len);
-                ip_send_check(ip_hdr(skb));
+                esph->seq_no = htonl(seq);
+
+                if (!skb_is_gso(skb))
+                        xo->seq.low++;
+                else
+                        xo->seq.low += skb_shinfo(skb)->gso_segs;
         }
 
+        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+        ip_hdr(skb)->tot_len = htons(skb->len);
+        ip_send_check(ip_hdr(skb));
+
         if (hw_offload)
                 return 0;
 
-        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
         err = esp_output_tail(x, skb, &esp);
         if (err)
                 return err;
...
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
         __skb_push(skb, skb->mac_len);
         return skb_mac_gso_segment(skb, features);
 }
 
 static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
         struct xfrm_offload *xo = xfrm_offload(skb);
 
-        if (xo->flags & XFRM_GSO_SEGMENT) {
-                skb->network_header = skb->network_header - x->props.header_len;
+        if (xo->flags & XFRM_GSO_SEGMENT)
                 skb->transport_header = skb->network_header +
                                         sizeof(struct iphdr);
-        }
 
         skb_reset_mac_len(skb);
         pskb_pull(skb, skb->mac_len + x->props.header_len);
...
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
         struct sk_buff *skb = base->data;
+        struct xfrm_offload *xo = xfrm_offload(skb);
         void *tmp;
-        struct dst_entry *dst = skb_dst(skb);
-        struct xfrm_state *x = dst->xfrm;
+        struct xfrm_state *x;
+
+        if (xo && (xo->flags & XFRM_DEV_RESUME))
+                x = skb->sp->xvec[skb->sp->len - 1];
+        else
+                x = skb_dst(skb)->xfrm;
 
         tmp = ESP_SKB_CB(skb)->tmp;
         esp_ssg_unref(x, tmp);
         kfree(tmp);
-        xfrm_output_resume(skb, err);
+
+        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+                if (err) {
+                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                        kfree_skb(skb);
+                        return;
+                }
+
+                skb_push(skb, skb->data - skb_mac_header(skb));
+                secpath_reset(skb);
+                xfrm_dev_resume(skb);
+        } else {
+                xfrm_output_resume(skb, err);
+        }
 }
 
 /* Move ESP header back into place. */
@@ -734,17 +752,13 @@ static int esp_init_aead(struct xfrm_state *x)
         char aead_name[CRYPTO_MAX_ALG_NAME];
         struct crypto_aead *aead;
         int err;
-        u32 mask = 0;
 
         err = -ENAMETOOLONG;
         if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                      x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                 goto error;
 
-        if (x->xso.offload_handle)
-                mask |= CRYPTO_ALG_ASYNC;
-
-        aead = crypto_alloc_aead(aead_name, 0, mask);
+        aead = crypto_alloc_aead(aead_name, 0, 0);
         err = PTR_ERR(aead);
         if (IS_ERR(aead))
                 goto error;
@@ -774,7 +788,6 @@ static int esp_init_authenc(struct xfrm_state *x)
         char authenc_name[CRYPTO_MAX_ALG_NAME];
         unsigned int keylen;
         int err;
-        u32 mask = 0;
 
         err = -EINVAL;
         if (!x->ealg)
@@ -800,10 +813,7 @@ static int esp_init_authenc(struct xfrm_state *x)
                         goto error;
         }
 
-        if (x->xso.offload_handle)
-                mask |= CRYPTO_ALG_ASYNC;
-
-        aead = crypto_alloc_aead(authenc_name, 0, mask);
+        aead = crypto_alloc_aead(authenc_name, 0, 0);
         err = PTR_ERR(aead);
         if (IS_ERR(aead))
                 goto error;
...
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
                                         netdev_features_t features)
 {
-        __u32 seq;
-        int err = 0;
-        struct sk_buff *skb2;
         struct xfrm_state *x;
         struct ip_esp_hdr *esph;
         struct crypto_aead *aead;
-        struct sk_buff *segs = ERR_PTR(-EINVAL);
         netdev_features_t esp_features = features;
         struct xfrm_offload *xo = xfrm_offload(skb);
 
         if (!xo)
-                goto out;
-
-        seq = xo->seq.low;
+                return ERR_PTR(-EINVAL);
 
         x = skb->sp->xvec[skb->sp->len - 1];
         aead = x->data;
         esph = ip_esp_hdr(skb);
 
         if (esph->spi != x->id.spi)
-                goto out;
+                return ERR_PTR(-EINVAL);
 
         if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-                goto out;
+                return ERR_PTR(-EINVAL);
 
         __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
         skb->encap_hdr_csum = 1;
 
-        if (!(features & NETIF_F_HW_ESP))
+        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+            (x->xso.dev != skb->dev))
                 esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-        segs = x->outer_mode->gso_segment(x, skb, esp_features);
-        if (IS_ERR_OR_NULL(segs))
-                goto out;
-
-        __skb_pull(skb, skb->data - skb_mac_header(skb));
-
-        skb2 = segs;
-        do {
-                struct sk_buff *nskb = skb2->next;
-
-                xo = xfrm_offload(skb2);
-                xo->flags |= XFRM_GSO_SEGMENT;
-                xo->seq.low = seq;
-                xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-                if(!(features & NETIF_F_HW_ESP))
-                        xo->flags |= CRYPTO_FALLBACK;
-
-                x->outer_mode->xmit(x, skb2);
-
-                err = x->type_offload->xmit(x, skb2, esp_features);
-                if (err) {
-                        kfree_skb_list(segs);
-                        return ERR_PTR(err);
-                }
-
-                if (!skb_is_gso(skb2))
-                        seq++;
-                else
-                        seq += skb_shinfo(skb2)->gso_segs;
-
-                skb_push(skb2, skb2->mac_len);
-                skb2 = nskb;
-        } while (skb2);
+        xo->flags |= XFRM_GSO_SEGMENT;
 
-out:
-        return segs;
+        return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
 static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
 {
+        int len;
         int err;
         int alen;
         int blksize;
@@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
         struct crypto_aead *aead;
         struct esp_info esp;
         bool hw_offload = true;
+        __u32 seq;
 
         esp.inplace = true;
 
@@ -265,28 +228,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
                 return esp.nfrags;
         }
 
+        seq = xo->seq.low;
+
         esph = ip_esp_hdr(skb);
         esph->spi = x->id.spi;
 
         skb_push(skb, -skb_network_offset(skb));
 
         if (xo->flags & XFRM_GSO_SEGMENT) {
-                esph->seq_no = htonl(xo->seq.low);
-        } else {
-                int len;
+                esph->seq_no = htonl(seq);
 
-                len = skb->len - sizeof(struct ipv6hdr);
-                if (len > IPV6_MAXPLEN)
-                        len = 0;
+                if (!skb_is_gso(skb))
+                        xo->seq.low++;
+                else
+                        xo->seq.low += skb_shinfo(skb)->gso_segs;
+        }
 
-                ipv6_hdr(skb)->payload_len = htons(len);
-        }
+        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+        len = skb->len - sizeof(struct ipv6hdr);
+        if (len > IPV6_MAXPLEN)
+                len = 0;
+
+        ipv6_hdr(skb)->payload_len = htons(len);
 
         if (hw_offload)
                 return 0;
 
-        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
         err = esp6_output_tail(x, skb, &esp);
         if (err)
                 return err;
...
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
         __skb_push(skb, skb->mac_len);
         return skb_mac_gso_segment(skb, features);
 }
 
 static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
         struct xfrm_offload *xo = xfrm_offload(skb);
 
-        if (xo->flags & XFRM_GSO_SEGMENT) {
-                skb->network_header = skb->network_header - x->props.header_len;
+        if (xo->flags & XFRM_GSO_SEGMENT)
                 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
-        }
 
         skb_reset_mac_len(skb);
         pskb_pull(skb, skb->mac_len + x->props.header_len);
...
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
         struct sk_buff *orig_skb = skb;
         struct netdev_queue *txq;
         int ret = NETDEV_TX_BUSY;
+        bool again = false;
 
         if (unlikely(!netif_running(dev) ||
                      !netif_carrier_ok(dev)))
                 goto drop;
 
-        skb = validate_xmit_skb_list(skb, dev);
+        skb = validate_xmit_skb_list(skb, dev, &again);
         if (skb != orig_skb)
                 goto drop;
...
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -32,6 +32,7 @@
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <net/xfrm.h>
 
 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -230,6 +231,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                 /* skb in gso_skb were already validated */
                 *validate = false;
+                if (xfrm_offload(skb))
+                        *validate = true;
                 /* check the reason of requeuing without tx lock first */
                 txq = skb_get_tx_queue(txq->dev, skb);
                 if (!netif_xmit_frozen_or_stopped(txq)) {
@@ -285,6 +288,7 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                      spinlock_t *root_lock, bool validate)
 {
         int ret = NETDEV_TX_BUSY;
+        bool again = false;
 
         /* And release qdisc */
         if (root_lock)
@@ -292,7 +296,17 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
         /* Note that we validate skb (GSO, checksum, ...) outside of locks */
         if (validate)
-                skb = validate_xmit_skb_list(skb, dev);
+                skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+        if (unlikely(again)) {
+                if (root_lock)
+                        spin_lock(root_lock);
+
+                dev_requeue_skb(skb, q);
+                return false;
+        }
+#endif
 
         if (likely(skb)) {
                 HARD_TX_LOCK(dev, txq, smp_processor_id());
...
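The sch_generic.c hunk above defines the contract for the new bool *again
argument: when validate_xmit_skb_list() sets it, packets from an earlier
asynchronous crypto round are still waiting in the per-cpu xfrm backlog, so
the skb must be requeued rather than transmitted to preserve packet order.
A condensed sketch of that caller pattern (hypothetical helper, kernel
context assumed, mirroring sch_direct_xmit() above):

#include <linux/netdevice.h>

static bool example_validate_for_xmit(struct sk_buff **pskb,
                                      struct net_device *dev)
{
        bool again = false;

        *pskb = validate_xmit_skb_list(*pskb, dev, &again);
        if (unlikely(again))
                return false;   /* caller requeues, cf. sch_direct_xmit() */

        return *pskb != NULL;   /* true: safe to hand to the driver */
}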
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -23,32 +23,114 @@
 #include <linux/notifier.h>
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
         int err;
+        unsigned long flags;
         struct xfrm_state *x;
+        struct sk_buff *skb2;
+        struct softnet_data *sd;
+        netdev_features_t esp_features = features;
         struct xfrm_offload *xo = xfrm_offload(skb);
 
-        if (skb_is_gso(skb))
-                return 0;
+        if (!xo)
+                return skb;
 
-        if (xo) {
-                x = skb->sp->xvec[skb->sp->len - 1];
-                if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-                        return 0;
+        if (!(features & NETIF_F_HW_ESP))
+                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+        x = skb->sp->xvec[skb->sp->len - 1];
+        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+                return skb;
+
+        local_irq_save(flags);
+        sd = this_cpu_ptr(&softnet_data);
+        err = !skb_queue_empty(&sd->xfrm_backlog);
+        local_irq_restore(flags);
+
+        if (err) {
+                *again = true;
+                return skb;
+        }
+
+        if (skb_is_gso(skb)) {
+                struct net_device *dev = skb->dev;
+
+                if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+                        struct sk_buff *segs;
+
+                        /* Packet got rerouted, fixup features and segment it. */
+                        esp_features = esp_features & ~(NETIF_F_HW_ESP
+                                                        | NETIF_F_GSO_ESP);
+
+                        segs = skb_gso_segment(skb, esp_features);
+                        if (IS_ERR(segs)) {
+                                kfree_skb(skb);
+                                atomic_long_inc(&dev->tx_dropped);
+                                return NULL;
+                        } else {
+                                consume_skb(skb);
+                                skb = segs;
+                        }
+                }
+        }
 
+        if (!skb->next) {
                 x->outer_mode->xmit(x, skb);
 
-                err = x->type_offload->xmit(x, skb, features);
+                xo->flags |= XFRM_DEV_RESUME;
+
+                err = x->type_offload->xmit(x, skb, esp_features);
                 if (err) {
+                        if (err == -EINPROGRESS)
+                                return NULL;
+
                         XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-                        return err;
+                        kfree_skb(skb);
+                        return NULL;
                 }
 
                 skb_push(skb, skb->data - skb_mac_header(skb));
+
+                return skb;
         }
 
-        return 0;
+        skb2 = skb;
+
+        do {
+                struct sk_buff *nskb = skb2->next;
+                skb2->next = NULL;
+
+                xo = xfrm_offload(skb2);
+                xo->flags |= XFRM_DEV_RESUME;
+
+                x->outer_mode->xmit(x, skb2);
+
+                err = x->type_offload->xmit(x, skb2, esp_features);
+                if (!err) {
+                        skb2->next = nskb;
+                } else if (err != -EINPROGRESS) {
+                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                        skb2->next = nskb;
+                        kfree_skb_list(skb2);
+                        return NULL;
+                } else {
+                        if (skb == skb2)
+                                skb = nskb;
+
+                        if (!skb)
+                                return NULL;
+
+                        goto skip_push;
+                }
+
+                skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+skip_push:
+                skb2 = nskb;
+        } while (skb2);
+
+        return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
@@ -120,8 +202,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
         if (!x->type_offload || x->encap)
                 return false;
 
-        if ((x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev)) &&
-             !xdst->child->xfrm && x->type->get_mtu) {
+        if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
+            (!xdst->child->xfrm && x->type->get_mtu)) {
                 mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
 
                 if (skb->len <= mtu)
@@ -140,19 +222,82 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 
         return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
 
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+        struct net_device *dev = skb->dev;
+        int ret = NETDEV_TX_BUSY;
+        struct netdev_queue *txq;
+        struct softnet_data *sd;
+        unsigned long flags;
+
+        rcu_read_lock();
+        txq = netdev_pick_tx(dev, skb, NULL);
+
+        HARD_TX_LOCK(dev, txq, smp_processor_id());
+        if (!netif_xmit_frozen_or_stopped(txq))
+                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+        HARD_TX_UNLOCK(dev, txq);
+
+        if (!dev_xmit_complete(ret)) {
+                local_irq_save(flags);
+                sd = this_cpu_ptr(&softnet_data);
+                skb_queue_tail(&sd->xfrm_backlog, skb);
+                raise_softirq_irqoff(NET_TX_SOFTIRQ);
+                local_irq_restore(flags);
+        }
+        rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+        struct sk_buff_head list;
+        struct sk_buff *skb;
+
+        if (skb_queue_empty(xfrm_backlog))
+                return;
+
+        __skb_queue_head_init(&list);
+
+        spin_lock(&xfrm_backlog->lock);
+        skb_queue_splice_init(xfrm_backlog, &list);
+        spin_unlock(&xfrm_backlog->lock);
+
+        while (!skb_queue_empty(&list)) {
+                skb = __skb_dequeue(&list);
+                xfrm_dev_resume(skb);
+        }
+
+}
 #endif
 
-static int xfrm_dev_register(struct net_device *dev)
+static int xfrm_api_check(struct net_device *dev)
 {
-        if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-                return NOTIFY_BAD;
+#ifdef CONFIG_XFRM_OFFLOAD
         if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
             !(dev->features & NETIF_F_HW_ESP))
                 return NOTIFY_BAD;
 
+        if ((dev->features & NETIF_F_HW_ESP) &&
+            (!(dev->xfrmdev_ops &&
+               dev->xfrmdev_ops->xdo_dev_state_add &&
+               dev->xfrmdev_ops->xdo_dev_state_delete)))
+                return NOTIFY_BAD;
+#else
+        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
+                return NOTIFY_BAD;
+#endif
+
         return NOTIFY_DONE;
 }
 
+static int xfrm_dev_register(struct net_device *dev)
+{
+        return xfrm_api_check(dev);
+}
+
 static int xfrm_dev_unregister(struct net_device *dev)
 {
         xfrm_policy_cache_flush();
@@ -161,16 +306,7 @@ static int xfrm_dev_unregister(struct net_device *dev)
 
 static int xfrm_dev_feat_change(struct net_device *dev)
 {
-        if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-                return NOTIFY_BAD;
-        else if (!(dev->features & NETIF_F_HW_ESP))
-                dev->xfrmdev_ops = NULL;
-
-        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
-            !(dev->features & NETIF_F_HW_ESP))
-                return NOTIFY_BAD;
-
-        return NOTIFY_DONE;
+        return xfrm_api_check(dev);
 }
 
 static int xfrm_dev_down(struct net_device *dev)
...
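Taken together, the xfrm_device.c changes above complete the asynchronous
layer 2 IPsec path. A comment-only sketch of the resulting transmit flow,
using the functions from this series (the -EINPROGRESS leg is what the old
synchronous crypto fallback could not handle):

/*
 * dev_queue_xmit() / sch_direct_xmit()
 *   -> validate_xmit_skb_list(skb, dev, &again)
 *        -> validate_xmit_xfrm()           sets XFRM_DEV_RESUME, calls ESP
 *             -> x->type_offload->xmit()   may return -EINPROGRESS
 *                  ... asynchronous crypto completes later ...
 *                  -> esp_output_done()    crypto completion callback
 *                       -> xfrm_dev_resume()  retries dev_hard_start_xmit();
 *                          if the TX queue is busy, it queues the skb on
 *                          sd->xfrm_backlog and raises NET_TX_SOFTIRQ
 *                            -> net_tx_action() -> xfrm_dev_backlog()
 *
 * Later packets that see a non-empty xfrm_backlog set *again and are
 * requeued (see the sch_generic.c hunk), preserving transmit order.
 */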