Commit b537f584 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-08-28

This series contains updates to ixgbe and ixgbevf only.

Sebastian adds support for firmware NVM recovery mode, which logs a
message when errors are detected and unregisters the device.  He also
fixes RSS type recognition for VF-to-VF communication.

Shannon Nelson implements IPsec hardware offload for VF devices in
Intel's 10GbE x540 family of Ethernet devices.

The IPsec HW offload feature has been in the x540/Niantic family of
network devices since their release in 2009, but there was no Linux
kernel support for the offload until 2017.  After the XFRM code added
support for the offload last year, the HW offload was added to the ixgbe
PF driver.

Since the related x540 VF device uses the same setup as the PF for
implementing the offload, adding the feature to the ixgbevf driver seemed
like a good idea.
In this case, the PF owns the device registers, so the VF simply packages
up the request information into a VF<->PF message and the PF does the
device configuration.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 050cdc6c 5ed4e9e9
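
For context on how the new offload is exercised end-to-end: the PF
administrator enables the new "vf-ipsec" private flag and marks the VF as
trusted, and the VF then requests offloaded SAs through the normal XFRM
path.  A hypothetical example follows (interface names, addresses, SPI,
and key are made up for illustration; the rfc4106 AEAD algorithm and
128-bit ICV are the ones the driver accepts):

    # on the PF
    ethtool --set-priv-flags eth0 vf-ipsec on
    ip link set eth0 vf 1 trust on

    # on the VF, install an inbound SA with hardware offload
    ip xfrm state add src 10.0.0.1 dst 10.0.0.2 \
        proto esp spi 0x00000100 mode transport \
        aead 'rfc4106(gcm(aes))' \
        0x1111222233334444555566667777888899990000 128 \
        offload dev eth1 dir in

The VF driver's .xdo_dev_state_add handler packages such a request into
the IXGBE_VF_IPSEC_ADD mailbox message that the PF code below services.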
@@ -605,6 +605,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
/* Tx fast path data */
int num_tx_queues;
@@ -1003,15 +1004,24 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd);
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
#else
static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb) { }
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd) { return 0; }
static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
u32 vf) { }
static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
u32 *mbuf, u32 vf) { return -EACCES; }
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_XFRM_OFFLOAD */
#endif /* _IXGBE_H_ */
@@ -3484,6 +3484,17 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
/**
* ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode
* @hw: pointer to hardware structure
*/
bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw)
{
if (hw->mac.ops.fw_recovery_mode)
return hw->mac.ops.fw_recovery_mode(hw);
return false;
}
/**
* ixgbe_get_device_caps_generic - Get additional device capabilities
* @hw: pointer to hardware structure
...
@@ -136,6 +136,8 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
"vf-ipsec",
};
#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
@@ -3409,6 +3411,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
return priv_flags;
}
@@ -3421,6 +3426,10 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
flags2 |= IXGBE_FLAG2_RX_LEGACY;
flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
if (flags2 != adapter->flags2) {
adapter->flags2 = flags2;
...
@@ -5,6 +5,11 @@
#include <net/xfrm.h>
#include <crypto/aead.h>
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
/**
* ixgbe_ipsec_set_tx_sa - set the Tx SA registers
* @hw: hw specific details
@@ -113,7 +118,6 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
**/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
u32 buf[4] = {0, 0, 0, 0};
u16 idx;
@@ -132,9 +136,6 @@ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
}
ipsec->num_rx_sa = 0;
ipsec->num_tx_sa = 0;
}
/**
@@ -290,6 +291,13 @@ static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
/**
* ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
* @adapter: board private structure
*
* Reload the HW tables from the SW tables after they've been bashed
* by a chip reset.
*
* Any VF entries are removed from the SW and HW tables since either
* (a) the VF also gets reset on PF reset and will ask again for the
* offloads, or (b) the VF has been removed by a change in the num_vfs.
**/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
@@ -305,6 +313,28 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
ixgbe_ipsec_clear_hw_tables(adapter);
ixgbe_ipsec_start_engine(adapter);
/* reload the Rx and Tx keys */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
struct rx_sa *r = &ipsec->rx_tbl[i];
struct tx_sa *t = &ipsec->tx_tbl[i];
if (r->used) {
if (r->mode & IXGBE_RXTXMOD_VF)
ixgbe_ipsec_del_sa(r->xs);
else
ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
r->key, r->salt,
r->mode, r->iptbl_ind);
}
if (t->used) {
if (t->mode & IXGBE_RXTXMOD_VF)
ixgbe_ipsec_del_sa(t->xs);
else
ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
}
}
/* reload the IP addrs */
for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];
@@ -312,20 +342,6 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (ipsa->used)
ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
}
/* reload the Rx and Tx keys */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
struct rx_sa *rsa = &ipsec->rx_tbl[i];
struct tx_sa *tsa = &ipsec->tx_tbl[i];
if (rsa->used)
ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
rsa->key, rsa->salt,
rsa->mode, rsa->iptbl_ind);
if (tsa->used)
ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
}
}
/**
@@ -382,6 +398,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
rcu_read_lock();
hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
(__force u32)spi) {
if (rsa->mode & IXGBE_RXTXMOD_VF)
continue;
if (spi == rsa->xs->id.spi &&
((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
(!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
@@ -411,7 +429,6 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
struct net_device *dev = xs->xso.dev;
unsigned char *key_data;
char *alg_name = NULL;
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
int key_len;
if (!xs->aead) {
@@ -439,9 +456,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
* we don't need to do any byteswapping.
* 160 accounts for 16 byte key and 4 byte salt
*/
if (key_len == IXGBE_IPSEC_KEY_BITS) {
*mysalt = ((u32 *)key_data)[4];
} else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) {
netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
return -EINVAL;
} else {
@@ -676,6 +693,9 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
struct tx_sa tsa;
if (adapter->num_vfs)
return -EOPNOTSUPP;
/* find the first unused index */
ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
@@ -810,6 +830,226 @@ static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
/**
* ixgbe_ipsec_vf_clear - clear the tables of data for a VF
* @adapter: board private structure
* @vf: VF id to be removed
**/
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_ipsec *ipsec = adapter->ipsec;
int i;
/* search rx sa table */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
if (!ipsec->rx_tbl[i].used)
continue;
if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->rx_tbl[i].vf == vf)
ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
}
/* search tx sa table */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) {
if (!ipsec->tx_tbl[i].used)
continue;
if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->tx_tbl[i].vf == vf)
ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
}
}
/**
* ixgbe_ipsec_vf_add_sa - translate VF request to SA add
* @adapter: board private structure
* @msgbuf: The message buffer
* @vf: the VF index
*
* Make up a new xs and algorithm info from the data sent by the VF.
* We only need to sketch in just enough to set up the HW offload.
* Put the resulting offload_handle into the return message to the VF.
*
* Returns 0 or error value
**/
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_algo_desc *algo;
struct sa_mbx_msg *sam;
struct xfrm_state *xs;
size_t aead_len;
u16 sa_idx;
u32 pfsa;
int err;
sam = (struct sa_mbx_msg *)(&msgbuf[1]);
if (!adapter->vfinfo[vf].trusted ||
!(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) {
e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf);
err = -EACCES;
goto err_out;
}
/* Tx IPsec offload doesn't seem to work on this
* device, so block these requests for now.
*/
if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
err = -EOPNOTSUPP;
goto err_out;
}
xs = kzalloc(sizeof(*xs), GFP_KERNEL);
if (unlikely(!xs)) {
err = -ENOMEM;
goto err_out;
}
xs->xso.flags = sam->flags;
xs->id.spi = sam->spi;
xs->id.proto = sam->proto;
xs->props.family = sam->family;
if (xs->props.family == AF_INET6)
memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6));
else
memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
xs->xso.dev = adapter->netdev;
algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
if (unlikely(!algo)) {
err = -ENOENT;
goto err_xs;
}
aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
xs->aead = kzalloc(aead_len, GFP_KERNEL);
if (unlikely(!xs->aead)) {
err = -ENOMEM;
goto err_xs;
}
xs->props.ealgo = algo->desc.sadb_alg_id;
xs->geniv = algo->uinfo.aead.geniv;
xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS;
xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS;
memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key));
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
err = ixgbe_ipsec_add_sa(xs);
if (err)
goto err_aead;
pfsa = xs->xso.offload_handle;
if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
ipsec->rx_tbl[sa_idx].vf = vf;
ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
} else {
sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
ipsec->tx_tbl[sa_idx].vf = vf;
ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
}
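/* Illustrative note (values from ipsec.h, example handles made up):
 * with IXGBE_IPSEC_BASE_RX_INDEX = 0 and IXGBE_IPSEC_BASE_TX_INDEX = 1024,
 * offload_handle 5 maps to rx_tbl[5] and handle 1029 maps to tx_tbl[5].
 */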
msgbuf[1] = xs->xso.offload_handle;
return 0;
err_aead:
memset(xs->aead, 0, sizeof(*xs->aead));
kfree(xs->aead);
err_xs:
memset(xs, 0, sizeof(*xs));
kfree(xs);
err_out:
msgbuf[1] = err;
return err;
}
/**
* ixgbe_ipsec_vf_del_sa - translate VF request to SA delete
* @adapter: board private structure
* @msgbuf: The message buffer
* @vf: the VF index
*
* Given the offload_handle sent by the VF, look for the related SA table
* entry and use its xs field to call for a delete of the SA.
*
* Note: We silently ignore requests to delete entries that are already
* set to unused because when a VF is set to "DOWN", the PF first
* gets a reset and clears all the VF's entries; then the VF's
* XFRM stack sends individual deletes for each entry, which the
* reset already removed. In the future it might be good to try to
* optimize this so not so many unnecessary delete messages are sent.
*
* Returns 0 or error value
**/
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
u32 pfsa = msgbuf[1];
u16 sa_idx;
if (!adapter->vfinfo[vf].trusted) {
e_err(drv, "vf %d attempted to delete an SA\n", vf);
return -EPERM;
}
if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
struct rx_sa *rsa;
sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
e_err(drv, "vf %d SA index %d out of range\n",
vf, sa_idx);
return -EINVAL;
}
rsa = &ipsec->rx_tbl[sa_idx];
if (!rsa->used)
return 0;
if (!(rsa->mode & IXGBE_RXTXMOD_VF) ||
rsa->vf != vf) {
e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx);
return -ENOENT;
}
xs = ipsec->rx_tbl[sa_idx].xs;
} else {
struct tx_sa *tsa;
sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
e_err(drv, "vf %d SA index %d out of range\n",
vf, sa_idx);
return -EINVAL;
}
tsa = &ipsec->tx_tbl[sa_idx];
if (!tsa->used)
return 0;
if (!(tsa->mode & IXGBE_RXTXMOD_VF) ||
tsa->vf != vf) {
e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx);
return -ENOENT;
}
xs = ipsec->tx_tbl[sa_idx].xs;
}
ixgbe_ipsec_del_sa(xs);
/* remove the xs that was made-up in the add request */
memset(xs, 0, sizeof(*xs));
kfree(xs);
return 0;
}
/**
* ixgbe_ipsec_tx - setup Tx flags for ipsec offload
* @tx_ring: outgoing context
...
@@ -26,6 +26,7 @@ enum ixgbe_ipsec_tbl_sel {
#define IXGBE_RXMOD_PROTO_ESP 0x00000004
#define IXGBE_RXMOD_DECRYPT 0x00000008
#define IXGBE_RXMOD_IPV6 0x00000010
#define IXGBE_RXTXMOD_VF 0x00000020
struct rx_sa {
struct hlist_node hlist;
@@ -37,6 +38,7 @@ struct rx_sa {
u8 iptbl_ind;
bool used;
bool decrypt;
u32 vf;
};
struct rx_ip_sa {
@@ -49,8 +51,10 @@ struct tx_sa {
struct xfrm_state *xs;
u32 key[4];
u32 salt;
u32 mode;
bool encrypt;
bool used;
u32 vf;
};
struct ixgbe_ipsec_tx_data {
@@ -67,4 +71,13 @@ struct ixgbe_ipsec {
struct tx_sa *tx_tbl;
DECLARE_HASHTABLE(rx_sa_list, 10);
};
struct sa_mbx_msg {
__be32 spi;
u8 flags;
u8 proto;
u16 family;
__be32 addr[4];
u32 key[5];
};
#endif /* _IXGBE_IPSEC_H_ */
@@ -7774,6 +7774,33 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
rtnl_unlock();
}
/**
* ixgbe_check_fw_error - Check firmware for errors
* @adapter: the adapter private structure
*
* Check firmware errors in register FWSM
*/
static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 fwsm;
/* read fwsm.ext_err_ind register and log errors */
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
!(fwsm & IXGBE_FWSM_FW_VAL_BIT))
e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
fwsm);
if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
return true;
}
return false;
}
/**
* ixgbe_service_task - manages and runs subtasks
* @work: pointer to work_struct containing our data
@@ -7792,6 +7819,15 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
if (ixgbe_check_fw_error(adapter)) {
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
rtnl_lock();
unregister_netdev(adapter->netdev);
rtnl_unlock();
}
ixgbe_service_event_complete(adapter);
return;
}
if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
rtnl_lock();
adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
@@ -10716,6 +10752,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
if (ixgbe_check_fw_error(adapter)) {
err = -EIO;
goto err_sw_init;
}
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n");
...
@@ -50,6 +50,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -80,6 +81,10 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
/* mailbox API, version 1.4 VF requests */
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
...
@@ -496,6 +496,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
/* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
* disabled
@@ -728,6 +729,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* reset multicast table array for vf */
adapter->vfinfo[vf].num_vf_mc_hashes = 0;
/* clear any ipsec table info */
ixgbe_ipsec_vf_clear(adapter, vf);
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
@@ -1000,6 +1004,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
@@ -1025,6 +1030,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
break;
default:
return -1;
@@ -1065,6 +1071,7 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
break;
@@ -1097,6 +1104,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
break;
@@ -1122,8 +1130,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
/* promisc introduced in 1.3 version */
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
/* Fall through */
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
break;
default:
return -EOPNOTSUPP;
@@ -1249,6 +1258,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_UPDATE_XCAST_MODE:
retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
break;
case IXGBE_VF_IPSEC_ADD:
retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
break;
case IXGBE_VF_IPSEC_DEL:
retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
...
@@ -924,6 +924,9 @@ struct ixgbe_nvm_version {
/* Firmware Semaphore Register */
#define IXGBE_FWSM_MODE_MASK 0xE
#define IXGBE_FWSM_FW_MODE_PT 0x4
#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE BIT(5)
#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000
#define IXGBE_FWSM_FW_VAL_BIT BIT(15)
/* ARC Subsystem registers */
#define IXGBE_HICR 0x15F00
@@ -3461,6 +3464,7 @@ struct ixgbe_mac_operations {
const char *);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
...
@@ -1247,6 +1247,20 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
return 0;
}
/**
* ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
* @hw: pointer to hardware structure
*
* Returns true if in FW NVM recovery mode.
*/
static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
{
u32 fwsm;
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
}
/** ixgbe_disable_rx_x550 - Disable RX unit
*
* Disables the Rx DMA unit for x550
@@ -3816,6 +3830,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
.init_thermal_sensor_thresh = NULL, \
.fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \
.enable_rx = &ixgbe_enable_rx_generic, \
.disable_rx = &ixgbe_disable_rx_x550, \
...
@@ -10,4 +10,5 @@ ixgbevf-objs := vf.o \
mbx.o \
ethtool.o \
ixgbevf_main.o
ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o
@@ -133,9 +133,14 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */
#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
@@ -229,7 +234,7 @@ union ixgbe_adv_rx_desc {
/* Context descriptors */
struct ixgbe_adv_tx_context_desc {
__le32 vlan_macip_lens;
__le32 fceof_saidx;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
@@ -250,9 +255,12 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */
#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
...
@@ -55,6 +55,8 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
IXGBEVF_STAT("tx_ipsec", tx_ipsec),
IXGBEVF_STAT("rx_ipsec", rx_ipsec),
};
#define IXGBEVF_QUEUE_STATS_LEN ( \
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
#include "ixgbevf.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
/**
* ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA
* @adapter: board private structure
* @xs: xfrm info to be sent to the PF
*
* Returns: positive offload handle from the PF, or negative error code
**/
static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
struct xfrm_state *xs)
{
u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
struct ixgbe_hw *hw = &adapter->hw;
struct sa_mbx_msg *sam;
u16 msglen;
int ret;
/* send the important bits to the PF */
sam = (struct sa_mbx_msg *)(&msgbuf[1]);
sam->flags = xs->xso.flags;
sam->spi = xs->id.spi;
sam->proto = xs->id.proto;
sam->family = xs->props.family;
if (xs->props.family == AF_INET6)
memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
else
memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));
msgbuf[0] = IXGBE_VF_IPSEC_ADD;
msglen = sizeof(*sam) + sizeof(msgbuf[0]);
spin_lock_bh(&adapter->mbx_lock);
ret = hw->mbx.ops.write_posted(hw, msgbuf, msglen);
if (ret)
goto out;
msglen = sizeof(msgbuf[0]) * 2;
ret = hw->mbx.ops.read_posted(hw, msgbuf, msglen);
if (ret)
goto out;
ret = (int)msgbuf[1];
if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0)
ret = -1;
out:
spin_unlock_bh(&adapter->mbx_lock);
return ret;
}
/**
* ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA
* @adapter: board private structure
* @pfsa: sa index returned from PF when created, -1 for all
*
* Returns: 0 on success, or negative error code
**/
static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 msgbuf[2];
int err;
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_IPSEC_DEL;
msgbuf[1] = (u32)pfsa;
spin_lock_bh(&adapter->mbx_lock);
err = hw->mbx.ops.write_posted(hw, msgbuf, sizeof(msgbuf));
if (err)
goto out;
err = hw->mbx.ops.read_posted(hw, msgbuf, sizeof(msgbuf));
if (err)
goto out;
out:
spin_unlock_bh(&adapter->mbx_lock);
return err;
}
/**
* ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset
* @adapter: board private structure
*
* Reload the HW tables from the SW tables after they've been bashed
* by a chip reset. While we're here, make sure any stale VF data is
* removed, since we go through reset when num_vfs changes.
**/
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter)
{
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct net_device *netdev = adapter->netdev;
int i;
if (!(adapter->netdev->features & NETIF_F_HW_ESP))
return;
/* reload the Rx and Tx keys */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
struct rx_sa *r = &ipsec->rx_tbl[i];
struct tx_sa *t = &ipsec->tx_tbl[i];
int ret;
if (r->used) {
ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs);
if (ret < 0)
netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n",
i, ret);
}
if (t->used) {
ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs);
if (ret < 0)
netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n",
i, ret);
}
}
}
/**
* ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index
* @ipsec: pointer to IPsec struct
* @rxtable: true if we need to look in the Rx table
*
* Returns the first unused index in either the Rx or Tx SA table
**/
static
int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable)
{
u32 i;
if (rxtable) {
if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
return -ENOSPC;
/* search rx sa table */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
if (!ipsec->rx_tbl[i].used)
return i;
}
} else {
if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
return -ENOSPC;
/* search tx sa table */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
if (!ipsec->tx_tbl[i].used)
return i;
}
}
return -ENOSPC;
}
/**
* ixgbevf_ipsec_find_rx_state - find the state that matches
* @ipsec: pointer to IPsec struct
* @daddr: inbound address to match
* @proto: protocol to match
* @spi: SPI to match
* @ip4: true if using an IPv4 address
*
* Returns a pointer to the matching SA state information
**/
static
struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
__be32 *daddr, u8 proto,
__be32 spi, bool ip4)
{
struct xfrm_state *ret = NULL;
struct rx_sa *rsa;
rcu_read_lock();
hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
(__force u32)spi) {
if (spi == rsa->xs->id.spi &&
((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
(!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
sizeof(rsa->xs->id.daddr.a6)))) &&
proto == rsa->xs->id.proto) {
ret = rsa->xs;
xfrm_state_hold(ret);
break;
}
}
rcu_read_unlock();
return ret;
}
/**
* ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
*
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
struct net_device *dev = xs->xso.dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
if (!xs->aead) {
netdev_err(dev, "Unsupported IPsec algorithm\n");
return -EINVAL;
}
if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
netdev_err(dev, "IPsec offload requires %d bit authentication\n",
IXGBE_IPSEC_AUTH_BITS);
return -EINVAL;
}
key_data = &xs->aead->alg_key[0];
key_len = xs->aead->alg_key_len;
alg_name = xs->aead->alg_name;
if (strcmp(alg_name, aes_gcm_name)) {
netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
aes_gcm_name);
return -EINVAL;
}
/* The key bytes come down in a big endian array of bytes, so
* we don't need to do any byte swapping.
* 160 accounts for 16 byte key and 4 byte salt
*/
if (key_len == IXGBE_IPSEC_KEY_BITS) {
*mysalt = ((u32 *)key_data)[4];
} else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) {
netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
return -EINVAL;
} else {
*mysalt = 0;
}
memcpy(mykey, key_data, 16);
return 0;
}
/**
* ixgbevf_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
**/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
struct net_device *dev = xs->xso.dev;
struct ixgbevf_adapter *adapter = netdev_priv(dev);
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
u16 sa_idx;
int ret;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
xs->id.proto);
return -EINVAL;
}
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
struct rx_sa rsa;
if (xs->calg) {
netdev_err(dev, "Compression offload not supported\n");
return -EINVAL;
}
/* find the first unused index */
ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
if (ret < 0) {
netdev_err(dev, "No space for SA in Rx table!\n");
return ret;
}
sa_idx = (u16)ret;
memset(&rsa, 0, sizeof(rsa));
rsa.used = true;
rsa.xs = xs;
if (rsa.xs->id.proto & IPPROTO_ESP)
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
if (ret) {
netdev_err(dev, "Failed to get key data for Rx SA table\n");
return ret;
}
/* get ip for rx sa table */
if (xs->props.family == AF_INET6)
memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
else
memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);
rsa.mode = IXGBE_RXMOD_VALID;
if (rsa.xs->id.proto & IPPROTO_ESP)
rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
if (rsa.decrypt)
rsa.mode |= IXGBE_RXMOD_DECRYPT;
if (rsa.xs->props.family == AF_INET6)
rsa.mode |= IXGBE_RXMOD_IPV6;
ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
if (ret < 0)
return ret;
rsa.pfsa = ret;
/* the preparations worked, so save the info */
memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));
xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;
ipsec->num_rx_sa++;
/* hash the new entry for faster search in Rx path */
hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
(__force u32)rsa.xs->id.spi);
} else {
struct tx_sa tsa;
/* find the first unused index */
ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
netdev_err(dev, "No space for SA in Tx table\n");
return ret;
}
sa_idx = (u16)ret;
memset(&tsa, 0, sizeof(tsa));
tsa.used = true;
tsa.xs = xs;
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
if (ret) {
netdev_err(dev, "Failed to get key data for Tx SA table\n");
memset(&tsa, 0, sizeof(tsa));
return ret;
}
ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
if (ret < 0)
return ret;
tsa.pfsa = ret;
/* the preparations worked, so save the info */
memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));
xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
ipsec->num_tx_sa++;
}
return 0;
}
/**
* ixgbevf_ipsec_del_sa - clear out this specific SA
* @xs: pointer to transformer state struct
**/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *dev = xs->xso.dev;
struct ixgbevf_adapter *adapter = netdev_priv(dev);
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
u16 sa_idx;
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
if (!ipsec->rx_tbl[sa_idx].used) {
netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
sa_idx, xs->xso.offload_handle);
return;
}
ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa);
hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist);
memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa));
ipsec->num_rx_sa--;
} else {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
if (!ipsec->tx_tbl[sa_idx].used) {
netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
sa_idx, xs->xso.offload_handle);
return;
}
ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa);
memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
ipsec->num_tx_sa--;
}
}
/**
* ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
* @skb: current data packet
* @xs: pointer to transformer state struct
**/
static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
if (xs->props.family == AF_INET) {
/* Offload with IPv4 options is not supported yet */
if (ip_hdr(skb)->ihl != 5)
return false;
} else {
/* Offload with IPv6 extension headers is not supported yet */
if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
return false;
}
return true;
}
static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
.xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
/**
* ixgbevf_ipsec_tx - setup Tx flags for IPsec offload
* @tx_ring: outgoing context
* @first: current data packet
* @itd: ipsec Tx data for later use in building context descriptor
**/
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
{
struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
struct tx_sa *tsa;
u16 sa_idx;
if (unlikely(!first->skb->sp->len)) {
netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
__func__, first->skb->sp->len);
return 0;
}
xs = xfrm_input_state(first->skb);
if (unlikely(!xs)) {
netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
__func__, xs);
return 0;
}
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
__func__, sa_idx, xs->xso.offload_handle);
return 0;
}
tsa = &ipsec->tx_tbl[sa_idx];
if (unlikely(!tsa->used)) {
netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
__func__, sa_idx);
return 0;
}
itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM;
if (xs->id.proto == IPPROTO_ESP) {
itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
IXGBE_ADVTXD_TUCMD_L4T_TCP;
if (first->protocol == htons(ETH_P_IP))
itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
/* The actual trailer length is authlen (16 bytes) plus
* 2 bytes for the proto and the padlen values, plus
* padlen bytes of padding. This ends up not the same
* as the static value found in xs->props.trailer_len (21).
*
* ... but if we're doing GSO, don't bother as the stack
* doesn't add a trailer for those.
*/
if (!skb_is_gso(first->skb)) {
/* The "correct" way to get the auth length would be
* to use
* authlen = crypto_aead_authsize(xs->data);
* but since we know we only have one size to worry
* about, we can let the compiler use the constant
* and save us a few CPU cycles.
*/
const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
struct sk_buff *skb = first->skb;
u8 padlen;
int ret;
ret = skb_copy_bits(skb, skb->len - (authlen + 2),
&padlen, 1);
if (unlikely(ret))
return 0;
itd->trailer_len = authlen + 2 + padlen;
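/* Worked example (illustrative): with the fixed 16-byte ICV, an
 * ESP packet whose pad length byte reads 2 yields
 * trailer_len = 16 + 2 + 2 = 20 bytes.
 */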
}
}
if (tsa->encrypt)
itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
return 1;
}
/**
* ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor
* @rx_ring: receiving ring
* @rx_desc: receive data descriptor
* @skb: current data packet
*
* Determine if there was an IPsec encapsulation noticed, and if so set up
* the resulting status for later in the receive stack.
**/
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct xfrm_offload *xo = NULL;
struct xfrm_state *xs = NULL;
struct ipv6hdr *ip6 = NULL;
struct iphdr *ip4 = NULL;
void *daddr;
__be32 spi;
u8 *c_hdr;
u8 proto;
/* Find the IP and crypto headers in the data.
* We can assume no VLAN header in the way, b/c the
* hw won't recognize the IPsec packet and anyway the
* VLAN device currently doesn't support xfrm offload.
*/
if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
daddr = &ip4->daddr;
c_hdr = (u8 *)ip4 + ip4->ihl * 4;
} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
daddr = &ip6->daddr;
c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
} else {
return;
}
switch (pkt_info & ipsec_pkt_types) {
case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
spi = ((struct ip_auth_hdr *)c_hdr)->spi;
proto = IPPROTO_AH;
break;
case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
spi = ((struct ip_esp_hdr *)c_hdr)->spi;
proto = IPPROTO_ESP;
break;
default:
return;
}
xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
if (unlikely(!xs))
return;
skb->sp = secpath_dup(skb->sp);
if (unlikely(!skb->sp))
return;
skb->sp->xvec[skb->sp->len++] = xs;
skb->sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
xo->status = CRYPTO_SUCCESS;
adapter->rx_ipsec++;
}
/**
* ixgbevf_init_ipsec_offload - initialize registers for IPsec operation
* @adapter: board private structure
**/
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{
struct ixgbevf_ipsec *ipsec;
size_t size;
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_14:
break;
default:
return;
}
ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
if (!ipsec)
goto err1;
hash_init(ipsec->rx_sa_list);
size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
if (!ipsec->rx_tbl)
goto err2;
size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
if (!ipsec->tx_tbl)
goto err2;
ipsec->num_rx_sa = 0;
ipsec->num_tx_sa = 0;
adapter->ipsec = ipsec;
adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops;
#define IXGBEVF_ESP_FEATURES (NETIF_F_HW_ESP | \
NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
adapter->netdev->features |= IXGBEVF_ESP_FEATURES;
adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES;
return;
err2:
kfree(ipsec->rx_tbl);
kfree(ipsec->tx_tbl);
kfree(ipsec);
err1:
netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
}
/**
* ixgbevf_stop_ipsec_offload - tear down the IPsec offload
* @adapter: board private structure
**/
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
adapter->ipsec = NULL;
if (ipsec) {
kfree(ipsec->rx_tbl);
kfree(ipsec->tx_tbl);
kfree(ipsec);
}
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
#ifndef _IXGBEVF_IPSEC_H_
#define _IXGBEVF_IPSEC_H_
#define IXGBE_IPSEC_MAX_SA_COUNT 1024
#define IXGBE_IPSEC_BASE_RX_INDEX 0
#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT
#define IXGBE_IPSEC_AUTH_BITS 128
#define IXGBE_RXMOD_VALID 0x00000001
#define IXGBE_RXMOD_PROTO_ESP 0x00000004
#define IXGBE_RXMOD_DECRYPT 0x00000008
#define IXGBE_RXMOD_IPV6 0x00000010
struct rx_sa {
struct hlist_node hlist;
struct xfrm_state *xs;
__be32 ipaddr[4];
u32 key[4];
u32 salt;
u32 mode;
u32 pfsa;
bool used;
bool decrypt;
};
struct rx_ip_sa {
__be32 ipaddr[4];
u32 ref_cnt;
bool used;
};
struct tx_sa {
struct xfrm_state *xs;
u32 key[4];
u32 salt;
u32 pfsa;
bool encrypt;
bool used;
};
struct ixgbevf_ipsec_tx_data {
u32 flags;
u16 trailer_len;
u16 pfsa;
};
struct ixgbevf_ipsec {
u16 num_rx_sa;
u16 num_tx_sa;
struct rx_sa *rx_tbl;
struct tx_sa *tx_tbl;
DECLARE_HASHTABLE(rx_sa_list, 10);
};
struct sa_mbx_msg {
__be32 spi;
u8 flags;
u8 proto;
u16 family;
__be32 addr[4];
u32 key[5];
};
#endif /* _IXGBEVF_IPSEC_H_ */
@@ -14,6 +14,7 @@
#include <net/xdp.h>
#include "vf.h"
#include "ipsec.h"
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
@@ -163,6 +164,7 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN BIT(1)
#define IXGBE_TX_FLAGS_TSO BIT(2)
#define IXGBE_TX_FLAGS_IPV4 BIT(3)
#define IXGBE_TX_FLAGS_IPSEC BIT(4)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -338,6 +340,7 @@ struct ixgbevf_adapter {
struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 restart_queue;
u32 tx_timeout_count;
u64 tx_ipsec;
/* RX */
int num_rx_queues;
@@ -348,6 +351,7 @@ struct ixgbevf_adapter {
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 alloc_rx_page;
u64 rx_ipsec;
struct msix_entry *msix_entries;
@@ -384,6 +388,10 @@ struct ixgbevf_adapter {
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
u32 flags;
#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
#ifdef CONFIG_XFRM
struct ixgbevf_ipsec *ipsec;
#endif /* CONFIG_XFRM */
};
enum ixbgevf_state_t {
@@ -451,6 +459,31 @@ int ethtool_ioctl(struct ifreq *ifr);
extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
#ifdef CONFIG_XFRM_OFFLOAD
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter);
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd);
#else
static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{ }
static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{ }
static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { }
static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb) { }
static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
{ return 0; }
#endif /* CONFIG_XFRM_OFFLOAD */
void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
...
@@ -40,7 +40,7 @@ static const char ixgbevf_driver_string[] =
#define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2018 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -268,7 +268,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = tx_ring->count / 2;
unsigned int i = tx_ring->next_to_clean;
@@ -299,6 +299,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 		/* update the statistics for this packet */
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
+		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
+			total_ipsec++;
 
 		/* free the skb */
 		if (ring_is_xdp(tx_ring))
@@ -361,6 +363,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 	u64_stats_update_end(&tx_ring->syncp);
 	q_vector->tx.total_bytes += total_bytes;
 	q_vector->tx.total_packets += total_packets;
+	adapter->tx_ipsec += total_ipsec;
 
 	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
 		struct ixgbe_hw *hw = &adapter->hw;
@@ -516,6 +519,9 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 	}
 
+	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
+		ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
+
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
@@ -1012,7 +1018,7 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
 	context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
 	context_desc->vlan_macip_lens	=
 		cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
-	context_desc->seqnum_seed	= 0;
+	context_desc->fceof_saidx	= 0;
 	context_desc->type_tucmd_mlhl	=
 		cpu_to_le32(IXGBE_TXD_CMD_DEXT |
 			    IXGBE_ADVTXD_DTYP_CTXT);
@@ -2200,6 +2206,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 	ixgbevf_set_rx_mode(adapter->netdev);
 	ixgbevf_restore_vlan(adapter);
+	ixgbevf_ipsec_restore(adapter);
 
 	ixgbevf_configure_tx(adapter);
 	ixgbevf_configure_rx(adapter);
@@ -2246,7 +2253,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int api[] = { ixgbe_mbox_api_13,
+	int api[] = { ixgbe_mbox_api_14,
+		      ixgbe_mbox_api_13,
 		      ixgbe_mbox_api_12,
 		      ixgbe_mbox_api_11,
 		      ixgbe_mbox_api_10,
@@ -2605,6 +2613,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 	case ixgbe_mbox_api_11:
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
+	case ixgbe_mbox_api_14:
 		if (adapter->xdp_prog &&
 		    hw->mac.max_tx_queues == rss)
 			rss = rss > 3 ? 2 : 1;
@@ -3700,8 +3709,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
 }
 
 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
-				u32 vlan_macip_lens, u32 type_tucmd,
-				u32 mss_l4len_idx)
+				u32 vlan_macip_lens, u32 fceof_saidx,
+				u32 type_tucmd, u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	u16 i = tx_ring->next_to_use;
@@ -3715,14 +3724,15 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
 	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 
 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
-	context_desc->seqnum_seed	= 0;
+	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
 }
 
 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 		       struct ixgbevf_tx_buffer *first,
-		       u8 *hdr_len)
+		       u8 *hdr_len,
+		       struct ixgbevf_ipsec_tx_data *itd)
 {
 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
@@ -3736,6 +3746,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
+	u32 fceof_saidx = 0;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3761,13 +3772,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	if (ip.v4->version == 4) {
 		unsigned char *csum_start = skb_checksum_start(skb);
 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+		int len = csum_start - trans_start;
 
 		/* IP header will have to cancel out any data that
-		 * is not a part of the outer IP header
+		 * is not a part of the outer IP header, so set to
+		 * a reverse csum if needed, else init check to 0.
 		 */
-		ip.v4->check = csum_fold(csum_partial(trans_start,
-						      csum_start - trans_start,
-						      0));
+		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
+			       csum_fold(csum_partial(trans_start,
+						      len, 0)) : 0;
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 
 		ip.v4->tot_len = 0;
@@ -3799,13 +3812,16 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
 
+	fceof_saidx |= itd->pfsa;
+	type_tucmd |= itd->flags | itd->trailer_len;
+
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
 	vlan_macip_lens = l4.hdr - ip.hdr;
 	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
-			    type_tucmd, mss_l4len_idx);
+	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
+			    mss_l4len_idx);
 
 	return 1;
 }
@@ -3820,10 +3836,12 @@ static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
 }
 
 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
-			    struct ixgbevf_tx_buffer *first)
+			    struct ixgbevf_tx_buffer *first,
+			    struct ixgbevf_ipsec_tx_data *itd)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
+	u32 fceof_saidx = 0;
 	u32 type_tucmd = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3849,6 +3867,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 		skb_checksum_help(skb);
 		goto no_csum;
 	}
+
+	if (first->protocol == htons(ETH_P_IP))
+		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
 	/* update TX checksum flag */
 	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	vlan_macip_lens = skb_checksum_start_offset(skb) -
@@ -3858,7 +3880,11 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+	fceof_saidx |= itd->pfsa;
+	type_tucmd |= itd->flags | itd->trailer_len;
+
+	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+			    fceof_saidx, type_tucmd, 0);
 }
 
 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3892,8 +3918,12 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
 	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 
-	/* use index 1 context for TSO/FSO/FCOE */
-	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+	/* enable IPsec */
+	if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
+
+	/* use index 1 context for TSO/FSO/FCOE/IPSEC */
+	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
 		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
 
 	/* Check Context must be set if Tx switch is enabled, which it
@@ -4075,6 +4105,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
 	int tso;
 	u32 tx_flags = 0;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 	unsigned short f;
 #endif
@@ -4119,11 +4150,15 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
 	first->tx_flags = tx_flags;
 	first->protocol = vlan_get_protocol(skb);
 
-	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+#ifdef CONFIG_XFRM_OFFLOAD
+	if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+		goto out_drop;
+#endif
+	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
-		ixgbevf_tx_csum(tx_ring, first);
+		ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
 
 	ixgbevf_tx_map(tx_ring, first, hdr_len);
@@ -4634,6 +4669,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	case ixgbe_mbox_api_11:
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
+	case ixgbe_mbox_api_14:
 		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
 				  (ETH_HLEN + ETH_FCS_LEN);
 		break;
@@ -4669,6 +4705,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_drvdata(pdev, netdev);
 	netif_carrier_off(netdev);
+	ixgbevf_init_ipsec_offload(adapter);
 
 	ixgbevf_init_last_counter_stats(adapter);
@@ -4735,6 +4772,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
 
+	ixgbevf_stop_ipsec_offload(adapter);
 	ixgbevf_clear_interrupt_scheme(adapter);
 	ixgbevf_reset_interrupt_capability(adapter);
......
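The thread running through the hunks above: the second context-descriptor word, formerly seqnum_seed, is repurposed as fceof_saidx to carry the IPsec SA index, and both ixgbevf_tso() and ixgbevf_tx_csum() now fold itd->pfsa and itd->flags | itd->trailer_len into the descriptor they build. A stand-alone sketch of that field packing follows; the shift values are copied from the IXGBE_ADVTXD_* defines as we read them and should be treated as illustrative, and sa_idx is a hypothetical value a PF might have assigned.

#include <stdint.h>
#include <stdio.h>

/* illustrative copies of the driver's shift defines */
#define ADVTXD_MACLEN_SHIFT	9	/* MAC header length field */
#define ADVTXD_L4LEN_SHIFT	8	/* L4 header length field */
#define ADVTXD_MSS_SHIFT	16	/* MSS field */
#define ADVTXD_IDX_SHIFT	4	/* "use context 1" index field */

int main(void)
{
	uint32_t mac_len = 14, ip_len = 20, l4_len = 20, mss = 1448;
	uint32_t sa_idx = 3;		/* hypothetical SA slot from the PF */

	/* vlan_macip_lens: IP header length in the low bits, MACLEN above */
	uint32_t vlan_macip_lens = ip_len | (mac_len << ADVTXD_MACLEN_SHIFT);

	/* mss_l4len_idx: as composed at the end of ixgbevf_tso() */
	uint32_t mss_l4len_idx = (l4_len << ADVTXD_L4LEN_SHIFT) |
				 (mss << ADVTXD_MSS_SHIFT) |
				 (1u << ADVTXD_IDX_SHIFT);

	/* fceof_saidx: the word that used to be seqnum_seed */
	uint32_t fceof_saidx = sa_idx;

	printf("vlan_macip_lens=0x%08x mss_l4len_idx=0x%08x fceof_saidx=0x%08x\n",
	       (unsigned)vlan_macip_lens, (unsigned)mss_l4len_idx,
	       (unsigned)fceof_saidx);
	return 0;
}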
@@ -62,6 +62,7 @@ enum ixgbe_pfvf_api_rev {
 	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
 	ixgbe_mbox_api_12,	/* API version 1.2, linux/freebsd VF driver */
 	ixgbe_mbox_api_13,	/* API version 1.3, linux/freebsd VF driver */
+	ixgbe_mbox_api_14,	/* API version 1.4, linux/freebsd VF driver */
 	/* This value should always be last */
 	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
 };
@@ -92,6 +93,10 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_UPDATE_XCAST_MODE	0x0c
 
+/* mailbox API, version 1.4 VF requests */
+#define IXGBE_VF_IPSEC_ADD	0x0d
+#define IXGBE_VF_IPSEC_DEL	0x0e
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN	4
 /* word in permanent address message with the current multicast type */
......
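IXGBE_VF_IPSEC_ADD and IXGBE_VF_IPSEC_DEL only reserve opcodes; the SA description that follows word 0 of the message is defined by the IPsec patches themselves and is not visible in this hunk. Purely as a hypothetical illustration of how such a VF<->PF request is framed: sa_payload and MBX_WORDS below are made up for the demo, and the real layout may differ.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define IXGBE_VF_IPSEC_ADD	0x0d	/* opcode from mbx.h above */
#define MBX_WORDS		16	/* assumed mailbox size for the demo */

/* hypothetical SA description; the real struct lives in the ipsec code */
struct sa_payload {
	uint32_t spi;
	uint32_t addr[4];
	uint32_t key[5];
};

int main(void)
{
	uint32_t msgbuf[MBX_WORDS] = { 0 };
	struct sa_payload sa = { .spi = 0x1234 };

	/* word 0 carries the opcode; the SA data rides behind it, which is
	 * how the VF packages a request for the PF to program the hardware
	 */
	msgbuf[0] = IXGBE_VF_IPSEC_ADD;
	memcpy(&msgbuf[1], &sa, sizeof(sa));

	printf("opcode=0x%02x, payload=%zu bytes\n",
	       (unsigned)msgbuf[0], sizeof(sa));
	return 0;
}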
@@ -309,6 +309,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
 	 * is not supported for this device type.
 	 */
 	switch (hw->api_version) {
+	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_12:
 		if (hw->mac.type < ixgbe_mac_X550_vf)
@@ -376,6 +377,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
 	 * or if the operation is not supported for this device type.
 	 */
 	switch (hw->api_version) {
+	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_13:
 	case ixgbe_mbox_api_12:
 		if (hw->mac.type < ixgbe_mac_X550_vf)
@@ -540,6 +542,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
 			return -EOPNOTSUPP;
 		/* Fall through */
+	case ixgbe_mbox_api_14:
 	case ixgbe_mbox_api_13:
 		break;
 	default:
@@ -890,6 +893,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 	case ixgbe_mbox_api_11:
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
+	case ixgbe_mbox_api_14:
 		break;
 	default:
 		return 0;
......
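Every switch in this file gains a case for the new mailbox revision, and the version list added to ixgbevf_negotiate_api() is ordered newest first, so a 1.4-capable VF falls back cleanly when the PF only speaks an older dialect. A minimal stand-alone model of that fallback loop, where negotiate() is a stand-in for the real VF<->PF mailbox exchange:

#include <stdio.h>

enum api_rev { api_10, api_11, api_12, api_13, api_14, api_unknown };

/* stand-in for the PF side: pretend the PF only understands up to 1.3 */
static int negotiate(enum api_rev wanted)
{
	return wanted <= api_13 ? 0 : -1;	/* 0 on success, like the driver */
}

int main(void)
{
	/* newest first, terminated by api_unknown, as in ixgbevf_negotiate_api() */
	enum api_rev api[] = { api_14, api_13, api_12, api_11, api_10, api_unknown };
	int idx = 0;

	while (api[idx] != api_unknown) {
		if (!negotiate(api[idx]))
			break;			/* first version both sides accept */
		idx++;
	}
	printf("negotiated API index %d\n", idx);
	return 0;
}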