Commit 76f919eb authored by Ayush Sawal, committed by David S. Miller

cxgb4/ch_ipsec: Registering xfrmdev_ops with cxgb4

ch_ipsec could be removed without clearing xfrmdev_ops and the esp-hw-offload
netdev feature. When a recalculation of netdev features was later triggered by
a user request changing the tls-hw-tx-offload feature, the stale pointer caused
a page fault because no valid xfrmdev_ops was present. Register the xfrmdev_ops
with cxgb4 itself so the callbacks stay valid; they forward to ch_ipsec while it
is loaded and return -EOPNOTSUPP otherwise.

Fixes: 6dad4e8a ("chcr: Add support for Inline IPSec")
Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8794ebfe
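
To make the failure mode and the fix concrete, here is a minimal userspace C
sketch of the two dispatch patterns described in the commit message. It is not
kernel code, and every identifier in it (fake_netdev, module_ops, core_ops,
loaded_module_ops) is hypothetical: in the old arrangement the offload module
hangs its own ops table directly on the device, so unloading the module leaves
a dangling pointer; in the new arrangement the core driver owns the table and
checks for the module on every call, failing gracefully instead of faulting.

/*
 * Hypothetical userspace sketch, not kernel code: fake_netdev, module_ops,
 * core_ops and loaded_module_ops are invented names used only to illustrate
 * the dispatch patterns discussed above.
 */
#include <errno.h>
#include <stdio.h>

struct fake_xfrmdev_ops {
    int (*state_add)(int id);
};

/* ops table owned by a loadable "module" (standing in for ch_ipsec) */
static int module_state_add(int id)
{
    printf("module: offloading state %d\n", id);
    return 0;
}

static const struct fake_xfrmdev_ops module_ops = {
    .state_add = module_state_add,
};

/* set while the "module" is loaded, NULL after it is unloaded */
static const struct fake_xfrmdev_ops *loaded_module_ops;

/* device seen by the stack (standing in for the netdev) */
struct fake_netdev {
    const struct fake_xfrmdev_ops *xfrmdev_ops;
};

/* core-owned callback (standing in for cxgb4): forwards only when loaded */
static int core_state_add(int id)
{
    if (!loaded_module_ops || !loaded_module_ops->state_add) {
        printf("core: offload module not loaded\n");
        return -EOPNOTSUPP;
    }
    return loaded_module_ops->state_add(id);
}

static const struct fake_xfrmdev_ops core_ops = {
    .state_add = core_state_add,
};

int main(void)
{
    struct fake_netdev dev;

    /* Old pattern: the module hangs its own table on the device. */
    loaded_module_ops = &module_ops;
    dev.xfrmdev_ops = &module_ops;
    dev.xfrmdev_ops->state_add(1);
    loaded_module_ops = NULL;
    /*
     * "Unload": in the kernel, module_ops would now be freed memory, so
     * calling dev.xfrmdev_ops->state_add() at this point is the page fault
     * the commit message describes. The sketch simply stops calling it.
     */

    /* New pattern: the core registers its own table once and checks. */
    dev.xfrmdev_ops = &core_ops;
    printf("state_add -> %d\n", dev.xfrmdev_ops->state_add(2)); /* not loaded */
    loaded_module_ops = &module_ops;                            /* reload */
    printf("state_add -> %d\n", dev.xfrmdev_ops->state_add(3)); /* forwarded */
    return 0;
}

This is the role chcr_offload_state() plays in the patch below: cxgb4 installs
its own cxgb4_xfrmdev_ops at probe time, and every callback takes uld_mutex and
verifies that the ch_ipsec ULD and its xfrmdev_ops are still registered before
forwarding, returning -EOPNOTSUPP (or doing nothing) otherwise.
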
@@ -146,6 +146,11 @@ enum {
     CXGB4_ETHTOOL_FLASH_BOOTCFG = 4
 };
 
+enum cxgb4_netdev_tls_ops {
+    CXGB4_TLSDEV_OPS = 1,
+    CXGB4_XFRMDEV_OPS
+};
+
 struct cxgb4_bootcfg_data {
     __le16 signature;
     __u8 reserved[2];
...
@@ -6396,6 +6396,49 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 }
 #endif /* CONFIG_PCI_IOV */
 
+#if defined(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+static int chcr_offload_state(struct adapter *adap,
+                              enum cxgb4_netdev_tls_ops op_val)
+{
+    switch (op_val) {
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+    case CXGB4_TLSDEV_OPS:
+        if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
+            dev_dbg(adap->pdev_dev, "chcr driver is not loaded\n");
+            return -EOPNOTSUPP;
+        }
+        if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
+            dev_dbg(adap->pdev_dev,
+                    "chcr driver has no registered tlsdev_ops\n");
+            return -EOPNOTSUPP;
+        }
+        break;
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+    case CXGB4_XFRMDEV_OPS:
+        if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
+            dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n");
+            return -EOPNOTSUPP;
+        }
+        if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
+            dev_dbg(adap->pdev_dev,
+                    "chipsec driver has no registered xfrmdev_ops\n");
+            return -EOPNOTSUPP;
+        }
+        break;
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+    default:
+        dev_dbg(adap->pdev_dev,
+                "driver has no support for offload %d\n", op_val);
+        return -EOPNOTSUPP;
+    }
+
+    return 0;
+}
+#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
+
 #if defined(CONFIG_CHELSIO_TLS_DEVICE)
 static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
@@ -6404,21 +6447,12 @@ static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
                               u32 tcp_sn)
 {
     struct adapter *adap = netdev2adap(netdev);
-    int ret = 0;
+    int ret;
 
     mutex_lock(&uld_mutex);
-    if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
-        dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
-        ret = -EOPNOTSUPP;
-        goto out_unlock;
-    }
-
-    if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
-        dev_err(adap->pdev_dev,
-                "chcr driver has no registered tlsdev_ops()\n");
-        ret = -EOPNOTSUPP;
+    ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
+    if (ret)
         goto out_unlock;
-    }
 
     ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
     if (ret)
@@ -6444,25 +6478,125 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
     struct adapter *adap = netdev2adap(netdev);
 
     mutex_lock(&uld_mutex);
-    if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
-        dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
+    if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
         goto out_unlock;
-    }
-
-    if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
-        dev_err(adap->pdev_dev,
-                "chcr driver has no registered tlsdev_ops\n");
-        goto out_unlock;
-    }
 
     adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
                                                         direction);
     cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
 
 out_unlock:
     mutex_unlock(&uld_mutex);
 }
 
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+{
+    struct adapter *adap = netdev2adap(x->xso.dev);
+    int ret;
+
+    if (!mutex_trylock(&uld_mutex)) {
+        dev_dbg(adap->pdev_dev,
+                "crypto uld critical resource is under use\n");
+        return -EBUSY;
+    }
+    ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
+    if (ret)
+        goto out_unlock;
+
+    ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+
+out_unlock:
+    mutex_unlock(&uld_mutex);
+
+    return ret;
+}
+
+static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+{
+    struct adapter *adap = netdev2adap(x->xso.dev);
+
+    if (!mutex_trylock(&uld_mutex)) {
+        dev_dbg(adap->pdev_dev,
+                "crypto uld critical resource is under use\n");
+        return;
+    }
+    if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+        goto out_unlock;
+
+    adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+
+out_unlock:
+    mutex_unlock(&uld_mutex);
+}
+
+static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+{
+    struct adapter *adap = netdev2adap(x->xso.dev);
+
+    if (!mutex_trylock(&uld_mutex)) {
+        dev_dbg(adap->pdev_dev,
+                "crypto uld critical resource is under use\n");
+        return;
+    }
+    if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+        goto out_unlock;
+
+    adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
+
+out_unlock:
+    mutex_unlock(&uld_mutex);
+}
+
+static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+    struct adapter *adap = netdev2adap(x->xso.dev);
+    bool ret = false;
+
+    if (!mutex_trylock(&uld_mutex)) {
+        dev_dbg(adap->pdev_dev,
+                "crypto uld critical resource is under use\n");
+        return ret;
+    }
+    if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+        goto out_unlock;
+
+    ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+
+out_unlock:
+    mutex_unlock(&uld_mutex);
+    return ret;
+}
+
+static void cxgb4_advance_esn_state(struct xfrm_state *x)
+{
+    struct adapter *adap = netdev2adap(x->xso.dev);
+
+    if (!mutex_trylock(&uld_mutex)) {
+        dev_dbg(adap->pdev_dev,
+                "crypto uld critical resource is under use\n");
+        return;
+    }
+    if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+        goto out_unlock;
+
+    adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
+
+out_unlock:
+    mutex_unlock(&uld_mutex);
+}
+
+static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
+    .xdo_dev_state_add = cxgb4_xfrm_add_state,
+    .xdo_dev_state_delete = cxgb4_xfrm_del_state,
+    .xdo_dev_state_free = cxgb4_xfrm_free_state,
+    .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
+    .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
+};
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
 static const struct tlsdev_ops cxgb4_ktls_ops = {
     .tls_dev_add = cxgb4_ktls_dev_add,
     .tls_dev_del = cxgb4_ktls_dev_del,
@@ -6728,7 +6862,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
             /* initialize the refcount */
             refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
         }
-#endif
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+        if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
+            netdev->hw_enc_features |= NETIF_F_HW_ESP;
+            netdev->features |= NETIF_F_HW_ESP;
+            netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
+        }
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
 
         netdev->priv_flags |= IFF_UNICAST_FLT;
 
         /* MTU range: 81 - 9600 */
...
@@ -479,6 +479,9 @@ struct cxgb4_uld_info {
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
     const struct tlsdev_ops *tlsdev_ops;
 #endif
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+    const struct xfrmdev_ops *xfrmdev_ops;
+#endif
 };
 
 void cxgb4_uld_enable(struct adapter *adap);
...
@@ -79,7 +79,6 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
 static void chcr_advance_esn_state(struct xfrm_state *x);
 static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
 static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
-static void update_netdev_features(void);
 
 static const struct xfrmdev_ops chcr_xfrmdev_ops = {
     .xdo_dev_state_add = chcr_xfrm_add_state,
@@ -89,23 +88,6 @@ static const struct xfrmdev_ops chcr_xfrmdev_ops = {
     .xdo_dev_state_advance_esn = chcr_advance_esn_state,
 };
 
-/* Add offload xfrms to Chelsio Interface */
-void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
-{
-    struct net_device *netdev = NULL;
-    int i;
-
-    for (i = 0; i < lld->nports; i++) {
-        netdev = lld->ports[i];
-        if (!netdev)
-            continue;
-        netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
-        netdev->hw_enc_features |= NETIF_F_HW_ESP;
-        netdev->features |= NETIF_F_HW_ESP;
-        netdev_change_features(netdev);
-    }
-}
-
 static struct cxgb4_uld_info ch_ipsec_uld_info = {
     .name = CHIPSEC_DRV_MODULE_NAME,
     .nrxq = MAX_ULD_QSETS,
@@ -114,6 +96,7 @@ static struct cxgb4_uld_info ch_ipsec_uld_info = {
     .add = ch_ipsec_uld_add,
     .state_change = ch_ipsec_uld_state_change,
     .tx_handler = chcr_ipsec_xmit,
+    .xfrmdev_ops = &chcr_xfrmdev_ops,
 };
 
 static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
@@ -808,26 +791,10 @@ out_free: dev_kfree_skb_any(skb);
     return NETDEV_TX_OK;
 }
 
-static void update_netdev_features(void)
-{
-    struct ipsec_uld_ctx *u_ctx, *tmp;
-
-    mutex_lock(&dev_mutex);
-    list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
-        if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
-            chcr_add_xfrmops(&u_ctx->lldi);
-    }
-    mutex_unlock(&dev_mutex);
-}
-
 static int __init chcr_ipsec_init(void)
 {
     cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);
-    rtnl_lock();
-    update_netdev_features();
-    rtnl_unlock();
     return 0;
 }
...