Commit 2ea32cd6 authored by Doug Ledford


Merge tag 'mlx5-updates-2018-02-28-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux into k.o/wip/dl-for-next

mlx5-updates-2018-02-28-2 (IPSec-2)

This series follows our previous one and lays the foundations for IPSec in
user-space while extending the current kernel netdev IPSec support. As noted
in our previous pull request cover letter "mlx5-updates-2018-02-28-1
(IPSec-1)", the IPSec mechanism will be supported through our flow steering
mechanism, so we need to change the initialization order. Furthermore, IPsec
is supported in both the egress and ingress directions. Since our current
flow steering is ingress only, we add an empty egress namespace (implemented
only through the FPGA steering ops) to handle that case. We also implement
the required flow steering callbacks and logic in our FPGA driver.
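
For readers new to the steering hook-up, here is an illustrative sketch (not
verbatim from the patch; the function name is hypothetical) of the
interception pattern the FPGA steering callbacks use. Error handling and the
SA-context bookkeeping done by the real callback are trimmed:

	static int fpga_create_fte_sketch(struct mlx5_core_dev *dev,
					  struct mlx5_flow_table *ft,
					  struct mlx5_flow_group *fg,
					  struct fs_fte *fte, bool is_egress)
	{
		/* pick the default firmware commands for this table type */
		enum fs_flow_table_type type =
			is_egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
		const struct mlx5_flow_cmds *def = mlx5_fs_cmd_get_default(type);

		/* plain (non-ESP) rules are delegated unchanged */
		if (!fte->action.esp_id)
			return def->create_fte(dev, ft, fg, fte);

		/* ESP rules additionally create an FPGA SA context and scrub
		 * the ESP bits from the FW mailbox first; see
		 * fpga_ipsec_fs_create_fte() in the diff below.
		 */
		return def->create_fte(dev, ft, fg, fte);
	}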

We also extend the FPGA support to cover ESN and modifying an existing
xfrm. To that end, we add a new FPGA command interface that supports both,
along with the other required bits. The new features and their requirements
are advertised via cap bits.
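
For example, the netdev driver gates ESN offload on these cap bits when
validating an xfrm state; a minimal sketch of that check (mirroring
mlx5e_xfrm_validate_state() in the diff below):

	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
	      MLX5_ACCEL_IPSEC_CAP_ESN)) {
		netdev_info(netdev, "Cannot offload ESN xfrm states\n");
		return -EINVAL;
	}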

Last but not least, we revise our driver's accel_esp API. This API will be
shared between our netdev and IB drivers, so it has to provide the
functionality required by both worlds.
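
To make the revised API concrete, below is a minimal consumer-side sketch
using the entry points from this series (mdev, attrs, saddr, daddr, spi and
is_ipv6 are assumed to be prepared by the caller; error unwinding is
abbreviated):

	struct mlx5_accel_esp_xfrm *xfrm;
	void *hw_ctx;
	int err;

	xfrm = mlx5_accel_esp_create_xfrm(mdev, &attrs,
					  MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
	if (IS_ERR(xfrm))
		return PTR_ERR(xfrm);

	/* bind the xfrm to concrete addresses and SPI -> HW SA context */
	hw_ctx = mlx5_accel_esp_create_hw_context(mdev, xfrm, saddr, daddr,
						  spi, is_ipv6);
	if (IS_ERR(hw_ctx)) {
		mlx5_accel_esp_destroy_xfrm(xfrm);
		return PTR_ERR(hw_ctx);
	}

	/* later, e.g. on an ESN advance, attributes may be modified */
	err = mlx5_accel_esp_modify_xfrm(xfrm, &attrs);

	/* teardown: free the HW context before destroying the xfrm */
	mlx5_accel_esp_free_hw_context(hw_ctx);
	mlx5_accel_esp_destroy_xfrm(xfrm);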

Regards,
Aviad and Matan
Signed-off-by: Doug Ledford <dledford@redhat.com>
parents 29cf1351 31135eb3
@@ -37,24 +37,11 @@
#include "mlx5_core.h"
#include "fpga/ipsec.h"
void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd)
{
if (!MLX5_IPSEC_DEV(mdev))
return ERR_PTR(-EOPNOTSUPP);
return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd);
}
int mlx5_accel_ipsec_sa_cmd_wait(void *ctx)
{
return mlx5_fpga_ipsec_sa_cmd_wait(ctx);
}
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_device_caps(mdev);
}
EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
@@ -67,6 +54,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
}
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6)
{
return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr,
spi, is_ipv6);
}
void mlx5_accel_esp_free_hw_context(void *context)
{
mlx5_fpga_ipsec_delete_sa_ctx(context);
}
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_init(mdev);
@@ -76,3 +78,32 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_ipsec_cleanup(mdev);
}
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
struct mlx5_accel_esp_xfrm *xfrm;
xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags);
if (IS_ERR(xfrm))
return xfrm;
xfrm->mdev = mdev;
return xfrm;
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
mlx5_fpga_esp_destroy_xfrm(xfrm);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
return mlx5_fpga_esp_modify_xfrm(xfrm, attrs);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
@@ -35,88 +35,24 @@
#define __MLX5_ACCEL_IPSEC_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/accel.h>
#ifdef CONFIG_MLX5_ACCEL
enum {
MLX5_ACCEL_IPSEC_DEVICE = BIT(1),
MLX5_ACCEL_IPSEC_IPV6 = BIT(2),
MLX5_ACCEL_IPSEC_ESP = BIT(3),
MLX5_ACCEL_IPSEC_LSO = BIT(4),
};
#define MLX5_IPSEC_SADB_IP_AH BIT(7)
#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
#define MLX5_IPSEC_SADB_IPV6 BIT(2)
enum {
MLX5_IPSEC_CMD_ADD_SA = 0,
MLX5_IPSEC_CMD_DEL_SA = 1,
};
enum mlx5_accel_ipsec_enc_mode {
MLX5_IPSEC_SADB_MODE_NONE = 0,
MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
};
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_DEVICE)
struct mlx5_accel_ipsec_sa {
__be32 cmd;
u8 key_enc[32];
u8 key_auth[32];
__be32 sip[4];
__be32 dip[4];
union {
struct {
__be32 reserved;
u8 salt_iv[8];
__be32 salt;
} __packed gcm;
struct {
u8 salt[16];
} __packed cbc;
};
__be32 spi;
__be32 sw_sa_handle;
__be16 tfclen;
u8 enc_mode;
u8 sip_masklen;
u8 dip_masklen;
u8 flags;
u8 reserved[2];
} __packed;
/**
* mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
* @mdev: mlx5 device
* @cmd: command to execute
* May be called from atomic context. Returns context pointer, or error
* Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic
* context, to cleanup the context pointer
*/
void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd);
/**
* mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
* @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
* Sleeps (killable) until command execution is complete.
* Returns the command result, or -EINTR if killed
*/
int mlx5_accel_ipsec_sa_cmd_wait(void *context);
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
			      MLX5_ACCEL_IPSEC_CAP_DEVICE)
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count);
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6);
void mlx5_accel_esp_free_hw_context(void *context);
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
@@ -124,6 +60,20 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
#define MLX5_IPSEC_DEV(mdev) false
static inline void *
mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6)
{
return NULL;
}
static inline void mlx5_accel_esp_free_hw_context(void *context)
{
}
static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
return 0;
@@ -109,8 +109,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cons_index = 0;
cq->arm_sn = 0;
cq->eq = eq;
refcount_set(&cq->refcount, 0);
mlx5_cq_hold(cq);
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
cq->comp = mlx5_add_cq_to_tasklet;
@@ -38,17 +38,24 @@
#include <linux/module.h>
#include "en.h"
#include "accel/ipsec.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
void *context;
};
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa;
if (!x)
return NULL;
sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
if (!sa)
return NULL;
WARN_ON(sa->x != x);
return sa;
}
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
unsigned int handle)
@@ -105,78 +112,93 @@ static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
ida_simple_remove(&ipsec->halloc, sa_entry->handle);
}
static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;
switch (key_len) {
case 16:
return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
case 32:
return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
default:
netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n",
key_len, x->aead->alg_name);
return -1;
struct xfrm_replay_state_esn *replay_esn;
u32 seq_bottom;
u8 overlap;
u32 *esn;
if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
sa_entry->esn_state.trigger = 0;
return false;
}
replay_esn = sa_entry->x->replay_esn;
seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
overlap = sa_entry->esn_state.overlap;
sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
htonl(seq_bottom));
esn = &sa_entry->esn_state.esn;
sa_entry->esn_state.trigger = 1;
if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
++(*esn);
sa_entry->esn_state.overlap = 0;
return true;
} else if (unlikely(!overlap &&
(seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
sa_entry->esn_state.overlap = 1;
return true;
}
return false;
}
static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_ipsec_sa *hw_sa)
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
struct aead_geniv_ctx *geniv_ctx;
unsigned int crypto_data_len;
struct crypto_aead *aead;
unsigned int key_len;
unsigned int crypto_data_len, key_len;
int ivsize;
memset(hw_sa, 0, sizeof(*hw_sa));
if (op == MLX5_IPSEC_CMD_ADD_SA) {
crypto_data_len = (x->aead->alg_key_len + 7) / 8;
key_len = crypto_data_len - 4; /* 4 bytes salt at end */
aead = x->data;
geniv_ctx = crypto_aead_ctx(aead);
ivsize = crypto_aead_ivsize(aead);
memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len);
/* Duplicate 128 bit key twice according to HW layout */
if (key_len == 16)
memcpy(&hw_sa->key_enc[16], x->aead->alg_key, key_len);
memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize);
hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
}
memset(attrs, 0, sizeof(*attrs));
hw_sa->cmd = htonl(op);
hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN;
if (x->props.family == AF_INET) {
hw_sa->sip[3] = x->props.saddr.a4;
hw_sa->dip[3] = x->id.daddr.a4;
hw_sa->sip_masklen = 32;
hw_sa->dip_masklen = 32;
} else {
memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip));
memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip));
hw_sa->sip_masklen = 128;
hw_sa->dip_masklen = 128;
hw_sa->flags |= MLX5_IPSEC_SADB_IPV6;
}
hw_sa->spi = x->id.spi;
hw_sa->sw_sa_handle = htonl(sa_entry->handle);
switch (x->id.proto) {
case IPPROTO_ESP:
hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP;
break;
case IPPROTO_AH:
hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH;
break;
default:
break;
/* key */
crypto_data_len = (x->aead->alg_key_len + 7) / 8;
key_len = crypto_data_len - 4; /* 4 bytes salt at end */
memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
aes_gcm->key_len = key_len * 8;
/* salt and seq_iv */
aead = x->data;
geniv_ctx = crypto_aead_ctx(aead);
ivsize = crypto_aead_ivsize(aead);
memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
sizeof(aes_gcm->salt));
/* icv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
/* esn */
if (sa_entry->esn_state.trigger) {
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
attrs->esn = sa_entry->esn_state.esn;
if (sa_entry->esn_state.overlap)
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
}
hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x);
if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX;
/* rx handle */
attrs->sa_handle = sa_entry->handle;
/* algo type */
attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
/* action */
attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
MLX5_ACCEL_ESP_ACTION_ENCRYPT :
MLX5_ACCEL_ESP_ACTION_DECRYPT;
/* flags */
attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -198,7 +220,9 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Cannot offload compressed xfrm states\n");
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN) {
if (x->props.flags & XFRM_STATE_ESN &&
!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -246,7 +270,8 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.family == AF_INET6 &&
!(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) {
!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_IPV6)) {
netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
return -EINVAL;
}
@@ -257,9 +282,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.dev;
struct mlx5_accel_ipsec_sa hw_sa;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
void *context;
__be32 saddr[4] = {0}, daddr[4] = {0}, spi;
bool is_ipv6 = false;
int err;
priv = netdev_priv(netdev);
@@ -286,22 +312,49 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
goto err_entry;
}
} else {
sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
}
mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa);
context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
if (IS_ERR(context)) {
err = PTR_ERR(context);
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
/* create xfrm */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
sa_entry->xfrm =
mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
goto err_sadb_rx;
}
err = mlx5_accel_ipsec_sa_cmd_wait(context);
if (err)
goto err_sadb_rx;
/* create hw context */
if (x->props.family == AF_INET) {
saddr[3] = x->props.saddr.a4;
daddr[3] = x->id.daddr.a4;
} else {
memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
is_ipv6 = true;
}
spi = x->id.spi;
sa_entry->hw_context =
mlx5_accel_esp_create_hw_context(priv->mdev,
sa_entry->xfrm,
saddr, daddr, spi,
is_ipv6);
if (IS_ERR(sa_entry->hw_context)) {
err = PTR_ERR(sa_entry->hw_context);
goto err_xfrm;
}
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
err_xfrm:
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sadb_rx:
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
mlx5e_ipsec_sadb_rx_del(sa_entry);
@@ -315,43 +368,26 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5_accel_ipsec_sa hw_sa;
void *context;
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
if (!x->xso.offload_handle)
if (!sa_entry)
return;
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
WARN_ON(sa_entry->x != x);
if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
mlx5e_ipsec_sadb_rx_del(sa_entry);
mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
if (IS_ERR(context))
return;
sa_entry->context = context;
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry;
int res;
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
if (!x->xso.offload_handle)
if (!sa_entry)
return;
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
WARN_ON(sa_entry->x != x);
res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context);
sa_entry->context = NULL;
if (res) {
/* Leftover object will leak */
return;
if (sa_entry->hw_context) {
flush_workqueue(sa_entry->ipsec->wq);
mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
@@ -378,6 +414,14 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
ida_init(&ipsec->halloc);
ipsec->en_priv = priv;
ipsec->en_priv->ipsec = ipsec;
ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
kfree(ipsec);
return -ENOMEM;
}
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
}
@@ -389,6 +433,9 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
drain_workqueue(ipsec->wq);
destroy_workqueue(ipsec->wq);
ida_destroy(&ipsec->halloc);
kfree(ipsec);
priv->ipsec = NULL;
@@ -409,11 +456,58 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return true;
}
struct mlx5e_ipsec_modify_state_work {
struct work_struct work;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_ipsec_sa_entry *sa_entry;
};
static void _update_xfrm_state(struct work_struct *work)
{
int ret;
struct mlx5e_ipsec_modify_state_work *modify_work =
container_of(work, struct mlx5e_ipsec_modify_state_work, work);
struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;
ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
&modify_work->attrs);
if (ret)
netdev_warn(sa_entry->ipsec->en_priv->netdev,
"Not an IPSec offload device\n");
kfree(modify_work);
}
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_modify_state_work *modify_work;
bool need_update;
if (!sa_entry)
return;
need_update = mlx5e_ipsec_update_esn_state(sa_entry);
if (!need_update)
return;
modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
if (!modify_work)
return;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
modify_work->sa_entry = sa_entry;
INIT_WORK(&modify_work->work, _update_xfrm_state);
WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
}
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
@@ -424,7 +518,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
if (!priv->ipsec)
return;
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) ||
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
return;
@@ -443,7 +537,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) ||
if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
!MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
@@ -40,7 +40,11 @@
#include <net/xfrm.h>
#include <linux/idr.h>
#include "accel/ipsec.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
@@ -77,10 +81,30 @@ struct mlx5e_ipsec_stats {
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
bool no_trailer;
spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */
struct ida halloc;
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
};
struct mlx5e_ipsec_esn_state {
u32 esn;
u8 trigger: 1;
u8 overlap: 1;
};
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm *xfrm;
void *hw_context;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
};
void mlx5e_ipsec_build_inverse_table(void);
@@ -42,10 +42,11 @@
enum {
MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};
struct mlx5e_ipsec_rx_metadata {
unsigned char reserved;
unsigned char nexthdr;
__be32 sa_handle;
} __packed;
@@ -175,7 +176,30 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
}
}
static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo)
{
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
__u32 oseq = replay_esn->oseq;
int iv_offset;
__be64 seqno;
u32 seq_hi;
if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
seq_hi = xo->seq.hi - 1;
} else {
seq_hi = xo->seq.hi;
}
/* Place the SN in the IV field */
seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
skb_store_bits(skb, iv_offset, &seqno, 8);
}
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo)
{
int iv_offset;
__be64 seqno;
@@ -227,6 +251,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_state *x;
if (!xo)
@@ -261,7 +286,8 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
mlx5e_ipsec_set_iv(skb, xo);
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
mlx5e_ipsec_set_metadata(skb, mdata, xo);
return skb;
@@ -301,10 +327,17 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
switch (mdata->syndrome) {
case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (likely(priv->ipsec->no_trailer)) {
xo->flags |= XFRM_ESP_NO_TRAILER;
xo->proto = mdata->content.rx.nexthdr;
}
break;
case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
break;
case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
xo->status = CRYPTO_INVALID_PROTOCOL;
break;
default:
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
return NULL;
@@ -37,6 +37,7 @@
#ifdef CONFIG_MLX5_EN_IPSEC
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include "en.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
@@ -46,6 +47,10 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void);
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
netdev_features_t features);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe *wqe,
struct sk_buff *skb);
@@ -31,9 +31,14 @@
*
*/
#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_cmd.h"
#include "fpga/ipsec.h"
#include "fpga/sdk.h"
#include "fpga/core.h"
@@ -41,40 +46,76 @@
#define SBU_QP_QUEUE_SIZE 8
#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000)
enum mlx5_ipsec_response_syndrome {
MLX5_IPSEC_RESPONSE_SUCCESS = 0,
MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
MLX5_IPSEC_RESPONSE_SADB_ISSUE = 2,
MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
enum mlx5_fpga_ipsec_cmd_status {
MLX5_FPGA_IPSEC_CMD_PENDING,
MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
MLX5_FPGA_IPSEC_CMD_COMPLETE,
};
enum mlx5_fpga_ipsec_sacmd_status {
MLX5_FPGA_IPSEC_SACMD_PENDING,
MLX5_FPGA_IPSEC_SACMD_SEND_FAIL,
MLX5_FPGA_IPSEC_SACMD_COMPLETE,
};
struct mlx5_ipsec_command_context {
struct mlx5_fpga_ipsec_cmd_context {
struct mlx5_fpga_dma_buf buf;
struct mlx5_accel_ipsec_sa sa;
enum mlx5_fpga_ipsec_sacmd_status status;
enum mlx5_fpga_ipsec_cmd_status status;
struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
int status_code;
struct completion complete;
struct mlx5_fpga_device *dev;
struct list_head list; /* Item in pending_cmds */
u8 command[0];
};
struct mlx5_fpga_esp_xfrm;
struct mlx5_fpga_ipsec_sa_ctx {
struct rhash_head hash;
struct mlx5_ifc_fpga_ipsec_sa hw_sa;
struct mlx5_core_dev *dev;
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
};
struct mlx5_fpga_esp_xfrm {
unsigned int num_rules;
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mutex lock; /* xfrm lock */
struct mlx5_accel_esp_xfrm accel_xfrm;
};
struct mlx5_fpga_ipsec_rule {
struct rb_node node;
struct fs_fte *fte;
struct mlx5_fpga_ipsec_sa_ctx *ctx;
};
struct mlx5_ipsec_sadb_resp {
__be32 syndrome;
__be32 sw_sa_handle;
u8 reserved[24];
} __packed;
static const struct rhashtable_params rhash_sa = {
.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
.automatic_shrinking = true,
.min_size = 1,
};
struct mlx5_fpga_ipsec {
struct mlx5_fpga_device *fdev;
struct list_head pending_cmds;
spinlock_t pending_cmds_lock; /* Protects pending_cmds */
u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
struct mlx5_fpga_conn *conn;
struct notifier_block fs_notifier_ingress_bypass;
struct notifier_block fs_notifier_egress;
/* Map hardware SA           -->  SA context
 *     (mlx5_fpga_ipsec_sa)       (mlx5_fpga_ipsec_sa_ctx)
 * We use this hash to avoid duplicating SAs in the FPGA,
 * which is not allowed
 */
struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
struct mutex sa_hash_lock;
/* Tree holding all rules for this fpga device
* Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
*/
struct rb_root rules_rb;
struct mutex rules_rb_lock; /* rules lock */
};
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
@@ -98,28 +139,29 @@ static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_dma_buf *buf,
u8 status)
{
struct mlx5_ipsec_command_context *context;
struct mlx5_fpga_ipsec_cmd_context *context;
if (status) {
context = container_of(buf, struct mlx5_ipsec_command_context,
context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
buf);
mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
status);
context->status = MLX5_FPGA_IPSEC_SACMD_SEND_FAIL;
context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
complete(&context->complete);
}
}
static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome)
static inline
int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
{
switch (syndrome) {
case MLX5_IPSEC_RESPONSE_SUCCESS:
case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
return 0;
case MLX5_IPSEC_RESPONSE_SADB_ISSUE:
case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
return -EEXIST;
case MLX5_IPSEC_RESPONSE_ILLEGAL_REQUEST:
case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
return -EINVAL;
case MLX5_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
return -EIO;
}
return -EIO;
@@ -127,9 +169,9 @@ static inline int syndrome_to_errno(enum mlx5_ipsec_response_syndrome syndrome)
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
struct mlx5_ipsec_sadb_resp *resp = buf->sg[0].data;
struct mlx5_ipsec_command_context *context;
enum mlx5_ipsec_response_syndrome syndrome;
struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
struct mlx5_fpga_ipsec_cmd_context *context;
enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
struct mlx5_fpga_device *fdev = cb_arg;
unsigned long flags;
@@ -139,12 +181,12 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
return;
}
mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x sa_id %x\n",
ntohl(resp->syndrome), ntohl(resp->sw_sa_handle));
mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
ntohl(resp->syndrome));
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
struct mlx5_ipsec_command_context,
struct mlx5_fpga_ipsec_cmd_context,
list);
if (context)
list_del(&context->list);
@@ -156,51 +198,48 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
}
mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);
if (context->sa.sw_sa_handle != resp->sw_sa_handle) {
mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
ntohl(context->sa.sw_sa_handle),
ntohl(resp->sw_sa_handle));
return;
}
syndrome = ntohl(resp->syndrome);
context->status_code = syndrome_to_errno(syndrome);
context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE;
context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
memcpy(&context->resp, resp, sizeof(*resp));
if (context->status_code)
mlx5_fpga_warn(fdev, "IPSec SADB command failed with syndrome %08x\n",
mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
syndrome);
complete(&context->complete);
}
void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd)
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
const void *cmd, int cmd_size)
{
struct mlx5_ipsec_command_context *context;
struct mlx5_fpga_ipsec_cmd_context *context;
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned long flags;
int res = 0;
int res;
BUILD_BUG_ON((sizeof(struct mlx5_accel_ipsec_sa) & 3) != 0);
if (!fdev || !fdev->ipsec)
return ERR_PTR(-EOPNOTSUPP);
context = kzalloc(sizeof(*context), GFP_ATOMIC);
if (cmd_size & 3)
return ERR_PTR(-EINVAL);
context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
if (!context)
return ERR_PTR(-ENOMEM);
memcpy(&context->sa, cmd, sizeof(*cmd));
context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
context->dev = fdev;
context->buf.complete = mlx5_fpga_ipsec_send_complete;
context->buf.sg[0].size = sizeof(context->sa);
context->buf.sg[0].data = &context->sa;
init_completion(&context->complete);
context->dev = fdev;
memcpy(&context->command, cmd, cmd_size);
context->buf.sg[0].size = cmd_size;
context->buf.sg[0].data = &context->command;
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
context->status = MLX5_FPGA_IPSEC_SACMD_PENDING;
res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
if (res) {
mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
@@ -215,9 +254,9 @@ void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
return context;
}
int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
struct mlx5_ipsec_command_context *context = ctx;
struct mlx5_fpga_ipsec_cmd_context *context = ctx;
unsigned long timeout =
msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
int res;
@@ -228,36 +267,90 @@ int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
return -ETIMEDOUT;
}
if (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE)
if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
res = context->status_code;
else
res = -EIO;
kfree(context);
return res;
}
static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
{
if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
return true;
return false;
}
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
int opcode)
{
struct mlx5_core_dev *dev = fdev->mdev;
struct mlx5_ifc_fpga_ipsec_sa *sa;
struct mlx5_fpga_ipsec_cmd_context *cmd_context;
size_t sa_cmd_size;
int err;
hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
if (is_v2_sadb_supported(fdev->ipsec))
sa_cmd_size = sizeof(*hw_sa);
else
sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
if (IS_ERR(cmd_context))
return PTR_ERR(cmd_context);
err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
if (err)
goto out;
sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
ntohl(sa->ipsec_sa_v1.sw_sa_handle),
ntohl(cmd_context->resp.sw_sa_handle));
err = -EIO;
}
out:
kfree(cmd_context);
return err;
}
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
u32 ret = 0;
if (mlx5_fpga_is_ipsec_device(mdev))
ret |= MLX5_ACCEL_IPSEC_DEVICE;
else
if (mlx5_fpga_is_ipsec_device(mdev)) {
ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
} else {
return ret;
}
if (!fdev->ipsec)
return ret;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
ret |= MLX5_ACCEL_IPSEC_ESP;
ret |= MLX5_ACCEL_IPSEC_CAP_ESP;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
ret |= MLX5_ACCEL_IPSEC_IPV6;
ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
ret |= MLX5_ACCEL_IPSEC_LSO;
ret |= MLX5_ACCEL_IPSEC_CAP_LSO;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
}
return ret;
}
@@ -321,6 +414,828 @@ int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
return ret;
}
static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
struct mlx5_fpga_ipsec_cmd_context *context;
struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
int err;
cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
cmd.flags = htonl(flags);
context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
if (IS_ERR(context)) {
err = PTR_ERR(context);
goto out;
}
err = mlx5_fpga_ipsec_cmd_wait(context);
if (err)
goto out;
if ((context->resp.flags & cmd.flags) != cmd.flags) {
mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
cmd.flags,
context->resp.flags);
err = -EIO;
}
out:
return err;
}
static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
{
u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
u32 flags = 0;
if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;
return mlx5_fpga_ipsec_set_caps(mdev, flags);
}
static void
mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
/* key */
memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
aes_gcm->key_len / 8);
/* Duplicate 128 bit key twice according to HW layout */
if (aes_gcm->key_len == 128)
memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
aes_gcm->aes_key, aes_gcm->key_len / 8);
/* salt and seq_iv */
memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
sizeof(aes_gcm->seq_iv));
memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
sizeof(aes_gcm->salt));
/* esn */
if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
hw_sa->ipsec_sa_v1.flags |=
(xfrm_attrs->flags &
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
hw_sa->esn = htonl(xfrm_attrs->esn);
} else {
hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
hw_sa->ipsec_sa_v1.flags &=
~(xfrm_attrs->flags &
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
hw_sa->esn = 0;
}
/* rx handle */
hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);
/* enc mode */
switch (aes_gcm->key_len) {
case 128:
hw_sa->ipsec_sa_v1.enc_mode =
MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
break;
case 256:
hw_sa->ipsec_sa_v1.enc_mode =
MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
break;
}
/* flags */
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
MLX5_FPGA_IPSEC_SA_SPI_EN |
MLX5_FPGA_IPSEC_SA_IP_ESP;
if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
else
hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
}
static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6,
struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);
/* IPs */
memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));
/* SPI */
hw_sa->ipsec_sa_v1.spi = spi;
/* flags */
if (is_ipv6)
hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}
static bool is_full_mask(const void *p, size_t len)
{
WARN_ON(len % 4);
return !memchr_inv(p, 0xff, len);
}
static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
const u32 *match_c,
const u32 *match_v)
{
const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
match_c,
misc_parameters);
const void *headers_c = MLX5_ADDR_OF(fte_match_param,
match_c,
outer_headers);
const void *headers_v = MLX5_ADDR_OF(fte_match_param,
match_v,
outer_headers);
if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4);
const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
ipv4)) ||
!is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
ipv4)))
return false;
} else {
const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6);
const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
ipv6)) ||
!is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
ipv6)))
return false;
}
if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
outer_esp_spi),
MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
return false;
return true;
}
static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
u8 match_criteria_enable,
const u32 *match_c,
const u32 *match_v)
{
u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
bool ipv6_flow;
ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
mlx5_fs_is_vxlan_flow(match_c) ||
!(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
ipv6_flow))
return false;
if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
return false;
if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
mlx5_fs_is_outer_ipsec_flow(match_c))
return false;
if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
ipv6_flow)
return false;
if (!validate_fpga_full_mask(dev, match_c, match_v))
return false;
return true;
}
static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
u8 match_criteria_enable,
const u32 *match_c,
const u32 *match_v,
struct mlx5_flow_act *flow_act)
{
const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
int ret;
ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
match_v);
if (!ret)
return ret;
if (is_dmac || is_smac ||
(match_criteria_enable &
~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
(flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
flow_act->has_flow_tag)
return false;
return true;
}
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6)
{
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
container_of(accel_xfrm, typeof(*fpga_xfrm),
accel_xfrm);
struct mlx5_fpga_device *fdev = mdev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
int opcode, err;
void *context;
/* alloc SA */
sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
if (!sa_ctx)
return ERR_PTR(-ENOMEM);
sa_ctx->dev = mdev;
/* build candidate SA */
mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
saddr, daddr, spi, is_ipv6,
&sa_ctx->hw_sa);
mutex_lock(&fpga_xfrm->lock);
if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
/* all rules must be with same IPs and SPI */
if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
sizeof(sa_ctx->hw_sa))) {
context = ERR_PTR(-EINVAL);
goto exists;
}
++fpga_xfrm->num_rules;
context = fpga_xfrm->sa_ctx;
goto exists;
}
/* This is unbounded fpga_xfrm, try to add to hash */
mutex_lock(&fipsec->sa_hash_lock);
err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa);
if (err) {
/* Can't bind a different accel_xfrm to an already existing
		 * sa_ctx. This is because we can't support multiple keymats
		 * for the same IPs and SPI
		 */
context = ERR_PTR(-EEXIST);
goto unlock_hash;
}
/* Bound accel_xfrm to sa_ctx */
opcode = is_v2_sadb_supported(fdev->ipsec) ?
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
if (err) {
context = ERR_PTR(err);
goto delete_hash;
}
mutex_unlock(&fipsec->sa_hash_lock);
++fpga_xfrm->num_rules;
fpga_xfrm->sa_ctx = sa_ctx;
sa_ctx->fpga_xfrm = fpga_xfrm;
mutex_unlock(&fpga_xfrm->lock);
return sa_ctx;
delete_hash:
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa));
unlock_hash:
mutex_unlock(&fipsec->sa_hash_lock);
exists:
mutex_unlock(&fpga_xfrm->lock);
kfree(sa_ctx);
return context;
}
static void *
mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
struct fs_fte *fte,
bool is_egress)
{
struct mlx5_accel_esp_xfrm *accel_xfrm;
__be32 saddr[4], daddr[4], spi;
struct mlx5_flow_group *fg;
bool is_ipv6 = false;
fs_get_obj(fg, fte->node.parent);
/* validate */
if (is_egress &&
!mlx5_is_fpga_egress_ipsec_rule(mdev,
fg->mask.match_criteria_enable,
fg->mask.match_criteria,
fte->val,
&fte->action))
return ERR_PTR(-EINVAL);
else if (!mlx5_is_fpga_ipsec_rule(mdev,
fg->mask.match_criteria_enable,
fg->mask.match_criteria,
fte->val))
return ERR_PTR(-EINVAL);
/* get xfrm context */
accel_xfrm =
(struct mlx5_accel_esp_xfrm *)fte->action.esp_id;
/* IPs */
if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
fte->val)) {
memcpy(&saddr[3],
MLX5_ADDR_OF(fte_match_set_lyr_2_4,
fte->val,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
sizeof(saddr[3]));
memcpy(&daddr[3],
MLX5_ADDR_OF(fte_match_set_lyr_2_4,
fte->val,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
sizeof(daddr[3]));
} else {
memcpy(saddr,
MLX5_ADDR_OF(fte_match_param,
fte->val,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
sizeof(saddr));
memcpy(daddr,
MLX5_ADDR_OF(fte_match_param,
fte->val,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
sizeof(daddr));
is_ipv6 = true;
}
/* SPI */
spi = MLX5_GET_BE(typeof(spi),
fte_match_param, fte->val,
misc_parameters.outer_esp_spi);
/* create */
return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
saddr, daddr,
spi, is_ipv6);
}
static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
int opcode = is_v2_sadb_supported(fdev->ipsec) ?
MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
int err;
err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
if (err) {
WARN_ON(err);
return;
}
mutex_lock(&fipsec->sa_hash_lock);
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa));
mutex_unlock(&fipsec->sa_hash_lock);
}
void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
mutex_lock(&fpga_xfrm->lock);
if (!--fpga_xfrm->num_rules) {
mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
fpga_xfrm->sa_ctx = NULL;
}
mutex_unlock(&fpga_xfrm->lock);
}
static inline struct mlx5_fpga_ipsec_rule *
_rule_search(struct rb_root *root, struct fs_fte *fte)
{
struct rb_node *node = root->rb_node;
while (node) {
struct mlx5_fpga_ipsec_rule *rule =
container_of(node, struct mlx5_fpga_ipsec_rule,
node);
if (rule->fte < fte)
node = node->rb_left;
else if (rule->fte > fte)
node = node->rb_right;
else
return rule;
}
return NULL;
}
static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
struct mlx5_fpga_ipsec_rule *rule;
mutex_lock(&ipsec_dev->rules_rb_lock);
rule = _rule_search(&ipsec_dev->rules_rb, fte);
mutex_unlock(&ipsec_dev->rules_rb_lock);
return rule;
}
static inline int _rule_insert(struct rb_root *root,
struct mlx5_fpga_ipsec_rule *rule)
{
struct rb_node **new = &root->rb_node, *parent = NULL;
/* Figure out where to put new node */
while (*new) {
struct mlx5_fpga_ipsec_rule *this =
container_of(*new, struct mlx5_fpga_ipsec_rule,
node);
parent = *new;
if (rule->fte < this->fte)
new = &((*new)->rb_left);
else if (rule->fte > this->fte)
new = &((*new)->rb_right);
else
return -EEXIST;
}
/* Add new node and rebalance tree. */
rb_link_node(&rule->node, parent, new);
rb_insert_color(&rule->node, root);
return 0;
}
static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
struct mlx5_fpga_ipsec_rule *rule)
{
int ret;
mutex_lock(&ipsec_dev->rules_rb_lock);
ret = _rule_insert(&ipsec_dev->rules_rb, rule);
mutex_unlock(&ipsec_dev->rules_rb_lock);
return ret;
}
static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
struct mlx5_fpga_ipsec_rule *rule)
{
struct rb_root *root = &ipsec_dev->rules_rb;
mutex_lock(&ipsec_dev->rules_rb_lock);
rb_erase(&rule->node, root);
mutex_unlock(&ipsec_dev->rules_rb_lock);
}
static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
struct mlx5_fpga_ipsec_rule *rule)
{
_rule_delete(ipsec_dev, rule);
kfree(rule);
}
struct mailbox_mod {
uintptr_t saved_esp_id;
u32 saved_action;
u32 saved_outer_esp_spi_value;
};
static void restore_spec_mailbox(struct fs_fte *fte,
struct mailbox_mod *mbox_mod)
{
char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
fte->val,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
mbox_mod->saved_outer_esp_spi_value);
fte->action.action |= mbox_mod->saved_action;
fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}
static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
struct fs_fte *fte,
struct mailbox_mod *mbox_mod)
{
char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
fte->val,
misc_parameters);
mbox_mod->saved_esp_id = fte->action.esp_id;
mbox_mod->saved_action = fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
mbox_mod->saved_outer_esp_spi_value =
MLX5_GET(fte_match_set_misc, misc_params_v,
outer_esp_spi);
fte->action.esp_id = 0;
fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
if (!MLX5_CAP_FLOWTABLE(mdev,
flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}
static enum fs_flow_table_type egress_to_fs_ft(bool egress)
{
return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}
static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id,
bool is_egress)
{
int (*create_flow_group)(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft, u32 *in,
unsigned int *group_id) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria.misc_parameters);
u32 saved_outer_esp_spi_mask;
u8 match_criteria_enable;
int ret;
if (MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
return create_flow_group(dev, ft, in, group_id);
match_criteria_enable =
MLX5_GET(create_flow_group_in, in, match_criteria_enable);
saved_outer_esp_spi_mask =
MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
if (!match_criteria_enable || !saved_outer_esp_spi_mask)
return create_flow_group(dev, ft, in, group_id);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
if (!(*misc_params_c) &&
!memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
ret = create_flow_group(dev, ft, in, group_id);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
return ret;
}
static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte,
bool is_egress)
{
int (*create_fte)(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
if (!is_esp ||
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return create_fte(dev, ft, fg, fte);
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
if (IS_ERR(rule->ctx)) {
kfree(rule);
return PTR_ERR(rule->ctx);
}
rule->fte = fte;
WARN_ON(rule_insert(fipsec, rule));
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = create_fte(dev, ft, fg, fte);
restore_spec_mailbox(fte, &mbox_mod);
if (ret) {
_rule_delete(fipsec, rule);
mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
kfree(rule);
}
return ret;
}
static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int group_id,
int modify_mask,
struct fs_fte *fte,
bool is_egress)
{
int (*update_fte)(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int group_id,
int modify_mask,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
if (!is_esp ||
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return update_fte(dev, ft, group_id, modify_mask, fte);
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = update_fte(dev, ft, group_id, modify_mask, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct fs_fte *fte,
bool is_egress)
{
int (*delete_fte)(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
if (!is_esp ||
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
return delete_fte(dev, ft, fte);
rule = rule_search(fipsec, fte);
if (!rule)
return -ENOENT;
mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
rule_delete(fipsec, rule);
modify_spec_mailbox(dev, fte, &mbox_mod);
ret = delete_fte(dev, ft, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id)
{
return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
}
static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
}
static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int group_id,
int modify_mask,
struct fs_fte *fte)
{
return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
true);
}
static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
}
static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
u32 *in,
unsigned int *group_id)
{
return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
}
static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
}
static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned int group_id,
int modify_mask,
struct fs_fte *fte)
{
return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
false);
}
static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
}
static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
switch (type) {
case FS_FT_NIC_RX:
return &fpga_ipsec_ingress;
case FS_FT_NIC_TX:
return &fpga_ipsec_egress;
default:
WARN_ON(true);
return NULL;
}
}
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_conn_attr init_attr = {0};
@@ -335,6 +1250,8 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
if (!fdev->ipsec)
return -ENOMEM;
fdev->ipsec->fdev = fdev;
err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
fdev->ipsec->caps);
if (err) {
@@ -358,14 +1275,47 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
goto error;
}
fdev->ipsec->conn = conn;
err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
if (err)
goto err_destroy_conn;
mutex_init(&fdev->ipsec->sa_hash_lock);
fdev->ipsec->rules_rb = RB_ROOT;
mutex_init(&fdev->ipsec->rules_rb_lock);
err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
if (err) {
mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
err);
goto err_destroy_hash;
}
return 0;
err_destroy_hash:
rhashtable_destroy(&fdev->ipsec->sa_hash);
err_destroy_conn:
mlx5_fpga_sbu_conn_destroy(conn);
error:
kfree(fdev->ipsec);
fdev->ipsec = NULL;
return err;
}
static void destroy_rules_rb(struct rb_root *root)
{
struct mlx5_fpga_ipsec_rule *r, *tmp;
rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
rb_erase(&r->node, root);
mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
kfree(r);
}
}
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
@@ -373,7 +1323,209 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
if (!mlx5_fpga_is_ipsec_device(mdev))
return;
destroy_rules_rb(&fdev->ipsec->rules_rb);
rhashtable_destroy(&fdev->ipsec->sa_hash);
mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
kfree(fdev->ipsec);
fdev->ipsec = NULL;
}
void mlx5_fpga_ipsec_build_fs_cmds(void)
{
/* ingress */
fpga_ipsec_ingress.create_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
fpga_ipsec_ingress.destroy_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
fpga_ipsec_ingress.modify_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
fpga_ipsec_ingress.create_flow_group =
mlx5_fpga_ipsec_fs_create_flow_group_ingress;
fpga_ipsec_ingress.destroy_flow_group =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
fpga_ipsec_ingress.create_fte =
mlx5_fpga_ipsec_fs_create_fte_ingress;
fpga_ipsec_ingress.update_fte =
mlx5_fpga_ipsec_fs_update_fte_ingress;
fpga_ipsec_ingress.delete_fte =
mlx5_fpga_ipsec_fs_delete_fte_ingress;
fpga_ipsec_ingress.update_root_ft =
mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;
/* egress */
fpga_ipsec_egress.create_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
fpga_ipsec_egress.destroy_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
fpga_ipsec_egress.modify_flow_table =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
fpga_ipsec_egress.create_flow_group =
mlx5_fpga_ipsec_fs_create_flow_group_egress;
fpga_ipsec_egress.destroy_flow_group =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
fpga_ipsec_egress.create_fte =
mlx5_fpga_ipsec_fs_create_fte_egress;
fpga_ipsec_egress.update_fte =
mlx5_fpga_ipsec_fs_update_fte_egress;
fpga_ipsec_egress.delete_fte =
mlx5_fpga_ipsec_fs_delete_fte_egress;
fpga_ipsec_egress.update_root_ft =
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
}
static int
mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
if (attrs->tfc_pad) {
mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
return -EOPNOTSUPP;
}
if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
return -EOPNOTSUPP;
}
if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.iv_algo !=
MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.icv_len != 128) {
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.key_len != 128 &&
attrs->keymat.aes_gcm.key_len != 256) {
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
return -EOPNOTSUPP;
}
if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
(!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
v2_command))) {
mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
return -EOPNOTSUPP;
}
return 0;
}
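For reference, a minimal sketch of an attrs block that passes every check above: AES-GCM keymat, sequence IV algorithm, 128-bit ICV, 128- or 256-bit key, no TFC padding, no anti-replay. The helper name and all values are hypothetical, not part of this commit, and the snippet assumes the driver's usual headers:
/* Hypothetical helper: fills attrs that
 * mlx5_fpga_esp_validate_xfrm_attrs() accepts. Illustrative only. */
static void example_fill_offloadable_attrs(struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        memset(attrs, 0, sizeof(*attrs));
        attrs->action = MLX5_ACCEL_ESP_ACTION_ENCRYPT;
        attrs->tfc_pad = 0;                              /* TFC padding is rejected */
        attrs->replay_type = MLX5_ACCEL_ESP_REPLAY_NONE; /* anti-replay is rejected */
        attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
        attrs->keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
        attrs->keymat.aes_gcm.icv_len = 128;             /* bits; only 128 passes */
        attrs->keymat.aes_gcm.key_len = 256;             /* bits; 128 or 256 pass */
}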
struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
return ERR_PTR(-EINVAL);
}
if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
return ERR_PTR(-EOPNOTSUPP);
}
fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
if (!fpga_xfrm)
return ERR_PTR(-ENOMEM);
mutex_init(&fpga_xfrm->lock);
memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
sizeof(fpga_xfrm->accel_xfrm.attrs));
return &fpga_xfrm->accel_xfrm;
}
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
container_of(xfrm, struct mlx5_fpga_esp_xfrm,
accel_xfrm);
/* assuming no sa_ctx is connected to this xfrm_ctx */
kfree(fpga_xfrm);
}
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = xfrm->mdev;
struct mlx5_fpga_device *fdev = mdev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
int err = 0;
if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
return 0;
if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
return -EOPNOTSUPP;
}
if (!is_v2_sadb_supported(fipsec)) {
mlx5_core_warn(mdev, "Modify esp is not supported\n");
return -EOPNOTSUPP;
}
fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);
mutex_lock(&fpga_xfrm->lock);
if (!fpga_xfrm->sa_ctx)
/* Unbound xfrm, change only sw attrs */
goto change_sw_xfrm_attrs;
/* copy original hw sa */
memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
mutex_lock(&fipsec->sa_hash_lock);
/* remove original hw sa from hash */
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
&fpga_xfrm->sa_ctx->hash, rhash_sa));
/* update hw_sa with new xfrm attrs */
mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
&fpga_xfrm->sa_ctx->hw_sa);
/* try to insert new hw_sa to hash */
err = rhashtable_insert_fast(&fipsec->sa_hash,
&fpga_xfrm->sa_ctx->hash, rhash_sa);
if (err)
goto rollback_sa;
/* modify device with new hw_sa */
err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
if (err)
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
&fpga_xfrm->sa_ctx->hash,
rhash_sa));
rollback_sa:
if (err) {
/* return original hw_sa to hash */
memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
sizeof(org_hw_sa));
WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
&fpga_xfrm->sa_ctx->hash,
rhash_sa));
}
mutex_unlock(&fipsec->sa_hash_lock);
change_sw_xfrm_attrs:
if (!err)
memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
mutex_unlock(&fpga_xfrm->lock);
return err;
}
......@@ -35,33 +35,38 @@
#define __MLX5_FPGA_SADB_H__
#include "accel/ipsec.h"
#include "fs_cmd.h"
#ifdef CONFIG_MLX5_FPGA
void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd);
int mlx5_fpga_ipsec_sa_cmd_wait(void *context);
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int counters_count);
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6);
void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
void mlx5_fpga_ipsec_build_fs_cmds(void);
struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
#else
static inline void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline int mlx5_fpga_ipsec_sa_cmd_wait(void *context)
{
return -EOPNOTSUPP;
}
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
......@@ -80,6 +85,20 @@ static inline int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev,
return 0;
}
static inline void *
mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6)
{
return NULL;
}
static inline void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
}
static inline int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
return 0;
......@@ -89,6 +108,35 @@ static inline void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
}
static inline void mlx5_fpga_ipsec_build_fs_cmds(void)
{
}
static inline struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
}
static inline int
mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
return -EOPNOTSUPP;
}
static inline const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
return mlx5_fs_cmd_get_default(type);
}
#endif /* CONFIG_MLX5_FPGA */
#endif /* __MLX5_FPGA_SADB_H__ */
......@@ -38,6 +38,7 @@
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
......@@ -2251,6 +2252,10 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
(table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
/* Create the root namespace */
root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
......@@ -2642,8 +2647,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
if (mlx5_accel_ipsec_device_caps(steering->dev) &
MLX5_ACCEL_IPSEC_DEVICE) {
if (MLX5_IPSEC_DEV(dev)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
......
......@@ -58,6 +58,7 @@
#include "eswitch.h"
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
#include "lib/clock.h"
......@@ -1658,6 +1659,7 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
......
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_ACCEL_H__
#define __MLX5_ACCEL_H__
#include <linux/mlx5/driver.h>
enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
};
enum mlx5_accel_esp_flags {
MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
};
enum mlx5_accel_esp_action {
MLX5_ACCEL_ESP_ACTION_DECRYPT,
MLX5_ACCEL_ESP_ACTION_ENCRYPT,
};
enum mlx5_accel_esp_keymats {
MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
};
enum mlx5_accel_esp_replay {
MLX5_ACCEL_ESP_REPLAY_NONE,
MLX5_ACCEL_ESP_REPLAY_BMP,
};
struct aes_gcm_keymat {
u64 seq_iv;
enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;
u32 salt;
u32 icv_len;
u32 key_len;
u32 aes_key[256 / 32];
};
struct mlx5_accel_esp_xfrm_attrs {
enum mlx5_accel_esp_action action;
u32 esn;
u32 spi;
u32 seq;
u32 tfc_pad;
u32 flags;
u32 sa_handle;
enum mlx5_accel_esp_replay replay_type;
union {
struct {
u32 size;
} bmp;
} replay;
enum mlx5_accel_esp_keymats keymat_type;
union {
struct aes_gcm_keymat aes_gcm;
} keymat;
};
struct mlx5_accel_esp_xfrm {
struct mlx5_core_dev *mdev;
struct mlx5_accel_esp_xfrm_attrs attrs;
};
enum {
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
};
enum mlx5_accel_ipsec_cap {
MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
};
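A consumer would normally gate its use of the offload on these bits via the caps helper declared just below; a hedged sketch (the predicate itself is illustrative, not part of this commit):
/* Illustrative only: true when full ESP offload with ESN can be used. */
static bool example_esp_esn_offload_usable(struct mlx5_core_dev *mdev)
{
        u32 caps = mlx5_accel_ipsec_device_caps(mdev);

        return (caps & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
               (caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
               (caps & MLX5_ACCEL_IPSEC_CAP_ESN);
}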
#ifdef CONFIG_MLX5_ACCEL
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
#else
static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
static inline void
mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
static inline int
mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
#endif
#endif
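Taken together, the exported calls above form a simple lifecycle. A minimal caller-side sketch, with a hypothetical helper name and error handling trimmed:
/* Hypothetical round trip over the exported accel_esp API. */
static int example_xfrm_lifecycle(struct mlx5_core_dev *mdev,
                                  const struct mlx5_accel_esp_xfrm_attrs *attrs,
                                  const struct mlx5_accel_esp_xfrm_attrs *new_attrs)
{
        struct mlx5_accel_esp_xfrm *xfrm;
        int err;

        xfrm = mlx5_accel_esp_create_xfrm(mdev, attrs,
                                          MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
        if (IS_ERR(xfrm))
                return PTR_ERR(xfrm);

        /* In-place re-attribute; only v2-capable devices accept this. */
        err = mlx5_accel_esp_modify_xfrm(xfrm, new_attrs);

        mlx5_accel_esp_destroy_xfrm(xfrm);
        return err;
}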
......@@ -40,6 +40,8 @@
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18,
};
enum {
......@@ -146,6 +148,7 @@ struct mlx5_flow_act {
u32 flow_tag;
u32 encap_id;
u32 modify_id;
uintptr_t esp_id;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
......
......@@ -373,7 +373,10 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
struct mlx5_ifc_ipsec_extended_cap_bits {
u8 encapsulation[0x20];
u8 reserved_0[0x15];
u8 reserved_0[0x12];
u8 v2_command[0x1];
u8 udp_encap[0x1];
u8 rx_no_trailer[0x1];
u8 ipv4_fragment[0x1];
u8 ipv6[0x1];
u8 esn[0x1];
......@@ -429,4 +432,91 @@ struct mlx5_ifc_ipsec_counters_bits {
u8 dropped_cmd[0x40];
};
enum mlx5_ifc_fpga_ipsec_response_syndrome {
MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
};
struct mlx5_ifc_fpga_ipsec_cmd_resp {
__be32 syndrome;
union {
__be32 sw_sa_handle;
__be32 flags;
};
u8 reserved[24];
} __packed;
enum mlx5_ifc_fpga_ipsec_cmd_opcode {
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
};
enum mlx5_ifc_fpga_ipsec_cap {
MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
};
struct mlx5_ifc_fpga_ipsec_cmd_cap {
__be32 cmd;
__be32 flags;
u8 reserved[24];
} __packed;
enum mlx5_ifc_fpga_ipsec_sa_flags {
MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
};
enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
};
struct mlx5_ifc_fpga_ipsec_sa_v1 {
__be32 cmd;
u8 key_enc[32];
u8 key_auth[32];
__be32 sip[4];
__be32 dip[4];
union {
struct {
__be32 reserved;
u8 salt_iv[8];
__be32 salt;
} __packed gcm;
struct {
u8 salt[16];
} __packed cbc;
};
__be32 spi;
__be32 sw_sa_handle;
__be16 tfclen;
u8 enc_mode;
u8 reserved1[2];
u8 flags;
u8 reserved2[2];
};
struct mlx5_ifc_fpga_ipsec_sa {
struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
__be16 udp_sp;
__be16 udp_dp;
u8 reserved1[4];
__be32 esn;
__be16 vid; /* only 12 bits, rest is reserved */
__be16 reserved2;
} __packed;
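/* The wrapping structure extends the v1 command with the fields the v2
 * opcodes need: UDP encapsulation ports (udp_sp/udp_dp), the ESN and a
 * 12-bit VLAN id. */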
#endif /* MLX5_IFC_FPGA_H */