Commit e3840530 authored by Leon Romanovsky, committed by Steffen Klassert

net/mlx5e: Remove extra layers of defines

Instead of redefining the XFRM core defines to the same values under an
MLX5_* prefix, cache the input values as-is by making sure that the
proper storage objects are used.
Reviewed-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
parent cded6d80
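
The patch below drops the driver-private MLX5_ACCEL_ESP_* action/flag enums and stores the XFRM core values (x->xso.dir, x->props.family, and the ESN trigger/overlap state) directly in mlx5_accel_esp_xfrm_attrs, so consumers compare against the core constants (XFRM_DEV_OFFLOAD_OUT, AF_INET) themselves. The following standalone sketch only models that idea with simplified, illustrative types; demo_xfrm_attrs, demo_offload_dir and pick_table() are not driver code, and the demo enum assumes the unspecified/in/out ordering of enum xfrm_dev_offload_dir.

/*
 * Standalone sketch (not driver code): cache the core values verbatim and
 * compare against the core constants, instead of translating them into a
 * private "action"/"is_ipv6" representation.
 */
#include <stdio.h>
#include <sys/socket.h>         /* AF_INET / AF_INET6 */

/* assumed to mirror the ordering of enum xfrm_dev_offload_dir */
enum demo_offload_dir { DEMO_OFFLOAD_UNSPECIFIED, DEMO_OFFLOAD_IN, DEMO_OFFLOAD_OUT };

struct demo_xfrm_attrs {
        unsigned int spi;
        unsigned char dir : 2;          /* cached x->xso.dir, replaces the "action" enum */
        unsigned char esn_trigger : 1;
        unsigned char esn_overlap : 1;
        unsigned char family;           /* cached x->props.family, replaces "is_ipv6" */
};

static const char *pick_table(const struct demo_xfrm_attrs *attrs)
{
        if (attrs->dir == DEMO_OFFLOAD_OUT)
                return "tx";
        return (attrs->family == AF_INET) ? "rx-esp4" : "rx-esp6";
}

int main(void)
{
        struct demo_xfrm_attrs a = {
                .spi = 0x100, .dir = DEMO_OFFLOAD_IN, .family = AF_INET6,
        };

        printf("spi 0x%x -> %s table\n", a.spi, pick_table(&a));
        return 0;
}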
@@ -162,29 +162,20 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,

 	/* esn */
 	if (sa_entry->esn_state.trigger) {
-		attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
+		attrs->esn_trigger = true;
 		attrs->esn = sa_entry->esn_state.esn;
-		if (sa_entry->esn_state.overlap)
-			attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
+		attrs->esn_overlap = sa_entry->esn_state.overlap;
 		attrs->replay_window = x->replay_esn->replay_window;
 	}

-	/* action */
-	attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
-			MLX5_ACCEL_ESP_ACTION_ENCRYPT :
-			MLX5_ACCEL_ESP_ACTION_DECRYPT;
-	/* flags */
-	attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
-			MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
-			MLX5_ACCEL_ESP_FLAGS_TUNNEL;
+	attrs->dir = x->xso.dir;

 	/* spi */
 	attrs->spi = be32_to_cpu(x->id.spi);

 	/* source , destination ips */
 	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
 	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
-	attrs->is_ipv6 = (x->props.family != AF_INET);
+	attrs->family = x->props.family;
 }

 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -43,18 +43,6 @@

 #define MLX5E_IPSEC_SADB_RX_BITS 10
 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L

-enum mlx5_accel_esp_flags {
-	MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0,    /* Default */
-	MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
-	MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
-	MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
-	MLX5_ACCEL_ESP_ACTION_DECRYPT,
-	MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
 struct aes_gcm_keymat {
 	u64   seq_iv;
@@ -66,7 +54,6 @@ struct aes_gcm_keymat {
 };

 struct mlx5_accel_esp_xfrm_attrs {
-	enum mlx5_accel_esp_action action;
 	u32   esn;
 	u32   spi;
 	u32   flags;
@@ -82,7 +69,10 @@ struct mlx5_accel_esp_xfrm_attrs {
 		__be32 a6[4];
 	} daddr;

-	u8 is_ipv6;
+	u8 dir : 2;
+	u8 esn_overlap : 1;
+	u8 esn_trigger : 1;
+	u8 family;
 	u32 replay_window;
 };
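
The struct change above packs the cached direction and ESN state into narrow bit-fields next to the address family. The standalone sketch below only checks that the chosen widths are sufficient: "dir : 2" can hold the three xfrm_dev_offload_dir values (assumed here to be 0..2), and the two ESN members only ever carry 0 or 1. demo_attrs_tail is an illustrative stand-in, not the driver struct; bit-fields on a char type, like the kernel's u8 bit-fields, rely on the common GCC/Clang extension.

/* Standalone sketch: width check for the new bit-fields. */
#include <assert.h>
#include <stdio.h>

struct demo_attrs_tail {
        unsigned char dir : 2;
        unsigned char esn_overlap : 1;
        unsigned char esn_trigger : 1;
        unsigned char family;
};

int main(void)
{
        struct demo_attrs_tail t = { 0 };

        for (unsigned int d = 0; d <= 2; d++) {  /* assumed enum values 0..2 */
                t.dir = d;
                assert(t.dir == d);              /* 2 bits are enough */
        }

        t.esn_trigger = 1;
        t.esn_overlap = 1;
        assert(t.esn_trigger == 1 && t.esn_overlap == 1);

        /* with char-typed bit-fields the three flags share a single byte */
        printf("sizeof(struct demo_attrs_tail) = %zu\n", sizeof(t));
        return 0;
}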
@@ -341,7 +341,7 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
 			     struct mlx5_flow_spec *spec,
 			     struct mlx5_flow_act *flow_act)
 {
-	u8 ip_version = attrs->is_ipv6 ? 6 : 4;
+	u8 ip_version = (attrs->family == AF_INET) ? 4 : 6;

 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;

@@ -411,7 +411,7 @@ static int rx_add_rule(struct mlx5e_priv *priv,
 	int err = 0;

 	accel_esp = priv->ipsec->rx_fs;
-	type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
+	type = (attrs->family == AF_INET) ? ACCEL_FS_ESP4 : ACCEL_FS_ESP6;
 	fs_prot = &accel_esp->fs_prot[type];

 	err = rx_ft_get(priv, type);
@@ -453,8 +453,8 @@ static int rx_add_rule(struct mlx5e_priv *priv,
 	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
-			   attrs->action, err);
+		netdev_err(priv->netdev, "fail to add RX ipsec rule err=%d\n",
+			   err);
 		goto out_err;
 	}

@@ -505,8 +505,8 @@ static int tx_add_rule(struct mlx5e_priv *priv,
 	rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
-			   sa_entry->attrs.action, err);
+		netdev_err(priv->netdev, "fail to add TX ipsec rule err=%d\n",
+			   err);
 		goto out;
 	}

@@ -522,7 +522,7 @@ static int tx_add_rule(struct mlx5e_priv *priv,
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
 				  struct mlx5e_ipsec_sa_entry *sa_entry)
 {
-	if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
+	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
 		return tx_add_rule(priv, sa_entry);

 	return rx_add_rule(priv, sa_entry);
@@ -533,17 +533,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
 {
 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+	enum accel_fs_esp_type type;

 	mlx5_del_flow_rules(ipsec_rule->rule);

-	if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
+	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
 		tx_ft_put(priv);
 		return;
 	}

 	mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
-	rx_ft_put(priv,
-		  sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
+	type = (sa_entry->attrs.family == AF_INET) ? ACCEL_FS_ESP4 : ACCEL_FS_ESP6;
+	rx_ft_put(priv, type);
 }

 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
@@ -72,11 +72,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
 	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
 	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
 	/* esn */
-	if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+	if (attrs->esn_trigger) {
 		MLX5_SET(ipsec_obj, obj, esn_en, 1);
 		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
-		if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
-			MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
 	}

 	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
@@ -158,7 +157,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
 	void *obj;
 	int err;

-	if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
+	if (!attrs->esn_trigger)
 		return 0;

 	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
@@ -189,8 +188,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
 		 MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
 		 MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
 	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
-	if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
-		MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);

 	/* general object fields set */
 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
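
The two hunks above replace a conditional "set the field to 1" with an unconditional write of the cached bit. The standalone sketch below only checks that the two forms produce the same field contents when the input is a 0/1 value; set_bit_field() is an illustrative stand-in, not the real MLX5_SET macro.

/* Standalone sketch: conditional set-to-1 vs. writing the cached bit. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void set_bit_field(uint32_t *obj, unsigned int bit, uint32_t val)
{
        *obj &= ~(1u << bit);           /* clear the 1-bit field */
        *obj |= (val & 1u) << bit;      /* write the new value */
}

int main(void)
{
        for (uint32_t overlap = 0; overlap <= 1; overlap++) {
                uint32_t old_style = 0, new_style = 0;

                if (overlap)                            /* old: conditional set to 1 */
                        set_bit_field(&old_style, 3, 1);
                set_bit_field(&new_style, 3, overlap);  /* new: write the value directly */

                assert(old_style == new_style);
                printf("overlap=%u -> field word 0x%x\n",
                       (unsigned int)overlap, (unsigned int)new_style);
        }
        return 0;
}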