Commit 6b164687 authored by Jakub Kicinski

Merge branch 'mlx5-misc-patches'

Tariq Toukan says:

====================
mlx5 misc patches

This patchset includes small features and misc code enhancements for the
mlx5 core and EN drivers.

Patches 1-4 by Gal improve the mlx5e ethtool stats implementation, for
example by using the standard ethtool_sprintf()/ethtool_puts() helpers.
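
As a rough sketch of the conversion (illustration only, not part of the
series; my_stats_names and both functions below are made-up stand-ins for
the mlx5e stats descriptors):

#include <linux/ethtool.h>
#include <linux/string.h>

/* Hypothetical counter names, standing in for a real *_stats_desc array. */
static const char * const my_stats_names[] = {
	"rx_example_packets",
	"tx_example_packets",
};

/* Old pattern: index manually into the flat ETH_GSTRING_LEN buffer. */
static int my_fill_strings_old(u8 *data, int idx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(my_stats_names); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, my_stats_names[i]);
	return idx;
}

/* New pattern: ethtool_puts() copies the name and advances *data itself. */
static void my_fill_strings_new(u8 **data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(my_stats_names); i++)
		ethtool_puts(data, my_stats_names[i]);
}

Since ethtool_puts() advances the string cursor on its own, the fill
callbacks no longer need to carry an index around and can return void, as
the diffs below show.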

Patch 5 by me adds a reset option for the FW command interface debugfs
stats entries. This allows the FW command interface stats to be explicitly
reset between different runs of a test case.
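
For example, a test can clear a given command's counters between runs by
writing any value to the new write-only "reset" file, along the lines of
(the exact debugfs path shown here is an assumption):

  echo 1 > /sys/kernel/debug/mlx5/<device>/commands/<command>/reset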

Patches 6 and 7 are simple cleanups.

Patch 8 by Gal adds driver support for 800Gbps link modes.

Patch 9 by Jianbo enhances the L4 steering capabilities.

Patches 10-11 by Jianbo avoid redundant operations.
====================

Link: https://lore.kernel.org/r/20240402133043.56322-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 8c73e8b5 07e1bc78
@@ -143,7 +143,7 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static ssize_t average_write(struct file *filp, const char __user *buf,
static ssize_t reset_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_cmd_stats *stats;
@@ -152,6 +152,11 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
spin_lock_irq(&stats->lock);
stats->sum = 0;
stats->n = 0;
stats->failed = 0;
stats->failed_mbox_status = 0;
stats->last_failed_errno = 0;
stats->last_failed_mbox_status = 0;
stats->last_failed_syndrome = 0;
spin_unlock_irq(&stats->lock);
*pos += count;
@@ -159,11 +164,16 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
return count;
}
static const struct file_operations stats_fops = {
static const struct file_operations reset_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = reset_write,
};
static const struct file_operations average_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = average_read,
.write = average_write,
};
static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
@@ -228,8 +238,10 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
continue;
stats->root = debugfs_create_dir(namep, *cmd);
debugfs_create_file("reset", 0200, stats->root, stats,
&reset_fops);
debugfs_create_file("average", 0400, stats->root, stats,
&stats_fops);
&average_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
debugfs_create_u64("failed_mbox_status", 0400, stats->root,
@@ -1160,7 +1160,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
uint32_t stringset, uint8_t *data);
u32 stringset, u8 *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
@@ -565,7 +565,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
linear = !!(dma_len - inline_hdr_sz);
ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;
/* check_result must be 0 if sinfo is passed. */
/* check_result must be 0 if xdptxd->has_frags is true. */
if (!check_result) {
int stop_room = 1;
@@ -73,7 +73,7 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
u32 flow_tag)
{
struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
@@ -11,14 +11,14 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag);
u32 flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
u32 flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
static inline void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) {}
#endif
@@ -78,13 +78,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
unsigned int i;
if (!priv->ipsec)
return idx;
return;
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
mlx5e_ipsec_hw_stats_desc[i].format);
return idx;
ethtool_puts(data, mlx5e_ipsec_hw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
@@ -92,14 +89,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
int i;
if (!priv->ipsec)
return idx;
return;
mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
mlx5e_ipsec_hw_stats_desc, i);
return idx;
mlx5e_ethtool_put_stat(
data,
MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
mlx5e_ipsec_hw_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -115,9 +112,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw)
if (priv->ipsec)
for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
mlx5e_ipsec_sw_stats_desc[i].format);
return idx;
ethtool_puts(data, mlx5e_ipsec_sw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
@@ -126,9 +121,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
if (priv->ipsec)
for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
mlx5e_ipsec_sw_stats_desc, i);
return idx;
mlx5e_ethtool_put_stat(
data, MLX5E_READ_CTR_ATOMIC64(
&priv->ipsec->sw_stats,
mlx5e_ipsec_sw_stats_desc, i));
}
MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
@@ -95,8 +95,8 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data);
void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -144,15 +144,9 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
return 0;
}
static inline void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data) { }
static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
return 0;
}
static inline void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data) { }
#endif
#endif /* __MLX5E_TLS_H__ */
@@ -58,35 +58,31 @@ int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}
int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data)
{
unsigned int i, n, idx = 0;
unsigned int i, n;
if (!priv->tls)
return 0;
return;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
mlx5e_ktls_sw_stats_desc[i].format);
return n;
ethtool_puts(data, mlx5e_ktls_sw_stats_desc[i].format);
}
int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data)
{
unsigned int i, n, idx = 0;
unsigned int i, n;
if (!priv->tls)
return 0;
return;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
mlx5e_ktls_sw_stats_desc,
i);
return n;
mlx5e_ethtool_put_stat(
data,
MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
mlx5e_ktls_sw_stats_desc, i));
}
@@ -38,16 +38,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
unsigned int i;
if (!priv->macsec)
return idx;
return;
if (!mlx5e_is_macsec_device(priv->mdev))
return idx;
return;
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
mlx5e_macsec_hw_stats_desc[i].format);
return idx;
ethtool_puts(data, mlx5e_macsec_hw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
@@ -56,19 +53,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
int i;
if (!priv->macsec)
return idx;
return;
if (!mlx5e_is_macsec_device(priv->mdev))
return idx;
return;
macsec_fs = priv->mdev->macsec_fs;
mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs));
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs),
mlx5e_macsec_hw_stats_desc,
i);
return idx;
mlx5e_ethtool_put_stat(
data, MLX5E_READ_CTR64_CPU(
mlx5_macsec_fs_get_stats(macsec_fs),
mlx5e_macsec_hw_stats_desc, i));
}
MLX5E_DEFINE_STATS_GRP(macsec_hw, 0);
@@ -219,6 +219,13 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_8_800GBASE_CR8_KR8, ext,
ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT);
}
static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
@@ -269,8 +276,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < MLX5E_NUM_PFLAGS; i++)
strcpy(data + i * ETH_GSTRING_LEN,
mlx5e_priv_flags[i].name);
ethtool_puts(&data, mlx5e_priv_flags[i].name);
break;
case ETH_SS_TEST:
@@ -896,8 +896,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -920,8 +919,7 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -135,9 +135,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
sw_rep_stats_desc[i].format);
return idx;
ethtool_puts(data, sw_rep_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
@@ -145,9 +143,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
sw_rep_stats_desc, i);
return idx;
mlx5e_ethtool_put_stat(
data, MLX5E_READ_CTR64_CPU(&priv->stats.sw,
sw_rep_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
@@ -176,11 +174,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
ethtool_puts(data, vport_rep_stats_desc[i].format);
for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vport_rep_loopback_stats_desc[i].format);
return idx;
ethtool_puts(data, vport_rep_loopback_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
@@ -188,12 +184,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_stats_desc, i);
mlx5e_ethtool_put_stat(
data, MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_stats_desc, i));
for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_loopback_stats_desc, i);
return idx;
mlx5e_ethtool_put_stat(
data,
MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_loopback_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
@@ -276,7 +274,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
}
static void mlx5e_rep_get_strings(struct net_device *dev,
u32 stringset, uint8_t *data)
u32 stringset, u8 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -359,7 +359,7 @@ int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
if (st.cond_func && st.cond_func(priv))
continue;
if (data)
strcpy(data + count * ETH_GSTRING_LEN, st.name);
ethtool_puts(&data, st.name);
count++;
}
return count;
@@ -71,11 +71,13 @@ struct mlx5e_priv;
struct mlx5e_stats_grp {
u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
void (*fill_strings)(struct mlx5e_priv *priv, u8 **data);
void (*fill_stats)(struct mlx5e_priv *priv, u64 **data);
void (*update_stats)(struct mlx5e_priv *priv);
};
void mlx5e_ethtool_put_stat(u64 **data, u64 val);
typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
@@ -87,10 +89,10 @@ typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
void MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 **data)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
void MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 **data)
#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
@@ -835,8 +835,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
memset(ttc_params, 0, sizeof(*ttc_params));
ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
@@ -688,6 +688,12 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
if (err)
goto err2;
/* Skip page eq creation when the device does not request for page requests */
if (MLX5_CAP_GEN(dev, page_request_disable)) {
mlx5_core_dbg(dev, "Skip page EQ creation\n");
return 0;
}
param = (struct mlx5_eq_param) {
.irq = table->ctrl_irq,
.nent = /* TODO: sriov max_vf + */ 1,
@@ -716,6 +722,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
if (!MLX5_CAP_GEN(dev, page_request_disable))
cleanup_async_eq(dev, &table->pages_eq, "pages");
cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
@@ -283,7 +283,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return 0;
}
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id)
{
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
int i;
@@ -449,13 +449,11 @@ static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
ttc_params->ns = mlx5_get_flow_namespace(dev,
MLX5_FLOW_NAMESPACE_PORT_SEL);
ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
@@ -470,13 +468,11 @@ static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
ttc_params->ns = mlx5_get_flow_namespace(dev,
MLX5_FLOW_NAMESPACE_PORT_SEL);
ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
@@ -40,7 +40,7 @@ struct mlx5_ttc_rule {
struct mlx5_ttc_table;
struct ttc_params {
struct mlx5_flow_namespace *ns;
enum mlx5_flow_namespace_type ns_type;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
@@ -205,7 +205,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev);
void mlx5_cmd_disable(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
@@ -660,6 +660,9 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
if (!npages)
return 0;
return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
@@ -1883,7 +1883,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
@@ -1897,7 +1897,7 @@ void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
@@ -2129,7 +2129,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
u8 *tag)
{
u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
struct mlx5dr_match_misc *misc = &value->misc;
@@ -68,7 +68,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((u8 *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
@@ -1336,6 +1336,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
#define MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, ft_field_support_2_nic_receive.cap)
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
@@ -1359,6 +1362,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
#define MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, ft_field_support_2_port_selection.cap)
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
@@ -416,7 +416,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
/* Table 2170 - Flow Table Fields Supported 2 Format */
struct mlx5_ifc_flow_table_fields_supported_2_bits {
u8 reserved_at_0[0xe];
u8 reserved_at_0[0x2];
u8 inner_l4_type[0x1];
u8 outer_l4_type[0x1];
u8 reserved_at_4[0xa];
u8 bth_opcode[0x1];
u8 reserved_at_f[0x1];
u8 tunnel_header_0_1[0x1];
@@ -525,6 +528,12 @@ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
u8 reserved_at_0[0x80];
};
enum {
MLX5_PACKET_L4_TYPE_NONE,
MLX5_PACKET_L4_TYPE_TCP,
MLX5_PACKET_L4_TYPE_UDP,
};
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -550,7 +559,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_sport[0x10];
u8 tcp_dport[0x10];
u8 reserved_at_c0[0x10];
u8 l4_type[0x2];
u8 reserved_at_c2[0xe];
u8 ipv4_ihl[0x4];
u8 reserved_at_c4[0x4];
@@ -846,7 +856,11 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
u8 reserved_at_e00[0x700];
u8 reserved_at_e00[0x600];
struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive;
u8 reserved_at_1480[0x80];
struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
@@ -876,7 +890,9 @@ struct mlx5_ifc_port_selection_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
u8 reserved_at_400[0x7c00];
struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_port_selection;
u8 reserved_at_480[0x7b80];
};
enum {
@@ -1469,7 +1485,9 @@ enum {
};
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x10];
u8 reserved_at_0[0x6];
u8 page_request_disable[0x1];
u8 reserved_at_7[0x9];
u8 shared_object_to_user_object_allowed[0x1];
u8 reserved_at_13[0xe];
u8 vhca_resource_manager[0x1];
@@ -2004,7 +2022,13 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_3a0[0x10];
u8 max_rqt_vhca_id[0x10];
u8 reserved_at_3c0[0x440];
u8 reserved_at_3c0[0x20];
u8 reserved_at_3e0[0x10];
u8 pcc_ifa2[0x1];
u8 reserved_at_3f1[0xf];
u8 reserved_at_400[0x400];
};
enum mlx5_ifc_flow_destination_type {