Commit 32e41702 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge tag 'mlx5-next-2020-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
mlx5-next-2020-12-02

Low level mlx5 updates required by both netdev and rdma trees.

* tag 'mlx5-next-2020-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: Treat host PF vport as other (non eswitch manager) vport
  net/mlx5: Enable host PF HCA after eswitch is initialized
  net/mlx5: Rename peer_pf to host_pf
  net/mlx5: Make API mlx5_core_is_ecpf accept const pointer
  net/mlx5: Export steering related functions
  net/mlx5: Expose other function ifc bits
  net/mlx5: Expose IP-in-IP TX and RX capability bits
  net/mlx5: Update the hardware interface definition for vhca state
  net/mlx5: Update the list of the PCI supported devices
  net/mlx5: Avoid exposing driver internal command helpers
  net/mlx5: Add ts_cqe_to_dest_cqn related bits
  net/mlx5: Add misc4 to mlx5_ifc_fte_match_param_bits
  net/mlx5: Check dr mask size against mlx5_match_param size
  net/mlx5: Add sampler destination type
  net/mlx5: Add sample offload hardware bits and structures
====================

Link: https://lore.kernel.org/r/20201203011010.213440-1-saeedm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 6ec1dfb5 617b860c
...@@ -2142,7 +2142,6 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) ...@@ -2142,7 +2142,6 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
kvfree(cmd->stats); kvfree(cmd->stats);
return err; return err;
} }
EXPORT_SYMBOL(mlx5_cmd_init);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{ {
...@@ -2155,11 +2154,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) ...@@ -2155,11 +2154,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
dma_pool_destroy(cmd->pool); dma_pool_destroy(cmd->pool);
kvfree(cmd->stats); kvfree(cmd->stats);
} }
EXPORT_SYMBOL(mlx5_cmd_cleanup);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev, void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state) enum mlx5_cmdif_state cmdif_state)
{ {
dev->cmd.state = cmdif_state; dev->cmd.state = cmdif_state;
} }
EXPORT_SYMBOL(mlx5_cmd_set_state);
...@@ -247,6 +247,9 @@ const char *parse_fs_dst(struct trace_seq *p, ...@@ -247,6 +247,9 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_TIR: case MLX5_FLOW_DESTINATION_TYPE_TIR:
trace_seq_printf(p, "tir=%u\n", dst->tir_num); trace_seq_printf(p, "tir=%u\n", dst->tir_num);
break; break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
trace_seq_printf(p, "sampler_id=%u\n", dst->sampler_id);
break;
case MLX5_FLOW_DESTINATION_TYPE_COUNTER: case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
trace_seq_printf(p, "counter_id=%u\n", counter_id); trace_seq_printf(p, "counter_id=%u\n", counter_id);
break; break;
......
...@@ -8,37 +8,66 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) ...@@ -8,37 +8,66 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1; return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
} }
static int mlx5_peer_pf_init(struct mlx5_core_dev *dev) static bool mlx5_ecpf_esw_admins_host_pf(const struct mlx5_core_dev *dev)
{ {
/* In separate host mode, PF enables itself.
* When ECPF is eswitch manager, eswitch enables host PF after
* eswitch is setup.
*/
return mlx5_core_is_ecpf_esw_manager(dev);
}
int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
int err;
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
err = mlx5_cmd_exec_in(dev, enable_hca, in); MLX5_SET(enable_hca_in, in, function_id, 0);
MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, 0);
MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_host_pf_init(struct mlx5_core_dev *dev)
{
int err;
if (mlx5_ecpf_esw_admins_host_pf(dev))
return 0;
/* ECPF shall enable HCA for host PF in the same way a PF
* does this for its VFs when ECPF is not a eswitch manager.
*/
err = mlx5_cmd_host_pf_enable_hca(dev);
if (err) if (err)
mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n", mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err);
err);
return err; return err;
} }
static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev) static void mlx5_host_pf_cleanup(struct mlx5_core_dev *dev)
{ {
u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
int err; int err;
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); if (mlx5_ecpf_esw_admins_host_pf(dev))
err = mlx5_cmd_exec_in(dev, disable_hca, in); return;
err = mlx5_cmd_host_pf_disable_hca(dev);
if (err) { if (err) {
mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n", mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err);
err);
return; return;
} }
err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
err);
} }
int mlx5_ec_init(struct mlx5_core_dev *dev) int mlx5_ec_init(struct mlx5_core_dev *dev)
...@@ -46,16 +75,19 @@ int mlx5_ec_init(struct mlx5_core_dev *dev) ...@@ -46,16 +75,19 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev)) if (!mlx5_core_is_ecpf(dev))
return 0; return 0;
/* ECPF shall enable HCA for peer PF in the same way a PF return mlx5_host_pf_init(dev);
* does this for its VFs.
*/
return mlx5_peer_pf_init(dev);
} }
void mlx5_ec_cleanup(struct mlx5_core_dev *dev) void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
{ {
int err;
if (!mlx5_core_is_ecpf(dev)) if (!mlx5_core_is_ecpf(dev))
return; return;
mlx5_peer_pf_cleanup(dev); mlx5_host_pf_cleanup(dev);
err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
} }
...@@ -17,6 +17,9 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev); ...@@ -17,6 +17,9 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
int mlx5_ec_init(struct mlx5_core_dev *dev); int mlx5_ec_init(struct mlx5_core_dev *dev);
void mlx5_ec_cleanup(struct mlx5_core_dev *dev); void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */ #else /* CONFIG_MLX5_ESWITCH */
static inline bool static inline bool
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
struct mlx5_flow_table * struct mlx5_flow_table *
esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
{ {
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev; struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns; struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl; struct mlx5_flow_table *acl;
...@@ -33,7 +34,9 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) ...@@ -33,7 +34,9 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
} }
acl = mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport_num); ft_attr.max_fte = size;
ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num);
if (IS_ERR(acl)) { if (IS_ERR(acl)) {
err = PTR_ERR(acl); err = PTR_ERR(acl);
esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num, esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num,
......
...@@ -1474,6 +1474,26 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, ...@@ -1474,6 +1474,26 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
return err; return err;
} }
static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
if (!mlx5_core_is_ecpf(dev))
return 0;
/* Once vport and representor are ready, take out the external host PF
* out of initializing state. Enabling HCA clears the iser->initializing
* bit and host PF driver loading can progress.
*/
return mlx5_cmd_host_pf_enable_hca(dev);
}
static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
if (!mlx5_core_is_ecpf(dev))
return;
mlx5_cmd_host_pf_disable_hca(dev);
}
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch. * whichever are present on the eswitch.
*/ */
...@@ -1488,6 +1508,11 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, ...@@ -1488,6 +1508,11 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
if (ret) if (ret)
return ret; return ret;
/* Enable external host PF HCA */
ret = host_pf_enable_hca(esw->dev);
if (ret)
goto pf_hca_err;
/* Enable ECPF vport */ /* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) { if (mlx5_ecpf_vport_exists(esw->dev)) {
ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events); ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
...@@ -1505,8 +1530,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, ...@@ -1505,8 +1530,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
vf_err: vf_err:
if (mlx5_ecpf_vport_exists(esw->dev)) if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err: ecpf_err:
host_pf_disable_hca(esw->dev);
pf_hca_err:
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
return ret; return ret;
} }
...@@ -1521,6 +1547,7 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) ...@@ -1521,6 +1547,7 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
if (mlx5_ecpf_vport_exists(esw->dev)) if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
host_pf_disable_hca(esw->dev);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
} }
......
...@@ -172,10 +172,9 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, ...@@ -172,10 +172,9 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn); MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (ft->vport) {
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
MLX5_SET(set_flow_table_root_in, in, other_vport, 1); MLX5_SET(set_flow_table_root_in, in, other_vport,
} !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, set_flow_table_root, in); return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
} }
...@@ -199,10 +198,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, ...@@ -199,10 +198,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_table_in, in, table_type, ft->type); MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level); MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size); MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
if (ft->vport) {
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport); MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1); MLX5_SET(create_flow_table_in, in, other_vport,
} !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en, MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_decap); en_decap);
...@@ -252,10 +250,9 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns, ...@@ -252,10 +250,9 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_CMD_OP_DESTROY_FLOW_TABLE); MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, ft->type); MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id); MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
if (ft->vport) {
MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport); MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_table_in, in, other_vport, 1); MLX5_SET(destroy_flow_table_in, in, other_vport,
} !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, destroy_flow_table, in); return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
} }
...@@ -283,11 +280,9 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, ...@@ -283,11 +280,9 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
flow_table_context.lag_master_next_table_id, 0); flow_table_context.lag_master_next_table_id, 0);
} }
} else { } else {
if (ft->vport) { MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(modify_flow_table_in, in, vport_number, MLX5_SET(modify_flow_table_in, in, other_vport,
ft->vport); !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
MLX5_SET(modify_flow_table_in, in, other_vport, 1);
}
MLX5_SET(modify_flow_table_in, in, modify_field_select, MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) { if (next_ft) {
...@@ -325,6 +320,9 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns, ...@@ -325,6 +320,9 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, other_vport, 1); MLX5_SET(create_flow_group_in, in, other_vport, 1);
} }
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out); err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err) if (!err)
fg->id = MLX5_GET(create_flow_group_out, out, fg->id = MLX5_GET(create_flow_group_out, out,
...@@ -344,11 +342,9 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns, ...@@ -344,11 +342,9 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type); MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id); MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, fg->id); MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
if (ft->vport) {
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport, 1); MLX5_SET(destroy_flow_group_in, in, other_vport,
} !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, destroy_flow_group, in); return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
} }
...@@ -427,10 +423,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, ...@@ -427,10 +423,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, ignore_flow_level, MLX5_SET(set_fte_in, in, ignore_flow_level,
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL)); !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
if (ft->vport) {
MLX5_SET(set_fte_in, in, vport_number, ft->vport); MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, 1); MLX5_SET(set_fte_in, in, other_vport,
} !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id); MLX5_SET(flow_context, in_flow_context, group_id, group_id);
...@@ -515,6 +510,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, ...@@ -515,6 +510,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
dst->dest_attr.vport.pkt_reformat->id); dst->dest_attr.vport.pkt_reformat->id);
} }
break; break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
id = dst->dest_attr.sampler_id;
break;
default: default:
id = dst->dest_attr.tir_num; id = dst->dest_attr.tir_num;
} }
...@@ -601,10 +599,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns, ...@@ -601,10 +599,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, table_type, ft->type); MLX5_SET(delete_fte_in, in, table_type, ft->type);
MLX5_SET(delete_fte_in, in, table_id, ft->id); MLX5_SET(delete_fte_in, in, table_id, ft->id);
MLX5_SET(delete_fte_in, in, flow_index, fte->index); MLX5_SET(delete_fte_in, in, flow_index, fte->index);
if (ft->vport) {
MLX5_SET(delete_fte_in, in, vport_number, ft->vport); MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
MLX5_SET(delete_fte_in, in, other_vport, 1); MLX5_SET(delete_fte_in, in, other_vport,
} !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, delete_fte, in); return mlx5_cmd_exec_in(dev, delete_fte, in);
} }
......
...@@ -1152,18 +1152,13 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, ...@@ -1152,18 +1152,13 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
{ {
return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0); return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
} }
EXPORT_SYMBOL(mlx5_create_flow_table);
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, struct mlx5_flow_table *
int prio, int max_fte, mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
u32 level, u16 vport) struct mlx5_flow_table_attr *ft_attr, u16 vport)
{ {
struct mlx5_flow_table_attr ft_attr = {}; return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
ft_attr.max_fte = max_fte;
ft_attr.level = level;
ft_attr.prio = prio;
return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
} }
struct mlx5_flow_table* struct mlx5_flow_table*
...@@ -1243,6 +1238,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, ...@@ -1243,6 +1238,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
return fg; return fg;
} }
EXPORT_SYMBOL(mlx5_create_flow_group);
static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{ {
...@@ -2146,6 +2142,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) ...@@ -2146,6 +2142,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
fg->id); fg->id);
} }
EXPORT_SYMBOL(mlx5_destroy_flow_group);
struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
int n) int n)
......
...@@ -194,7 +194,7 @@ struct mlx5_ft_underlay_qp { ...@@ -194,7 +194,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn; u32 qpn;
}; };
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_a00 #define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00
/* Calculate the fte_match_param length and without the reserved length. /* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last. * Make sure the reserved field is the last.
*/ */
......
...@@ -1126,23 +1126,23 @@ static int mlx5_load(struct mlx5_core_dev *dev) ...@@ -1126,23 +1126,23 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_sriov; goto err_sriov;
} }
err = mlx5_sriov_attach(dev);
if (err) {
mlx5_core_err(dev, "sriov init failed %d\n", err);
goto err_sriov;
}
err = mlx5_ec_init(dev); err = mlx5_ec_init(dev);
if (err) { if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n"); mlx5_core_err(dev, "Failed to init embedded CPU\n");
goto err_ec; goto err_ec;
} }
err = mlx5_sriov_attach(dev);
if (err) {
mlx5_core_err(dev, "sriov init failed %d\n", err);
goto err_sriov;
}
return 0; return 0;
err_ec:
mlx5_sriov_detach(dev);
err_sriov: err_sriov:
mlx5_ec_cleanup(dev);
err_ec:
mlx5_cleanup_fs(dev); mlx5_cleanup_fs(dev);
err_fs: err_fs:
mlx5_accel_tls_cleanup(dev); mlx5_accel_tls_cleanup(dev);
...@@ -1168,8 +1168,8 @@ static int mlx5_load(struct mlx5_core_dev *dev) ...@@ -1168,8 +1168,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
static void mlx5_unload(struct mlx5_core_dev *dev) static void mlx5_unload(struct mlx5_core_dev *dev)
{ {
mlx5_ec_cleanup(dev);
mlx5_sriov_detach(dev); mlx5_sriov_detach(dev);
mlx5_ec_cleanup(dev);
mlx5_cleanup_fs(dev); mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev); mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev); mlx5_accel_tls_cleanup(dev);
...@@ -1594,6 +1594,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { ...@@ -1594,6 +1594,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
{ 0, } { 0, }
}; };
......
...@@ -122,6 +122,10 @@ enum mlx5_semaphore_space_address { ...@@ -122,6 +122,10 @@ enum mlx5_semaphore_space_address {
int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
......
...@@ -374,7 +374,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, ...@@ -374,7 +374,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
if (func_id) if (func_id)
dev->priv.vfs_pages += npages; dev->priv.vfs_pages += npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function) else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.peer_pf_pages += npages; dev->priv.host_pf_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n", mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err); npages, ec_function, func_id, err);
...@@ -416,7 +416,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, ...@@ -416,7 +416,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
if (func_id) if (func_id)
dev->priv.vfs_pages -= npages; dev->priv.vfs_pages -= npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function) else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.peer_pf_pages -= npages; dev->priv.host_pf_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n", mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id); npages, ec_function, func_id);
...@@ -506,7 +506,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, ...@@ -506,7 +506,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
if (func_id) if (func_id)
dev->priv.vfs_pages -= num_claimed; dev->priv.vfs_pages -= num_claimed;
else if (mlx5_core_is_ecpf(dev) && !ec_function) else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.peer_pf_pages -= num_claimed; dev->priv.host_pf_pages -= num_claimed;
out_free: out_free:
kvfree(out); kvfree(out);
...@@ -661,9 +661,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) ...@@ -661,9 +661,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.vfs_pages, WARN(dev->priv.vfs_pages,
"VFs FW pages counter is %d after reclaiming all pages\n", "VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages); dev->priv.vfs_pages);
WARN(dev->priv.peer_pf_pages, WARN(dev->priv.host_pf_pages,
"Peer PF FW pages counter is %d after reclaiming all pages\n", "External host PF FW pages counter is %d after reclaiming all pages\n",
dev->priv.peer_pf_pages); dev->priv.host_pf_pages);
return 0; return 0;
} }
......
...@@ -643,7 +643,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, ...@@ -643,7 +643,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
} }
if (mask) { if (mask) {
if (mask->match_sz > sizeof(struct mlx5dr_match_param)) { if (mask->match_sz > DR_SZ_MATCH_PARAM) {
mlx5dr_err(dmn, "Invalid match size attribute\n"); mlx5dr_err(dmn, "Invalid match size attribute\n");
return -EINVAL; return -EINVAL;
} }
......
...@@ -874,8 +874,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, ...@@ -874,8 +874,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
u32 s_idx, e_idx; u32 s_idx, e_idx;
if (!value_size || if (!value_size ||
(value_size > sizeof(struct mlx5dr_match_param) || (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
(value_size % sizeof(u32)))) {
mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n"); mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
return false; return false;
} }
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#define WIRE_PORT 0xFFFF #define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1 #define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2 #define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg) #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg) #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
......
...@@ -346,6 +346,7 @@ enum mlx5_event { ...@@ -346,6 +346,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
...@@ -717,6 +718,11 @@ struct mlx5_eqe_sync_fw_update { ...@@ -717,6 +718,11 @@ struct mlx5_eqe_sync_fw_update {
u8 sync_rst_state; u8 sync_rst_state;
}; };
struct mlx5_eqe_vhca_state {
__be16 ec_function;
__be16 function_id;
} __packed;
union ev_data { union ev_data {
__be32 raw[7]; __be32 raw[7];
struct mlx5_eqe_cmd cmd; struct mlx5_eqe_cmd cmd;
...@@ -736,6 +742,7 @@ union ev_data { ...@@ -736,6 +742,7 @@ union ev_data {
struct mlx5_eqe_temp_warning temp_warning; struct mlx5_eqe_temp_warning temp_warning;
struct mlx5_eqe_xrq_err xrq_err; struct mlx5_eqe_xrq_err xrq_err;
struct mlx5_eqe_sync_fw_update sync_fw_update; struct mlx5_eqe_sync_fw_update sync_fw_update;
struct mlx5_eqe_vhca_state vhca_state;
} __packed; } __packed;
struct mlx5_eqe { struct mlx5_eqe {
...@@ -1076,6 +1083,7 @@ enum { ...@@ -1076,6 +1083,7 @@ enum {
MLX5_MATCH_INNER_HEADERS = 1 << 2, MLX5_MATCH_INNER_HEADERS = 1 << 2,
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3, MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4, MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
}; };
enum { enum {
......
...@@ -547,7 +547,7 @@ struct mlx5_priv { ...@@ -547,7 +547,7 @@ struct mlx5_priv {
atomic_t reg_pages; atomic_t reg_pages;
struct list_head free_list; struct list_head free_list;
int vfs_pages; int vfs_pages;
int peer_pf_pages; int host_pf_pages;
struct mlx5_core_health health; struct mlx5_core_health health;
...@@ -888,10 +888,6 @@ enum { ...@@ -888,10 +888,6 @@ enum {
CMD_ALLOWED_OPCODE_ALL, CMD_ALLOWED_OPCODE_ALL,
}; };
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
...@@ -1137,7 +1133,7 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) ...@@ -1137,7 +1133,7 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF; return dev->coredev_type == MLX5_COREDEV_VF;
} }
static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{ {
return dev->caps.embedded_cpu; return dev->caps.embedded_cpu;
} }
......
...@@ -50,6 +50,7 @@ enum { ...@@ -50,6 +50,7 @@ enum {
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
MLX5_FLOW_TABLE_TERMINATION = BIT(2), MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3), MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
}; };
#define LEFTOVERS_RULE_NUM 2 #define LEFTOVERS_RULE_NUM 2
...@@ -132,6 +133,7 @@ struct mlx5_flow_destination { ...@@ -132,6 +133,7 @@ struct mlx5_flow_destination {
struct mlx5_pkt_reformat *pkt_reformat; struct mlx5_pkt_reformat *pkt_reformat;
u8 flags; u8 flags;
} vport; } vport;
u32 sampler_id;
}; };
}; };
...@@ -173,9 +175,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, ...@@ -173,9 +175,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table * struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio, struct mlx5_flow_table_attr *ft_attr, u16 vport);
int num_flow_table_entries,
u32 level, u16 vport);
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
struct mlx5_flow_namespace *ns, struct mlx5_flow_namespace *ns,
int prio, u32 level); int prio, u32 level);
......
...@@ -299,6 +299,8 @@ enum { ...@@ -299,6 +299,8 @@ enum {
MLX5_CMD_OP_CREATE_UMEM = 0xa08, MLX5_CMD_OP_CREATE_UMEM = 0xa08,
MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
MLX5_CMD_OP_SYNC_STEERING = 0xb00, MLX5_CMD_OP_SYNC_STEERING = 0xb00,
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_MAX MLX5_CMD_OP_MAX
}; };
...@@ -623,6 +625,26 @@ struct mlx5_ifc_fte_match_set_misc3_bits { ...@@ -623,6 +625,26 @@ struct mlx5_ifc_fte_match_set_misc3_bits {
u8 reserved_at_140[0xc0]; u8 reserved_at_140[0xc0];
}; };
struct mlx5_ifc_fte_match_set_misc4_bits {
u8 prog_sample_field_value_0[0x20];
u8 prog_sample_field_id_0[0x20];
u8 prog_sample_field_value_1[0x20];
u8 prog_sample_field_id_1[0x20];
u8 prog_sample_field_value_2[0x20];
u8 prog_sample_field_id_2[0x20];
u8 prog_sample_field_value_3[0x20];
u8 prog_sample_field_id_3[0x20];
u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits { struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20]; u8 pa_h[0x20];
...@@ -891,7 +913,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { ...@@ -891,7 +913,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ipv4_over_vxlan[0x1];
u8 tunnel_stateless_ip_over_ip[0x1]; u8 tunnel_stateless_ip_over_ip[0x1];
u8 insert_trailer[0x1]; u8 insert_trailer[0x1];
u8 reserved_at_2b[0x5]; u8 reserved_at_2b[0x1];
u8 tunnel_stateless_ip_over_ip_rx[0x1];
u8 tunnel_stateless_ip_over_ip_tx[0x1];
u8 reserved_at_2e[0x2];
u8 max_vxlan_udp_ports[0x8]; u8 max_vxlan_udp_ports[0x8];
u8 reserved_at_38[0x6]; u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1]; u8 max_geneve_opt_len[0x1];
...@@ -1224,7 +1249,16 @@ enum mlx5_fc_bulk_alloc_bitmask { ...@@ -1224,7 +1249,16 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
struct mlx5_ifc_cmd_hca_cap_bits { struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x30]; u8 reserved_at_0[0x1f];
u8 vhca_resource_manager[0x1];
u8 reserved_at_20[0x3];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
u8 event_on_vhca_state_allocated[0x1];
u8 event_on_vhca_state_invalid[0x1];
u8 reserved_at_28[0x8];
u8 vhca_id[0x10]; u8 vhca_id[0x10];
u8 reserved_at_40[0x40]; u8 reserved_at_40[0x40];
...@@ -1241,7 +1275,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1241,7 +1275,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ece_support[0x1]; u8 ece_support[0x1];
u8 reserved_at_a4[0x7]; u8 reserved_at_a4[0x7];
u8 log_max_srq[0x5]; u8 log_max_srq[0x5];
u8 reserved_at_b0[0x10]; u8 reserved_at_b0[0x2];
u8 ts_cqe_to_dest_cqn[0x1];
u8 reserved_at_b3[0xd];
u8 max_sgl_for_optimized_performance[0x8]; u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8]; u8 log_max_cq_sz[0x8];
...@@ -1512,7 +1548,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1512,7 +1548,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 disable_local_lb_uc[0x1]; u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1]; u8 disable_local_lb_mc[0x1];
u8 log_min_hairpin_wq_data_sz[0x5]; u8 log_min_hairpin_wq_data_sz[0x5];
u8 reserved_at_3e8[0x3]; u8 reserved_at_3e8[0x2];
u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5]; u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3]; u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5]; u8 log_max_current_mc_list[0x5];
...@@ -1580,7 +1617,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1580,7 +1617,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_num_of_monitor_counters[0x10]; u8 max_num_of_monitor_counters[0x10];
u8 num_ppcnt_monitor_counters[0x10]; u8 num_ppcnt_monitor_counters[0x10];
u8 reserved_at_640[0x10]; u8 max_num_sf[0x10];
u8 num_q_monitor_counters[0x10]; u8 num_q_monitor_counters[0x10];
u8 reserved_at_660[0x20]; u8 reserved_at_660[0x20];
...@@ -1616,6 +1653,7 @@ enum mlx5_flow_destination_type { ...@@ -1616,6 +1653,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
...@@ -1668,7 +1706,9 @@ struct mlx5_ifc_fte_match_param_bits { ...@@ -1668,7 +1706,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3; struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
u8 reserved_at_a00[0x600]; struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
u8 reserved_at_c00[0x400];
}; };
enum { enum {
...@@ -3289,8 +3329,12 @@ struct mlx5_ifc_sqc_bits { ...@@ -3289,8 +3329,12 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_80[0x10]; u8 reserved_at_80[0x10];
u8 hairpin_peer_vhca[0x10]; u8 hairpin_peer_vhca[0x10];
u8 reserved_at_a0[0x50]; u8 reserved_at_a0[0x20];
u8 reserved_at_c0[0x8];
u8 ts_cqe_to_dest_cqn[0x18];
u8 reserved_at_e0[0x10];
u8 packet_pacing_rate_limit_index[0x10]; u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10]; u8 tis_lst_sz[0x10];
u8 reserved_at_110[0x10]; u8 reserved_at_110[0x10];
...@@ -4204,7 +4248,11 @@ struct mlx5_ifc_set_hca_cap_in_bits { ...@@ -4204,7 +4248,11 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_at_20[0x10]; u8 reserved_at_20[0x10];
u8 op_mod[0x10]; u8 op_mod[0x10];
u8 reserved_at_40[0x40]; u8 other_function[0x1];
u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
union mlx5_ifc_hca_cap_union_bits capability; union mlx5_ifc_hca_cap_union_bits capability;
}; };
...@@ -5461,6 +5509,7 @@ enum { ...@@ -5461,6 +5509,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
}; };
struct mlx5_ifc_query_flow_group_out_bits { struct mlx5_ifc_query_flow_group_out_bits {
...@@ -10657,11 +10706,13 @@ struct mlx5_ifc_affiliated_event_header_bits { ...@@ -10657,11 +10706,13 @@ struct mlx5_ifc_affiliated_event_header_bits {
enum { enum {
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc),
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13), MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13),
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT(0x20),
}; };
enum { enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
}; };
enum { enum {
...@@ -10736,6 +10787,33 @@ struct mlx5_ifc_create_encryption_key_in_bits { ...@@ -10736,6 +10787,33 @@ struct mlx5_ifc_create_encryption_key_in_bits {
struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
}; };
struct mlx5_ifc_sampler_obj_bits {
u8 modify_field_select[0x40];
u8 table_type[0x8];
u8 level[0x8];
u8 reserved_at_50[0xf];
u8 ignore_flow_level[0x1];
u8 sample_ratio[0x20];
u8 reserved_at_80[0x8];
u8 sample_table_id[0x18];
u8 reserved_at_a0[0x8];
u8 default_table_id[0x18];
u8 sw_steering_icm_address_rx[0x40];
u8 sw_steering_icm_address_tx[0x40];
u8 reserved_at_140[0xa0];
};
struct mlx5_ifc_create_sampler_obj_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_sampler_obj_bits sampler_object;
};
enum { enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
......
...@@ -232,7 +232,7 @@ enum mlx5_ib_device_query_context_attrs { ...@@ -232,7 +232,7 @@ enum mlx5_ib_device_query_context_attrs {
MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
}; };
#define MLX5_IB_DW_MATCH_PARAM 0x80 #define MLX5_IB_DW_MATCH_PARAM 0x90
struct mlx5_ib_match_params { struct mlx5_ib_match_params {
__u32 match_params[MLX5_IB_DW_MATCH_PARAM]; __u32 match_params[MLX5_IB_DW_MATCH_PARAM];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment