Commit 62a41dc7 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2023-05-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-05-19

mlx5 misc changes and code clean up:

The following series contains general changes for improving
E-Switch driver behavior.

1) Improving condition checking
2) Code clean up
3) Using metadata matching on send-to-vport rules.
4) Using RoCE v2 instead of v1 for loopback rules.

* tag 'mlx5-updates-2023-05-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: E-Switch, Initialize E-Switch for eswitch manager
  net/mlx5: devlink, Only show PF related devlink warning when needed
  net/mlx5: E-Switch, Use metadata matching for RoCE loopback rule
  net/mlx5: E-Switch, Use RoCE version 2 for loopback traffic
  net/mlx5e: E-Switch, Add a check that log_max_l2_table is valid
  net/mlx5e: E-Switch: move debug print of adding mac to correct place
  net/mlx5e: E-Switch, Check device is PF when stopping esw offloads
  net/mlx5: Remove redundant vport_group_manager cap check
  net/mlx5e: E-Switch, Use metadata for vport matching in send-to-vport rules
  net/mlx5e: E-Switch, Allow get vport api if esw exists
  net/mlx5e: E-Switch, Update when to set other vport context
  net/mlx5e: Remove redundant __func__ arg from fs_err() calls
  net/mlx5e: E-Switch, Remove flow_source check for metadata matching
  net/mlx5: E-Switch, Remove redundant check
  net/mlx5: Remove redundant esw multiport validate function
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Link: https://lore.kernel.org/r/20230519175557.15683-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents de5c9bf4 f5d87b47
...@@ -162,9 +162,8 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, ...@@ -162,9 +162,8 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (pci_num_vf(pdev)) { if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable"); NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
}
switch (action) { switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
...@@ -464,27 +463,6 @@ static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id, ...@@ -464,27 +463,6 @@ static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
ctx->val.vbool = mlx5_lag_is_mpesw(dev); ctx->val.vbool = mlx5_lag_is_mpesw(dev);
return 0; return 0;
} }
static int mlx5_devlink_esw_multiport_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_ESWITCH_MANAGER(dev)) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
return -EOPNOTSUPP;
}
if (mlx5_eswitch_mode(dev) != MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
"E-Switch must be in switchdev mode");
return -EBUSY;
}
return 0;
}
#endif #endif
static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id, static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
...@@ -563,7 +541,7 @@ static const struct devlink_param mlx5_devlink_params[] = { ...@@ -563,7 +541,7 @@ static const struct devlink_param mlx5_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME), BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_esw_multiport_get, mlx5_devlink_esw_multiport_get,
mlx5_devlink_esw_multiport_set, mlx5_devlink_esw_multiport_set,
mlx5_devlink_esw_multiport_validate), NULL),
#endif #endif
DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_eq_depth_validate), NULL, NULL, mlx5_devlink_eq_depth_validate),
......
...@@ -283,7 +283,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs, ...@@ -283,7 +283,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
if (IS_ERR(*rule_p)) { if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p); err = PTR_ERR(*rule_p);
*rule_p = NULL; *rule_p = NULL;
fs_err(fs, "%s: add rule failed\n", __func__); fs_err(fs, "add rule failed\n");
} }
return err; return err;
...@@ -395,8 +395,7 @@ int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num ...@@ -395,8 +395,7 @@ int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num
if (IS_ERR(rule)) { if (IS_ERR(rule)) {
err = PTR_ERR(rule); err = PTR_ERR(rule);
fs->vlan->trap_rule = NULL; fs->vlan->trap_rule = NULL;
fs_err(fs, "%s: add VLAN trap rule failed, err %d\n", fs_err(fs, "add VLAN trap rule failed, err %d\n", err);
__func__, err);
return err; return err;
} }
fs->vlan->trap_rule = rule; fs->vlan->trap_rule = rule;
...@@ -421,8 +420,7 @@ int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num) ...@@ -421,8 +420,7 @@ int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
if (IS_ERR(rule)) { if (IS_ERR(rule)) {
err = PTR_ERR(rule); err = PTR_ERR(rule);
fs->l2.trap_rule = NULL; fs->l2.trap_rule = NULL;
fs_err(fs, "%s: add MAC trap rule failed, err %d\n", fs_err(fs, "add MAC trap rule failed, err %d\n", err);
__func__, err);
return err; return err;
} }
fs->l2.trap_rule = rule; fs->l2.trap_rule = rule;
...@@ -763,7 +761,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs) ...@@ -763,7 +761,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
if (IS_ERR(*rule_p)) { if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p); err = PTR_ERR(*rule_p);
*rule_p = NULL; *rule_p = NULL;
fs_err(fs, "%s: add promiscuous rule failed\n", __func__); fs_err(fs, "add promiscuous rule failed\n");
} }
kvfree(spec); kvfree(spec);
return err; return err;
...@@ -995,7 +993,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs, ...@@ -995,7 +993,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) { if (IS_ERR(ai->rule)) {
fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac); fs_err(fs, "add l2 rule(mac:%pM) failed\n", mv_dmac);
err = PTR_ERR(ai->rule); err = PTR_ERR(ai->rule);
ai->rule = NULL; ai->rule = NULL;
} }
......
...@@ -35,7 +35,8 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, ...@@ -35,7 +35,8 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
} }
ft_attr.max_fte = size; ft_attr.max_fte = size;
ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT; if (vport_num || mlx5_core_is_ecpf(esw->dev))
ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num); acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num);
if (IS_ERR(acl)) { if (IS_ERR(acl)) {
err = PTR_ERR(acl); err = PTR_ERR(acl);
......
...@@ -92,7 +92,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) ...@@ -92,7 +92,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{ {
struct mlx5_vport *vport; struct mlx5_vport *vport;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) if (!esw)
return ERR_PTR(-EPERM); return ERR_PTR(-EPERM);
vport = xa_load(&esw->vports, vport_num); vport = xa_load(&esw->vports, vport_num);
...@@ -113,7 +113,8 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, ...@@ -113,7 +113,8 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); if (vport || mlx5_core_is_ecpf(dev))
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context); in, nic_vport_context);
...@@ -309,11 +310,12 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) ...@@ -309,11 +310,12 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
fdb_add: fdb_add:
/* SRIOV is enabled: Forward UC MAC to vport */ /* SRIOV is enabled: Forward UC MAC to vport */
if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) {
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
vport, mac, vaddr->flow_rule); vport, mac, vaddr->flow_rule);
}
return 0; return 0;
} }
...@@ -710,6 +712,9 @@ void esw_vport_change_handle_locked(struct mlx5_vport *vport) ...@@ -710,6 +712,9 @@ void esw_vport_change_handle_locked(struct mlx5_vport *vport)
struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN]; u8 mac[ETH_ALEN];
if (!MLX5_CAP_GEN(dev, log_max_l2_table))
return;
mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac); mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n", esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac); vport->vport, mac);
...@@ -946,7 +951,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) ...@@ -946,7 +951,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
vport->enabled = false; vport->enabled = false;
/* Disable events from this vport */ /* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0); if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
if (!mlx5_esw_is_manager_vport(esw, vport->vport) && if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
...@@ -1616,7 +1622,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1616,7 +1622,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
struct mlx5_eswitch *esw; struct mlx5_eswitch *esw;
int err; int err;
if (!MLX5_VPORT_MANAGER(dev)) if (!MLX5_VPORT_MANAGER(dev) && !MLX5_ESWITCH_MANAGER(dev))
return 0; return 0;
esw = kzalloc(sizeof(*esw), GFP_KERNEL); esw = kzalloc(sizeof(*esw), GFP_KERNEL);
...@@ -1686,7 +1692,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ...@@ -1686,7 +1692,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{ {
if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) if (!esw)
return; return;
esw_info(esw->dev, "cleanup\n"); esw_info(esw->dev, "cleanup\n");
......
...@@ -683,6 +683,14 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr ...@@ -683,6 +683,14 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
struct mlx5_flow_handle * struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag); esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
u32 *flow_group_in,
int match_params);
void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
u16 vport,
struct mlx5_flow_spec *spec);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num); int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num); void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
......
...@@ -838,6 +838,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, ...@@ -838,6 +838,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_flow_handle *flow_rule; struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
void *misc; void *misc;
u16 vport;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL); spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) { if (!spec) {
...@@ -847,20 +848,43 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, ...@@ -847,20 +848,43 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
/* source vport is the esw manager */
MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
MLX5_CAP_GEN(from_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
/* source vport is the esw manager */
vport = from_esw->manager_vport;
if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, vport);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
MLX5_CAP_GEN(from_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = rep->vport; dest.vport.num = rep->vport;
dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id); dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
...@@ -1269,8 +1293,10 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) ...@@ -1269,8 +1293,10 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
#define MAX_PF_SQ 256 #define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32 #define MAX_SQ_NVPORTS 32
static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, void
u32 *flow_group_in) mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
u32 *flow_group_in,
int match_params)
{ {
void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in, flow_group_in,
...@@ -1279,7 +1305,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, ...@@ -1279,7 +1305,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
MLX5_SET(create_flow_group_in, flow_group_in, MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS_2); MLX5_MATCH_MISC_PARAMETERS_2 | match_params);
MLX5_SET(fte_match_param, match_criteria, MLX5_SET(fte_match_param, match_criteria,
misc_parameters_2.metadata_reg_c_0, misc_parameters_2.metadata_reg_c_0,
...@@ -1287,7 +1313,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, ...@@ -1287,7 +1313,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
} else { } else {
MLX5_SET(create_flow_group_in, flow_group_in, MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS); MLX5_MATCH_MISC_PARAMETERS | match_params);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port); misc_parameters.source_port);
...@@ -1463,14 +1489,13 @@ esw_create_send_to_vport_group(struct mlx5_eswitch *esw, ...@@ -1463,14 +1489,13 @@ esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
memset(flow_group_in, 0, inlen); memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);
MLX5_MATCH_MISC_PARAMETERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
MLX5_SET_TO_ONES(fte_match_param, match_criteria, MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_eswitch_owner_vhca_id); misc_parameters.source_eswitch_owner_vhca_id);
MLX5_SET(create_flow_group_in, flow_group_in, MLX5_SET(create_flow_group_in, flow_group_in,
...@@ -1558,7 +1583,7 @@ esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw, ...@@ -1558,7 +1583,7 @@ esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
memset(flow_group_in, 0, inlen); memset(flow_group_in, 0, inlen);
esw_set_flow_group_source_port(esw, flow_group_in); mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
match_criteria = MLX5_ADDR_OF(create_flow_group_in, match_criteria = MLX5_ADDR_OF(create_flow_group_in,
...@@ -1845,7 +1870,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) ...@@ -1845,7 +1870,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
return -ENOMEM; return -ENOMEM;
/* create vport rx group */ /* create vport rx group */
esw_set_flow_group_source_port(esw, flow_group_in); mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
...@@ -1915,21 +1940,13 @@ static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw) ...@@ -1915,21 +1940,13 @@ static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group); mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
} }
struct mlx5_flow_handle * void
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_destination *dest) u16 vport,
struct mlx5_flow_spec *spec)
{ {
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc; void *misc;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2); misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
...@@ -1949,6 +1966,23 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, ...@@ -1949,6 +1966,23 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
} }
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
mlx5_esw_set_spec_source_port(esw, vport, spec);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec, flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
...@@ -2827,9 +2861,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw) ...@@ -2827,9 +2861,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
MLX5_FDB_TO_VPORT_REG_C_0)) MLX5_FDB_TO_VPORT_REG_C_0))
return false; return false;
if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return false;
return true; return true;
} }
...@@ -3280,7 +3311,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, ...@@ -3280,7 +3311,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
/* If changing from switchdev to legacy mode without sriov enabled, /* If changing from switchdev to legacy mode without sriov enabled,
* no need to create legacy fdb. * no need to create legacy fdb.
*/ */
if (!mlx5_sriov_is_enabled(esw->dev)) if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
return 0; return 0;
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
......
...@@ -196,14 +196,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) ...@@ -196,14 +196,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err; return err;
} }
if (MLX5_CAP_GEN(dev, vport_group_manager) && if (MLX5_ESWITCH_MANAGER(dev)) {
MLX5_ESWITCH_MANAGER(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
if (err) if (err)
return err; return err;
}
if (MLX5_ESWITCH_MANAGER(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
if (err) if (err)
return err; return err;
......
...@@ -99,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) ...@@ -99,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
struct mlx5_mpfs *mpfs; struct mlx5_mpfs *mpfs;
if (!MLX5_ESWITCH_MANAGER(dev)) if (!MLX5_ESWITCH_MANAGER(dev) || l2table_size == 1)
return 0; return 0;
mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
......
...@@ -30,9 +30,8 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) ...@@ -30,9 +30,8 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft; struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg; struct mlx5_flow_group *fg;
void *match_criteria; struct mlx5_eswitch *esw;
u32 *flow_group_in; u32 *flow_group_in;
void *misc;
int err; int err;
if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
...@@ -63,12 +62,8 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) ...@@ -63,12 +62,8 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
goto free; goto free;
} }
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, esw = dev->priv.eswitch;
MLX5_MATCH_MISC_PARAMETERS); mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port);
fg = mlx5_create_flow_group(ft, flow_group_in); fg = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(fg)) { if (IS_ERR(fg)) {
...@@ -77,14 +72,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) ...@@ -77,14 +72,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
goto destroy_flow_table; goto destroy_flow_table;
} }
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; mlx5_esw_set_spec_source_port(esw, esw->manager_vport, spec);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port,
dev->priv.eswitch->manager_vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0); flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
...@@ -115,7 +103,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) ...@@ -115,7 +103,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev) static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{ {
mlx5_core_roce_gid_set(dev, 0, 0, 0, mlx5_core_roce_gid_set(dev, 0, MLX5_ROCE_VERSION_2, 0,
NULL, NULL, false, 0, 1); NULL, NULL, false, 0, 1);
} }
...@@ -135,7 +123,7 @@ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) ...@@ -135,7 +123,7 @@ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
mlx5_rdma_make_default_gid(dev, &gid); mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0, return mlx5_core_roce_gid_set(dev, 0,
MLX5_ROCE_VERSION_1, MLX5_ROCE_VERSION_2,
0, gid.raw, mac, 0, gid.raw, mac,
false, 0, 1); false, 0, 1);
} }
......
...@@ -74,9 +74,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) ...@@ -74,9 +74,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
struct mlx5_core_sriov *sriov = &dev->priv.sriov; struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err, vf, num_msix_count; int err, vf, num_msix_count;
if (!MLX5_ESWITCH_MANAGER(dev))
goto enable_vfs_hca;
err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs); err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
if (err) { if (err) {
mlx5_core_warn(dev, mlx5_core_warn(dev,
...@@ -84,7 +81,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) ...@@ -84,7 +81,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
return err; return err;
} }
enable_vfs_hca:
num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs); num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
for (vf = 0; vf < num_vfs; vf++) { for (vf = 0; vf < num_vfs; vf++) {
/* Notify the VF before its enablement to let it set /* Notify the VF before its enablement to let it set
......
...@@ -288,7 +288,8 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, ...@@ -288,7 +288,8 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type); MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); if (vport || mlx5_core_is_ecpf(dev))
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err) if (err)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment