Commit 2b68abf9 authored by Paolo Abeni

Merge tag 'mlx5-updates-2022-05-02' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-05-02

1) Trivial Misc updates to mlx5 driver

2) From Mark Bloch: Flow steering, general steering refactoring/cleaning

An issue with the flow steering deletion flow (when creating a rule
without dests) turned out to be easy to fix, but while fixing it a few
more issues with the flow steering creation/deletion flows were found.

The following patch series tries to fix long-standing issues in the
flow steering code and hopefully prevents silly future bugs.

  A) Fix an issue where a proper dest type wasn't assigned.
  B) Refactor and fix the dest enum values, refactor the deletion
     function and do proper bookkeeping of dests.
  C) Change mlx5_del_flow_rules() to delete the FTE when there are no
     more rules attached to it.
  D) Don't call a hard-coded deletion function, but use the node's
     defined one.
  E) Add a WARN_ON() to catch future bugs when an FTE with dests
     is deleted (a simplified sketch of this bookkeeping follows below).
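
For orientation, here is a minimal user-space C sketch of the bookkeeping
that (B), (C) and (E) describe; the struct and function names are invented
for illustration and are not the driver's:

  #include <assert.h>
  #include <stdio.h>

  /* Illustrative stand-ins for the driver's FTE accounting. */
  struct fte {
          unsigned int dests_size; /* every destination attached to the FTE */
          unsigned int fwd_dests;  /* only the forward-type destinations */
  };

  /* Rule deletion: drop the counters; clear the FWD_DEST action only
   * once no forward destination remains.
   */
  static void del_fwd_dest(struct fte *fte)
  {
          fte->dests_size--;
          fte->fwd_dests--;
          if (!fte->fwd_dests)
                  printf("clearing FWD_DEST action\n");
  }

  /* FTE deletion: by this point all dests must be gone (point E). */
  static void del_fte(struct fte *fte)
  {
          assert(fte->dests_size == 0); /* mirrors the new WARN_ON() */
  }

  int main(void)
  {
          struct fte fte = { .dests_size = 2, .fwd_dests = 2 };

          del_fwd_dest(&fte);
          del_fwd_dest(&fte);
          del_fte(&fte);
          return 0;
  }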

* tag 'mlx5-updates-2022-05-02' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: fs, an FTE should have no dests when deleted
  net/mlx5: fs, call the deletion function of the node
  net/mlx5: fs, delete the FTE when there are no rules attached to it
  net/mlx5: fs, do proper bookkeeping for forward destinations
  net/mlx5: fs, add unused destination type
  net/mlx5: fs, jump to exit point and don't fall through
  net/mlx5: fs, refactor software deletion rule
  net/mlx5: fs, split software and IFC flow destination definitions
  net/mlx5e: TC, set proper dest type
  net/mlx5e: Remove unused mlx5e_dcbnl_build_rep_netdev function
  net/mlx5e: Drop error CQE handling from the XSK RX handler
  net/mlx5: Print initializing field in case of timeout
  net/mlx5: Delete redundant default assignment of runtime devlink params
  net/mlx5: Remove useless kfree
  net/mlx5: use kvfree() for kvzalloc() in mlx5_ct_fs_smfs_matcher_create
====================

Link: https://lore.kernel.org/r/
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents f4f1fd76 3a09fae0
@@ -584,14 +584,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	union devlink_param_value value;

-	if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS)
-		strcpy(value.vstr, "dmfs");
-	else
-		strcpy(value.vstr, "smfs");
-	devlink_param_driverinit_value_set(devlink,
-					   MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
-					   value);
-
 	value.vbool = MLX5_CAP_GEN(dev, roce);
 	devlink_param_driverinit_value_set(devlink,
 					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
@@ -602,18 +594,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
 	devlink_param_driverinit_value_set(devlink,
 					   MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
 					   value);

-	if (MLX5_ESWITCH_MANAGER(dev)) {
-		if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) {
-			dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-			value.vbool = true;
-		} else {
-			value.vbool = false;
-		}
-		devlink_param_driverinit_value_set(devlink,
-						   MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
-						   value);
-	}
 #endif

 	value.vu32 = MLX5_COMP_EQ_SIZE;
@@ -259,6 +259,9 @@ const char *parse_fs_dst(struct trace_seq *p,
 	case MLX5_FLOW_DESTINATION_TYPE_PORT:
 		trace_seq_printf(p, "port\n");
 		break;
+	case MLX5_FLOW_DESTINATION_TYPE_NONE:
+		trace_seq_printf(p, "none\n");
+		break;
 	}

 	trace_seq_putc(p, 0);
@@ -648,8 +648,8 @@ typedef struct sk_buff *
 (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
 typedef struct sk_buff *
-(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
+(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+			 u32 cqe_bcnt);
 typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
@@ -40,13 +40,11 @@ struct mlx5e_dcbx_dp {
 };

 void mlx5e_dcbnl_build_netdev(struct net_device *netdev);
-void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev);
 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
 void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
 void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
 #else
 static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {}
-static inline void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) {}
 static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {}
 static inline void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) {}
 static inline void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) {}
@@ -100,7 +100,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;

 	dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
-	kfree(spec);
+	kvfree(spec);
 	if (!dr_matcher)
 		return ERR_PTR(-EINVAL);
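
The one-liner above follows the standard kernel pairing rule: memory that
comes from kvzalloc()/kvmalloc() may be vmalloc-backed, so it must be
released with kvfree(), never kfree(). A minimal kernel-style sketch of the
pattern (the function name and size are made up):

  #include <linux/errno.h>
  #include <linux/mm.h>
  #include <linux/slab.h>

  static int example_alloc_free(void)
  {
          void *spec;

          /* kvzalloc() tries kmalloc() first and falls back to
           * vmalloc() for large or fragmented allocations.
           */
          spec = kvzalloc(16 * 1024, GFP_KERNEL);
          if (!spec)
                  return -ENOMEM;

          /* ... use spec ... */

          kvfree(spec); /* correct for either backing allocator */
          return 0;
  }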
@@ -93,6 +93,7 @@ sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample)
 	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	dest.vport.num = esw->manager_vport;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1);
 	if (IS_ERR(tc_psample->termtbl_rule)) {
 		err = PTR_ERR(tc_psample->termtbl_rule);
@@ -1812,7 +1812,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 	ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
 	if (!ct_flow) {
-		kfree(ct_flow);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -80,7 +80,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 }

 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
-					      struct mlx5_cqe64 *cqe,
 					      struct mlx5e_wqe_frag_info *wi,
 					      u32 cqe_bcnt)
 {
@@ -99,11 +98,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
 	net_prefetch(xdp->data);

-	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
-		rq->stats->wqe_err++;
-		return NULL;
-	}
-
 	prog = rcu_dereference(rq->xdp_prog);
 	if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
 		return NULL; /* page/packet was consumed by XDP */
@@ -15,7 +15,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 					      u32 head_offset,
 					      u32 page_idx);
 struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
-					      struct mlx5_cqe64 *cqe,
 					      struct mlx5e_wqe_frag_info *wi,
 					      u32 cqe_bcnt);
@@ -1026,15 +1026,6 @@ void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
 		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
 }

-void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	if (MLX5_CAP_GEN(mdev, qos))
-		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-}
-
 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
 					enum mlx5_dcbx_oper_mode *mode)
 {
@@ -1521,8 +1521,8 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
 }

 static struct sk_buff *
-mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-			  struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+			  u32 cqe_bcnt)
 {
 	struct mlx5e_dma_info *di = wi->di;
 	u16 rx_headroom = rq->buff.headroom;
@@ -1565,8 +1565,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 }

 static struct sk_buff *
-mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
+mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+			     u32 cqe_bcnt)
 {
 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
 	struct mlx5e_wqe_frag_info *head_wi = wi;
@@ -1709,7 +1709,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
 			      mlx5e_skb_from_cqe_linear,
 			      mlx5e_skb_from_cqe_nonlinear,
-			      rq, cqe, wi, cqe_bcnt);
+			      rq, wi, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1762,7 +1762,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
 			      mlx5e_skb_from_cqe_linear,
 			      mlx5e_skb_from_cqe_nonlinear,
-			      rq, cqe, wi, cqe_bcnt);
+			      rq, wi, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -2361,7 +2361,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
 			      mlx5e_skb_from_cqe_linear,
 			      mlx5e_skb_from_cqe_nonlinear,
-			      rq, cqe, wi, cqe_bcnt);
+			      rq, wi, cqe_bcnt);
 	if (!skb)
 		goto wq_free_wqe;
@@ -2453,7 +2453,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
 		goto free_wqe;
 	}

-	skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
+	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
 	if (!skb)
 		goto free_wqe;
@@ -1582,6 +1582,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
 	else
 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+	if (MLX5_ESWITCH_MANAGER(dev) &&
+	    mlx5_esw_vport_match_metadata_supported(esw))
+		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
 	dev->priv.eswitch = esw;
 	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
@@ -455,7 +455,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
 		return 0;

 	list_for_each_entry(dst, &fte->node.children, node.list) {
-		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
+		    dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
 			continue;
 		if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
 		     dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
@@ -571,18 +572,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 		int list_size = 0;

 		list_for_each_entry(dst, &fte->node.children, node.list) {
-			unsigned int id, type = dst->dest_attr.type;
+			enum mlx5_flow_destination_type type = dst->dest_attr.type;
+			enum mlx5_ifc_flow_destination_type ifc_type;
+			unsigned int id;

 			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
 				continue;

 			switch (type) {
+			case MLX5_FLOW_DESTINATION_TYPE_NONE:
+				continue;
 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
 				id = dst->dest_attr.ft_num;
-				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 				break;
 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
 				id = dst->dest_attr.ft->id;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 				break;
 			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
 			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
@@ -596,8 +602,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 				if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
 					/* destination_id is reserved */
 					id = 0;
+					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
 					break;
 				}
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
 				id = dst->dest_attr.vport.num;
 				if (extended_dest &&
 				    dst->dest_attr.vport.pkt_reformat) {
@@ -612,13 +620,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 				break;
 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
 				id = dst->dest_attr.sampler_id;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
 				break;
 			default:
 				id = dst->dest_attr.tir_num;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
 			}

 			MLX5_SET(dest_format_struct, in_dests, destination_type,
-				 type);
+				 ifc_type);
 			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
 			in_dests += dst_cnt_size;
 			list_size++;
@@ -424,6 +424,16 @@ static bool is_fwd_next_action(u32 action)
 			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
 }

+static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
+{
+	return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
+		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
+		type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
+		type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
+		type == MLX5_FLOW_DESTINATION_TYPE_TIR;
+}
+
 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
 {
 	int i;
@@ -550,8 +560,8 @@ static void del_sw_hw_rule(struct fs_node *node)
 		mutex_unlock(&rule->dest_attr.ft->lock);
 	}

-	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
-	    --fte->dests_size) {
+	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
+		--fte->dests_size;
 		fte->modify_mask |=
 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
@@ -559,17 +569,23 @@ static void del_sw_hw_rule(struct fs_node *node)
 		goto out;
 	}

-	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
-	    --fte->dests_size) {
+	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+		--fte->dests_size;
 		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
 		goto out;
 	}

-	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
-	    --fte->dests_size) {
+	if (is_fwd_dest_type(rule->dest_attr.type)) {
+		--fte->dests_size;
+		--fte->fwd_dests;
+
+		if (!fte->fwd_dests)
+			fte->action.action &=
+				~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 		fte->modify_mask |=
 			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+		goto out;
 	}
 out:
 	kfree(rule);
@@ -589,6 +605,7 @@ static void del_hw_fte(struct fs_node *node)
 	fs_get_obj(ft, fg->node.parent);

 	trace_mlx5_fs_del_fte(fte);
+	WARN_ON(fte->dests_size);
 	dev = get_dev(&ft->node);
 	root = find_root(&ft->node);
 	if (node->active) {
@@ -1288,6 +1305,8 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
 	rule->node.type = FS_TYPE_FLOW_DEST;
 	if (dest)
 		memcpy(&rule->dest_attr, dest, sizeof(*dest));
+	else
+		rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;

 	return rule;
 }
@@ -1364,6 +1383,9 @@ create_flow_handle(struct fs_fte *fte,
 		if (dest) {
 			fte->dests_size++;

+			if (is_fwd_dest_type(dest[i].type))
+				fte->fwd_dests++;
+
 			type = dest[i].type ==
 				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 			*modify_mask |= type ? count : dst;
@@ -2063,16 +2085,16 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
 	down_write_ref_node(&fte->node, false);
 	for (i = handle->num_rules - 1; i >= 0; i--)
 		tree_remove_node(&handle->rule[i]->node, true);
-	if (fte->dests_size) {
-		if (fte->modify_mask)
-			modify_fte(fte);
-		up_write_ref_node(&fte->node, false);
-	} else if (list_empty(&fte->node.children)) {
-		del_hw_fte(&fte->node);
+	if (list_empty(&fte->node.children)) {
+		fte->node.del_hw_func(&fte->node);
 		/* Avoid double call to del_hw_fte */
 		fte->node.del_hw_func = NULL;
 		up_write_ref_node(&fte->node, false);
 		tree_put_node(&fte->node, false);
+	} else if (fte->dests_size) {
+		if (fte->modify_mask)
+			modify_fte(fte);
+		up_write_ref_node(&fte->node, false);
 	} else {
 		up_write_ref_node(&fte->node, false);
 	}
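
The reordering above also highlights a small defensive idiom: the node's
hardware-deletion callback is called exactly once and then cleared, so a
later tree_put_node() cannot invoke it again. A self-contained C sketch of
that idiom, with invented names:

  #include <stdio.h>

  struct node {
          void (*del_hw_func)(struct node *node);
  };

  static void del_hw_fte(struct node *node)
  {
          (void)node;
          printf("deleting FTE in hardware\n");
  }

  static void drop_node(struct node *node)
  {
          if (node->del_hw_func) {
                  node->del_hw_func(node);
                  node->del_hw_func = NULL; /* avoid a double call */
          }
  }

  int main(void)
  {
          struct node fte = { .del_hw_func = del_hw_fte };

          drop_node(&fte); /* deletes in hardware */
          drop_node(&fte); /* safe no-op */
          return 0;
  }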
@@ -226,6 +226,7 @@ struct fs_fte {
 	struct mlx5_fs_dr_rule fs_dr_rule;
 	u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
 	u32 dests_size;
+	u32 fwd_dests;
 	u32 index;
 	struct mlx5_flow_context flow_context;
 	struct mlx5_flow_act action;
@@ -177,30 +177,29 @@ static struct mlx5_profile profile[] = {
 	},
 };

-static int fw_initializing(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->initializing) >> 31;
-}
-
 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
 			u32 warn_time_mili)
 {
 	unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
 	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+	u32 fw_initializing;
 	int err = 0;

-	while (fw_initializing(dev)) {
+	do {
+		fw_initializing = ioread32be(&dev->iseg->initializing);
+		if (!(fw_initializing >> 31))
+			break;
 		if (time_after(jiffies, end)) {
 			err = -EBUSY;
 			break;
 		}
 		if (warn_time_mili && time_after(jiffies, warn)) {
-			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
-				       jiffies_to_msecs(end - warn) / 1000);
+			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
+				       jiffies_to_msecs(end - warn) / 1000, fw_initializing);
 			warn = jiffies + msecs_to_jiffies(warn_time_mili);
 		}
 		msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
-	}
+	} while (true);

 	return err;
 }
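
With this change the periodic warning also dumps the raw initializing word
that was just read, so a stuck device now logs something like the following
(the device address and register value here are hypothetical):

  mlx5_core 0000:03:00.0: Waiting for FW initialization, timeout abort in 18s (0x80000005)

The extra hex value lets firmware engineers see how far initialization
progressed instead of only knowing that bit 31 was still set.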
@@ -311,7 +311,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
 	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);

 	MLX5_SET(dest_format_struct, in_dests, destination_type,
-		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
+		 MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
 	MLX5_SET(dest_format_struct, in_dests, destination_id, vport);

 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
@@ -604,7 +604,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
 		return 0;
 	for (i = 0; i < fte->dests_size; i++) {
-		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
+		    fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
 			continue;
 		if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
 		     fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
@@ -719,18 +720,24 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
 		int list_size = 0;

 		for (i = 0; i < fte->dests_size; i++) {
-			unsigned int id, type = fte->dest_arr[i].type;
+			enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
+			enum mlx5_ifc_flow_destination_type ifc_type;
+			unsigned int id;

 			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
 				continue;

 			switch (type) {
+			case MLX5_FLOW_DESTINATION_TYPE_NONE:
+				continue;
 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
 				id = fte->dest_arr[i].ft_num;
-				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 				break;
 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
 				id = fte->dest_arr[i].ft_id;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 				break;
 			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
 			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
@@ -740,8 +747,10 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
 					destination_eswitch_owner_vhca_id_valid,
 					!!(fte->dest_arr[i].vport.flags &
 					   MLX5_FLOW_DEST_VPORT_VHCA_ID));
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
 			} else {
 				id = 0;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
 				MLX5_SET(dest_format_struct, in_dests,
 					 destination_eswitch_owner_vhca_id_valid, 1);
 			}
@@ -761,13 +770,15 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
 				break;
 			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
 				id = fte->dest_arr[i].sampler_id;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
 				break;
 			default:
 				id = fte->dest_arr[i].tir_num;
+				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
 			}

 			MLX5_SET(dest_format_struct, in_dests, destination_type,
-				 type);
+				 ifc_type);
 			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
 			in_dests += dst_cnt_size;
 			list_size++;
@@ -40,6 +40,18 @@
 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

+enum mlx5_flow_destination_type {
+	MLX5_FLOW_DESTINATION_TYPE_NONE,
+	MLX5_FLOW_DESTINATION_TYPE_VPORT,
+	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+	MLX5_FLOW_DESTINATION_TYPE_TIR,
+	MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
+	MLX5_FLOW_DESTINATION_TYPE_UPLINK,
+	MLX5_FLOW_DESTINATION_TYPE_PORT,
+	MLX5_FLOW_DESTINATION_TYPE_COUNTER,
+	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+};
+
 enum {
 	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO	= 1 << 16,
 	MLX5_FLOW_CONTEXT_ACTION_ENCRYPT	= 1 << 17,
@@ -1806,16 +1806,12 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8	reserved_at_c0[0x740];
 };

-enum mlx5_flow_destination_type {
-	MLX5_FLOW_DESTINATION_TYPE_VPORT	= 0x0,
-	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE	= 0x1,
-	MLX5_FLOW_DESTINATION_TYPE_TIR		= 0x2,
-	MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER	= 0x6,
-	MLX5_FLOW_DESTINATION_TYPE_UPLINK	= 0x8,
-	MLX5_FLOW_DESTINATION_TYPE_PORT		= 0x99,
-	MLX5_FLOW_DESTINATION_TYPE_COUNTER	= 0x100,
-	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
+enum mlx5_ifc_flow_destination_type {
+	MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT		= 0x0,
+	MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE	= 0x1,
+	MLX5_IFC_FLOW_DESTINATION_TYPE_TIR		= 0x2,
+	MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER	= 0x6,
+	MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK		= 0x8,
 };

 enum mlx5_flow_table_miss_action {
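
The net effect of the split: the driver-facing enum in fs.h can grow
software-only values (NONE, PORT, COUNTER, FLOW_TABLE_NUM) without being
constrained by the device interface, while the IFC enum above stays an exact
mirror of the hardware values. The translation happens where commands are
built, as in the switch statements in mlx5_cmd_set_fte() and
mlx5dr_cmd_set_fte() shown earlier; a hypothetical standalone helper
expressing the same mapping might look like:

  static enum mlx5_ifc_flow_destination_type
  to_ifc_dest_type(enum mlx5_flow_destination_type type)
  {
          switch (type) {
          case MLX5_FLOW_DESTINATION_TYPE_VPORT:
                  return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
          case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
          case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
                  return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
          case MLX5_FLOW_DESTINATION_TYPE_TIR:
                  return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
          case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
                  return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
          case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
                  return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
          default:
                  /* NONE, PORT and COUNTER never reach the hardware
                   * destination list, so there is nothing to map.
                   */
                  return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
          }
  }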