Commit 94d39978 authored by David S. Miller

Merge tag 'mlx5-updates-2020-01-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-01-07

This series adds two sets of changes to the mlx5 driver:
1) Misc updates and cleanups:

1.1) Stack usage warning cleanups and log level reduction
1.2) Increase the max number of supported rings
1.3) Support the accept TC action on the native NIC netdev.

2) Software steering support for multi-destination steering rules:
The first three patches, from Erez, add the low-level FW command support
and SW steering infrastructure to create the multi-destination FW tables.

The last four patches, from Alex, introduce the changes and APIs needed in
SW steering to create and manage multi-destination actions and rules.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 53ebeca2 7ee3f6d2
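
For orientation before the diff: the headline feature of the series is the new
multi-destination API in SW steering. A minimal usage sketch, assuming a caller
that already created an FDB domain and the individual destination and reformat
actions (everything below except the two new API symbols, struct
mlx5dr_action_dest and mlx5dr_action_create_mult_dest_tbl, is hypothetical):

	/* Pair each destination with an optional per-vport reformat action,
	 * as the new struct mlx5dr_action_dest from this series allows.
	 */
	struct mlx5dr_action_dest dests[2] = {
		{ .dest = vport_action, .reformat = vport_reformat_action },
		{ .dest = ft_action },
	};
	struct mlx5dr_action *multi_dest;

	/* FDB domains only; returns NULL on failure. */
	multi_dest = mlx5dr_action_create_mult_dest_tbl(dmn, dests, 2);
	if (!multi_dest)
		return -EOPNOTSUPP;

	/* multi_dest is then passed as one of the actions given to
	 * mlx5dr_rule_create() and released with mlx5dr_action_destroy()
	 * when the rule goes away.
	 */
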
@@ -71,8 +71,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node)
static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node)
{
dma_addr_t t;
@@ -135,7 +135,7 @@ struct page_pool;
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
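
For scale, a quick check of what the "increase the max number of supported
rings" item amounts to, using the macro values from this header:

/* MLX5E_LOG_INDIR_RQT_SIZE = 0x7, so MLX5E_INDIR_RQT_SIZE = BIT(7) = 128.
 * Old ceiling: MLX5E_INDIR_RQT_SIZE >> 1 = 64 channels.
 * New ceiling: MLX5E_INDIR_RQT_SIZE      = 128 channels.
 * The default channel count stays at half the ceiling; see the
 * min_t(..., MLX5E_MAX_NUM_CHANNELS / 2, priv->max_nch) clamp added to
 * mlx5e_build_nic_params() in a later hunk.
 */
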
@@ -1175,11 +1175,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
u16 max_channels, u16 mtu);
u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
@@ -4739,17 +4739,19 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
tirc_default_config[tt].rx_hash_fields;
}
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
struct mlx5e_xsk *xsk,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
u16 max_channels, u16 mtu)
u16 mtu)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = max_channels;
params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
priv->max_nch);
params->num_tc = 1;
/* SQ */
@@ -4986,8 +4988,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err)
return err;
mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
priv->max_nch, netdev->mtu);
mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params,
netdev->mtu);
mlx5e_timestamp_init(priv);
@@ -297,6 +297,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
s->tx_cqes += sq_stats->cqes;
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier();
}
}
}
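
A note on the barrier() added above, part of the stack usage cleanups from
the cover letter: barrier() is the kernel's plain compiler barrier from
include/linux/compiler.h, sketched below. Issuing it once per iteration makes
the compiler commit the accumulated sums to *s each time around the loop
rather than keeping all the partial sums live at once, which is the
excessive-stack-usage behavior tracked in the GCC report linked in the
comment.

/* Kernel definition (for reference): an empty asm whose "memory" clobber
 * forces the compiler to assume any memory may be read or written here,
 * so stores cannot be deferred or merged across it.
 */
#define barrier() __asm__ __volatile__("" : : : "memory")
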
@@ -2842,6 +2842,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
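
The new FLOW_ACTION_ACCEPT case above implements the "accept TC action on the
native NIC netdev" item from the cover letter: an accepted flow is offloaded
as forward-to-destination plus a flow counter. An illustrative command that
would exercise this path (device name and address are placeholders; gact
"pass" is what maps to FLOW_ACTION_ACCEPT):

	tc filter add dev eth0 ingress protocol ip flower \
		dst_ip 192.168.0.1 action pass
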
@@ -156,7 +156,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
cq->comp(cq, eqe);
mlx5_cq_put(cq);
} else {
mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
dev_dbg_ratelimited(eq->dev->device,
"Completion event for bogus CQ 0x%x\n", cqn);
}
++eq->cons_index;
@@ -563,6 +564,39 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
gather_user_async_events(dev, mask);
}
static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
struct mlx5_eq_param *param, const char *name)
{
int err;
eq->irq_nb.notifier_call = mlx5_eq_async_int;
err = create_async_eq(dev, &eq->core, param);
if (err) {
mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
return err;
}
err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
if (err) {
mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
destroy_async_eq(dev, &eq->core);
}
return err;
}
static void cleanup_async_eq(struct mlx5_core_dev *dev,
struct mlx5_eq_async *eq, const char *name)
{
int err;
mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
err = destroy_async_eq(dev, &eq->core);
if (err)
mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
name, err);
}
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -572,77 +606,45 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
err = create_async_eq(dev, &table->cmd_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
goto err0;
}
err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
if (err) {
mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err);
err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
if (err)
goto err1;
}
mlx5_cmd_use_events(dev);
table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = MLX5_NUM_ASYNC_EQE,
};
gather_async_events_mask(dev, param.mask);
err = create_async_eq(dev, &table->async_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
err = setup_async_eq(dev, &table->async_eq, &param, "async");
if (err)
goto err2;
}
err = mlx5_eq_enable(dev, &table->async_eq.core,
&table->async_eq.irq_nb);
if (err) {
mlx5_core_warn(dev, "failed to enable async EQ %d\n", err);
goto err3;
}
table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
err = create_async_eq(dev, &table->pages_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
goto err4;
}
err = mlx5_eq_enable(dev, &table->pages_eq.core,
&table->pages_eq.irq_nb);
if (err) {
mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err);
goto err5;
}
err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
if (err)
goto err3;
return err;
return 0;
err5:
destroy_async_eq(dev, &table->pages_eq.core);
err4:
mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
err3:
destroy_async_eq(dev, &table->async_eq.core);
cleanup_async_eq(dev, &table->async_eq, "async");
err2:
mlx5_cmd_use_polling(dev);
mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
destroy_async_eq(dev, &table->cmd_eq.core);
err0:
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
}
@@ -650,28 +652,11 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
int err;
mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb);
err = destroy_async_eq(dev, &table->pages_eq.core);
if (err)
mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
err);
mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
err = destroy_async_eq(dev, &table->async_eq.core);
if (err)
mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
err);
cleanup_async_eq(dev, &table->pages_eq, "pages");
cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_use_polling(dev);
mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
err = destroy_async_eq(dev, &table->cmd_eq.core);
if (err)
mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
err);
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}
@@ -87,8 +87,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_set_netdev_mtu_boundaries(priv);
netdev->mtu = netdev->max_mtu;
mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
priv->max_nch, netdev->mtu);
mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -690,9 +690,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
/* get the relevant addresses */
if (!action->dest_tbl.fw_tbl.rx_icm_addr) {
ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev,
action->dest_tbl.fw_tbl.ft->type,
action->dest_tbl.fw_tbl.ft->id,
ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
action->dest_tbl.fw_tbl.type,
action->dest_tbl.fw_tbl.id,
&output);
if (!ret) {
action->dest_tbl.fw_tbl.tx_icm_addr =
@@ -982,8 +982,106 @@ mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
}
struct mlx5dr_action *
mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
struct mlx5_core_dev *mdev)
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests)
{
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
bool reformat_req = false;
u32 num_of_ref = 0;
int ret;
int i;
if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
mlx5dr_err(dmn, "Multiple destination support is for FDB only\n");
return NULL;
}
hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL);
if (!hw_dests)
return NULL;
ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL);
if (!ref_actions)
goto free_hw_dests;
for (i = 0; i < num_of_dests; i++) {
struct mlx5dr_action *reformat_action = dests[i].reformat;
struct mlx5dr_action *dest_action = dests[i].dest;
ref_actions[num_of_ref++] = dest_action;
switch (dest_action->action_type) {
case DR_ACTION_TYP_VPORT:
hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
hw_dests[i].vport.num = dest_action->vport.caps->num;
hw_dests[i].vport.vhca_id = dest_action->vport.caps->vhca_gvmi;
if (reformat_action) {
reformat_req = true;
hw_dests[i].vport.reformat_id =
reformat_action->reformat.reformat_id;
ref_actions[num_of_ref++] = reformat_action;
hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
break;
case DR_ACTION_TYP_FT:
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
if (dest_action->dest_tbl.is_fw_tbl)
hw_dests[i].ft_id = dest_action->dest_tbl.fw_tbl.id;
else
hw_dests[i].ft_id = dest_action->dest_tbl.tbl->table_id;
break;
default:
mlx5dr_dbg(dmn, "Invalid multiple destinations action\n");
goto free_ref_actions;
}
}
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
goto free_ref_actions;
ret = mlx5dr_fw_create_md_tbl(dmn,
hw_dests,
num_of_dests,
reformat_req,
&action->dest_tbl.fw_tbl.id,
&action->dest_tbl.fw_tbl.group_id);
if (ret)
goto free_action;
refcount_inc(&dmn->refcount);
for (i = 0; i < num_of_ref; i++)
refcount_inc(&ref_actions[i]->refcount);
action->dest_tbl.is_fw_tbl = true;
action->dest_tbl.fw_tbl.dmn = dmn;
action->dest_tbl.fw_tbl.type = FS_FT_FDB;
action->dest_tbl.fw_tbl.ref_actions = ref_actions;
action->dest_tbl.fw_tbl.num_of_ref_actions = num_of_ref;
kfree(hw_dests);
return action;
free_action:
kfree(action);
free_ref_actions:
kfree(ref_actions);
free_hw_dests:
kfree(hw_dests);
return NULL;
}
struct mlx5dr_action *
mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
struct mlx5_flow_table *ft)
{
struct mlx5dr_action *action;
@@ -992,8 +1090,11 @@ mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
return NULL;
action->dest_tbl.is_fw_tbl = 1;
action->dest_tbl.fw_tbl.ft = ft;
action->dest_tbl.fw_tbl.mdev = mdev;
action->dest_tbl.fw_tbl.type = ft->type;
action->dest_tbl.fw_tbl.id = ft->id;
action->dest_tbl.fw_tbl.dmn = dmn;
refcount_inc(&dmn->refcount);
return action;
}
@@ -1559,8 +1660,26 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
switch (action->action_type) {
case DR_ACTION_TYP_FT:
if (!action->dest_tbl.is_fw_tbl)
if (action->dest_tbl.is_fw_tbl)
refcount_dec(&action->dest_tbl.fw_tbl.dmn->refcount);
else
refcount_dec(&action->dest_tbl.tbl->refcount);
if (action->dest_tbl.is_fw_tbl &&
action->dest_tbl.fw_tbl.num_of_ref_actions) {
struct mlx5dr_action **ref_actions;
int i;
ref_actions = action->dest_tbl.fw_tbl.ref_actions;
for (i = 0; i < action->dest_tbl.fw_tbl.num_of_ref_actions; i++)
refcount_dec(&ref_actions[i]->refcount);
kfree(ref_actions);
mlx5dr_fw_destroy_md_tbl(action->dest_tbl.fw_tbl.dmn,
action->dest_tbl.fw_tbl.id,
action->dest_tbl.fw_tbl.group_id);
}
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
refcount_dec(&action->reformat.dmn->refcount);
@@ -320,12 +320,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
}
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
u32 table_type,
u64 icm_addr_rx,
u64 icm_addr_tx,
u8 level,
bool sw_owner,
bool term_tbl,
struct mlx5dr_cmd_create_flow_table_attr *attr,
u64 *fdb_rx_icm_addr,
u32 *table_id)
{
@@ -335,37 +330,43 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
int err;
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, table_type);
MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_mdev, termination_table, term_tbl);
MLX5_SET(flow_table_context, ft_mdev, sw_owner, sw_owner);
MLX5_SET(flow_table_context, ft_mdev, level, level);
MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
if (sw_owner) {
if (attr->sw_owner) {
/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
* icm_addr_1 used for FDB TX
*/
if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
sw_owner_icm_root_0, icm_addr_rx);
} else if (table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
sw_owner_icm_root_0, attr->icm_addr_rx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
sw_owner_icm_root_0, icm_addr_tx);
} else if (table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
sw_owner_icm_root_0, attr->icm_addr_tx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
sw_owner_icm_root_0, icm_addr_rx);
sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
sw_owner_icm_root_1, icm_addr_tx);
sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
attr->decap_en);
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
attr->reformat_en);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
*table_id = MLX5_GET(create_flow_table_out, out, table_id);
if (!sw_owner && table_type == MLX5_FLOW_TABLE_TYPE_FDB)
if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
fdb_rx_icm_addr)
*fdb_rx_icm_addr =
(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
@@ -478,3 +479,208 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
return 0;
}
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
struct mlx5dr_cmd_fte_info *fte,
bool *extended_dest)
{
int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
int num_fwd_destinations = 0;
int num_encap = 0;
int i;
*extended_dest = false;
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
for (i = 0; i < fte->dests_size; i++) {
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++;
num_fwd_destinations++;
}
if (num_fwd_destinations > 1 && num_encap > 0)
*extended_dest = true;
if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
mlx5_core_warn(dev, "FW does not support extended destination");
return -EOPNOTSUPP;
}
if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
mlx5_core_warn(dev, "FW does not support more than %d encaps",
1 << fw_log_max_fdb_encap_uplink);
return -EOPNOTSUPP;
}
return 0;
}
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5dr_cmd_ft_info *ft,
u32 group_id,
struct mlx5dr_cmd_fte_info *fte)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
void *in_flow_context, *vlan;
bool extended_dest = false;
void *in_match_value;
unsigned int inlen;
int dst_cnt_size;
void *in_dests;
u32 *in;
int err;
int i;
if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
return -EOPNOTSUPP;
if (!extended_dest)
dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
else
dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
if (ft->vport) {
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, 1);
}
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag,
fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
if (extended_dest) {
u32 action;
action = fte->action.action &
~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
MLX5_SET(flow_context, in_flow_context, action, action);
} else {
MLX5_SET(flow_context, in_flow_context, action,
fte->action.action);
if (fte->action.pkt_reformat)
MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
fte->action.pkt_reformat->id);
}
if (fte->action.modify_hdr)
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
for (i = 0; i < fte->dests_size; i++) {
unsigned int id, type = fte->dest_arr[i].type;
if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
id = fte->dest_arr[i].ft_num;
type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = fte->dest_arr[i].ft_id;
break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
id = fte->dest_arr[i].vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(fte->dest_arr[i].vport.flags &
MLX5_FLOW_DEST_VPORT_VHCA_ID));
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
fte->dest_arr[i].vport.vhca_id);
if (extended_dest && (fte->dest_arr[i].vport.flags &
MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
MLX5_SET(dest_format_struct, in_dests,
packet_reformat,
!!(fte->dest_arr[i].vport.flags &
MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
MLX5_SET(extended_dest_format, in_dests,
packet_reformat_id,
fte->dest_arr[i].vport.reformat_id);
}
break;
default:
id = fte->dest_arr[i].tir_num;
}
MLX5_SET(dest_format_struct, in_dests, destination_type,
type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += dst_cnt_size;
list_size++;
}
MLX5_SET(flow_context, in_flow_context, destination_list_size,
list_size);
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
int list_size = 0;
for (i = 0; i < fte->dests_size; i++) {
if (fte->dest_arr[i].type !=
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
fte->dest_arr[i].counter_id);
in_dests += dst_cnt_size;
list_size++;
}
if (list_size > max_list_size) {
err = -EINVAL;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
list_size);
}
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
kvfree(in);
return err;
}
@@ -7,6 +7,7 @@
struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
u32 table_id, group_id, modify_hdr_id;
u64 rx_icm_addr, modify_ttl_action;
@@ -16,9 +17,14 @@ mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
if (!recalc_cs_ft)
return NULL;
ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
0, 0, dmn->info.caps.max_ft_level - 1,
false, true, &rx_icm_addr, &table_id);
ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
ft_attr.level = dmn->info.caps.max_ft_level - 1;
ft_attr.term_tbl = true;
ret = mlx5dr_cmd_create_flow_table(dmn->mdev,
&ft_attr,
&rx_icm_addr,
&table_id);
if (ret) {
mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);
goto free_ttl_tbl;
@@ -91,3 +97,70 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
kfree(recalc_cs_ft);
}
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_cmd_flow_destination_hw_info *dest,
int num_dest,
bool reformat_req,
u32 *tbl_id,
u32 *group_id)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {};
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM] = {};
struct mlx5dr_cmd_ft_info ft_info = {};
int ret;
ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
ft_attr.level = dmn->info.caps.max_ft_level - 2;
ft_attr.reformat_en = reformat_req;
ft_attr.decap_en = reformat_req;
ret = mlx5dr_cmd_create_flow_table(dmn->mdev, &ft_attr, NULL, tbl_id);
if (ret) {
mlx5dr_err(dmn, "Failed creating multi dest FW flow table %d\n", ret);
return ret;
}
ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
MLX5_FLOW_TABLE_TYPE_FDB,
*tbl_id, group_id);
if (ret) {
mlx5dr_err(dmn, "Failed creating multi dest FW flow group %d\n", ret);
goto free_flow_table;
}
ft_info.id = *tbl_id;
ft_info.type = FS_FT_FDB;
fte_info.action.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_info.dests_size = num_dest;
fte_info.val = val;
fte_info.dest_arr = dest;
ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) {
mlx5dr_err(dmn, "Failed setting fte into table %d\n", ret);
goto free_flow_group;
}
return 0;
free_flow_group:
mlx5dr_cmd_destroy_flow_group(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
*tbl_id, *group_id);
free_flow_table:
mlx5dr_cmd_destroy_flow_table(dmn->mdev, *tbl_id,
MLX5_FLOW_TABLE_TYPE_FDB);
return ret;
}
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn,
u32 tbl_id, u32 group_id)
{
mlx5dr_cmd_del_flow_table_entry(dmn->mdev, FS_FT_FDB, tbl_id);
mlx5dr_cmd_destroy_flow_group(dmn->mdev,
MLX5_FLOW_TABLE_TYPE_FDB,
tbl_id, group_id);
mlx5dr_cmd_destroy_flow_table(dmn->mdev, tbl_id,
MLX5_FLOW_TABLE_TYPE_FDB);
}
@@ -211,6 +211,9 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
{
bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
u64 icm_addr_rx = 0;
u64 icm_addr_tx = 0;
int ret;
@@ -221,18 +224,21 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
if (tbl->tx.s_anchor)
icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
tbl->table_type,
icm_addr_rx,
icm_addr_tx,
tbl->dmn->info.caps.max_ft_level - 1,
true, false, NULL,
&tbl->table_id);
ft_attr.table_type = tbl->table_type;
ft_attr.icm_addr_rx = icm_addr_rx;
ft_attr.icm_addr_tx = icm_addr_tx;
ft_attr.level = tbl->dmn->info.caps.max_ft_level - 1;
ft_attr.sw_owner = true;
ft_attr.decap_en = en_decap;
ft_attr.reformat_en = en_encap;
ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
NULL, &tbl->table_id);
return ret;
}
struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
{
struct mlx5dr_table *tbl;
int ret;
@@ -245,6 +251,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level)
tbl->dmn = dmn;
tbl->level = level;
tbl->flags = flags;
refcount_set(&tbl->refcount, 1);
ret = dr_table_init(tbl);
......
@@ -679,6 +679,7 @@ struct mlx5dr_table {
u32 level;
u32 table_type;
u32 table_id;
u32 flags;
struct list_head matcher_list;
struct mlx5dr_action *miss_action;
refcount_t refcount;
@@ -742,10 +743,14 @@ struct mlx5dr_action {
union {
struct mlx5dr_table *tbl;
struct {
struct mlx5_flow_table *ft;
struct mlx5dr_domain *dmn;
u32 id;
u32 group_id;
enum fs_flow_table_type type;
u64 rx_icm_addr;
u64 tx_icm_addr;
struct mlx5_core_dev *mdev;
struct mlx5dr_action **ref_actions;
u32 num_of_ref_actions;
} fw_tbl;
};
} dest_tbl;
@@ -867,6 +872,17 @@ struct mlx5dr_cmd_query_flow_table_details {
u64 sw_owner_icm_root_0;
};
struct mlx5dr_cmd_create_flow_table_attr {
u32 table_type;
u64 icm_addr_rx;
u64 icm_addr_tx;
u8 level;
bool sw_owner;
bool term_tbl;
bool decap_en;
bool reformat_en;
};
/* internal API functions */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
struct mlx5dr_cmd_caps *caps);
@@ -904,12 +920,7 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id);
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
u32 table_type,
u64 icm_addr_rx,
u64 icm_addr_tx,
u8 level,
bool sw_owner,
bool term_tbl,
struct mlx5dr_cmd_create_flow_table_attr *attr,
u64 *fdb_rx_icm_addr,
u32 *table_id);
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
@@ -1051,6 +1062,43 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action);
struct mlx5dr_cmd_ft_info {
u32 id;
u16 vport;
enum fs_flow_table_type type;
};
struct mlx5dr_cmd_flow_destination_hw_info {
enum mlx5_flow_destination_type type;
union {
u32 tir_num;
u32 ft_num;
u32 ft_id;
u32 counter_id;
struct {
u16 num;
u16 vhca_id;
u32 reformat_id;
u8 flags;
} vport;
};
};
struct mlx5dr_cmd_fte_info {
u32 dests_size;
u32 index;
struct mlx5_flow_context flow_context;
u32 *val;
struct mlx5_flow_act action;
struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
};
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5dr_cmd_ft_info *ft,
u32 group_id,
struct mlx5dr_cmd_fte_info *fte);
struct mlx5dr_fw_recalc_cs_ft {
u64 rx_icm_addr;
u32 table_id;
@@ -1065,4 +1113,12 @@ void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
u32 vport_num,
u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_cmd_flow_destination_hw_info *dest,
int num_dest,
bool reformat_req,
u32 *tbl_id,
u32 *group_id);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
#endif /* _DR_TYPES_H_ */
@@ -74,7 +74,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
next_ft);
tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
ft->level);
ft->level, ft->flags);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
@@ -184,13 +184,13 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
dest_attr->vport.vhca_id);
}
static struct mlx5dr_action *create_ft_action(struct mlx5_core_dev *dev,
static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
if (mlx5_dr_is_fw_table(dest_ft->flags))
return mlx5dr_create_action_dest_flow_fw_table(dest_ft, dev);
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}
@@ -206,6 +206,12 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai
return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
}
static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
{
return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
@@ -213,7 +219,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte *fte)
{
struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
struct mlx5dr_action *term_action = NULL;
struct mlx5dr_action_dest *term_actions;
struct mlx5dr_match_parameters params;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5dr_action **fs_dr_actions;
@@ -223,6 +229,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_rule *rule;
struct mlx5_flow_rule *dst;
int fs_dr_num_actions = 0;
int num_term_actions = 0;
int num_actions = 0;
size_t match_sz;
int err = 0;
@@ -233,18 +240,38 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
GFP_KERNEL);
if (!actions)
return -ENOMEM;
if (!actions) {
err = -ENOMEM;
goto out_err;
}
fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
sizeof(*fs_dr_actions), GFP_KERNEL);
if (!fs_dr_actions) {
kfree(actions);
return -ENOMEM;
err = -ENOMEM;
goto free_actions_alloc;
}
term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
sizeof(*term_actions), GFP_KERNEL);
if (!term_actions) {
err = -ENOMEM;
goto free_fs_dr_actions_alloc;
}
match_sz = sizeof(fte->val);
/* Drop the reformat action bit if a destination vport is set with reformat */
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
if (!contain_vport_reformat_action(dst))
continue;
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
break;
}
}
/* The order of the actions must be kept; only the following
* order is supported by SW steering:
* TX: push vlan -> modify header -> encap
@@ -335,7 +362,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_action = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
}
if (fte->flow_context.flow_tag) {
@@ -354,7 +381,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -ENOSPC;
goto free_actions;
}
@@ -373,13 +401,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
tmp_action = create_ft_action(dev, dst);
tmp_action = create_ft_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_action = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
tmp_action = create_vport_action(domain, dst);
@@ -388,7 +416,14 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_action = tmp_action;
term_actions[num_term_actions].dest = tmp_action;
if (dst->dest_attr.vport.flags &
MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
term_actions[num_term_actions].reformat =
dst->dest_attr.vport.pkt_reformat->action.dr_action;
num_term_actions++;
break;
default:
err = -EOPNOTSUPP;
@@ -399,9 +434,22 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (term_action)
actions[num_actions++] = term_action;
if (num_term_actions == 1) {
if (term_actions->reformat)
actions[num_actions++] = term_actions->reformat;
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
&params,
@@ -412,7 +460,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
kfree(term_actions);
kfree(actions);
fte->fs_dr_rule.dr_rule = rule;
fte->fs_dr_rule.num_actions = fs_dr_num_actions;
fte->fs_dr_rule.dr_actions = fs_dr_actions;
@@ -420,13 +470,18 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
for (i = 0; i < fs_dr_num_actions; i++)
/* Free in reverse order to handle action dependencies */
for (i = fs_dr_num_actions - 1; i >= 0; i--)
if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
mlx5dr_action_destroy(fs_dr_actions[i]);
mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
kfree(actions);
kfree(term_actions);
free_fs_dr_actions_alloc:
kfree(fs_dr_actions);
free_actions_alloc:
kfree(actions);
out_err:
mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
return err;
}
@@ -533,7 +588,8 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
if (err)
return err;
for (i = 0; i < rule->num_actions; i++)
/* Free in reverse order to handle action dependencies */
for (i = rule->num_actions - 1; i >= 0; i--)
if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
mlx5dr_action_destroy(rule->dr_actions[i]);
@@ -33,6 +33,11 @@ struct mlx5dr_match_parameters {
u64 *match_buf; /* Device spec format */
};
struct mlx5dr_action_dest {
struct mlx5dr_action *dest;
struct mlx5dr_action *reformat;
};
#ifdef CONFIG_MLX5_SW_STEERING
struct mlx5dr_domain *
@@ -46,7 +51,7 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn);
struct mlx5dr_table *
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level);
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
int mlx5dr_table_destroy(struct mlx5dr_table *table);
@@ -75,14 +80,19 @@ struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
struct mlx5dr_action *
mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
struct mlx5_core_dev *mdev);
mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
struct mlx5_flow_table *ft);
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u32 vport, u8 vhca_id_valid,
u16 vhca_id);
struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests);
struct mlx5dr_action *mlx5dr_action_create_drop(void);
struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
@@ -131,7 +141,7 @@ mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn) { }
static inline struct mlx5dr_table *
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level) { return NULL; }
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; }
static inline int
mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
@@ -165,14 +175,19 @@ static inline struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_create_action_dest_flow_fw_table(struct mlx5_flow_table *ft,
struct mlx5_core_dev *mdev) { return NULL; }
mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
struct mlx5_flow_table *ft) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u32 vport, u8 vhca_id_valid,
u16 vhca_id) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_drop(void) { return NULL; }
@@ -928,8 +928,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);