Commit 6c225706 authored by David S. Miller

Merge tag 'mlx5e-pedit' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Or Gerlitz says:

====================
mlx5e-pedit 2017-03-28

This series adds support for offloading modifications of packet headers using
ConnectX-5 HW header re-write as an action applied during packet steering.

The offloaded SW mechanism is TC's pedit action. The offloading is
supported for E-Switch steering of VF traffic in the SR-IOV
switchdev mode and for NIC (non-eswitch) RX.

One use case for this offload on virtual networks is when the hypervisor
implements a flow-based router such as OpenStack's DVR, where the L2 headers
of guest packets are re-written with the routers' MAC addresses and the IP TTL
is decremented.

Another use case (which can be applied in parallel with routing) is
stateless NAT, where guest L3/L4 headers are re-written.

The series is built as follows: the first six patches are preparations which
don't yet add new functionality, patches 7-8 add the FW APIs (data structures
and commands) for header re-write, and patch 9 allows an offloading driver
to access the pedit keys.
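
(For reference, with patch 9's helpers an offloading driver can walk the pedit
keys roughly as in the sketch below. This is a simplified illustration rather
than the exact mlx5 code; the parse_one_key callback type is hypothetical.)

#include <net/act_api.h>
#include <net/tc_act/tc_pedit.h>

/* hypothetical per-key translator supplied by the offloading driver */
typedef int (*pedit_key_cb)(u32 htype, u32 cmd, u32 mask, u32 val, u32 offset);

/* Sketch: iterate the pedit keys of a TC action and hand each
 * (header type, command, mask, value, offset) tuple to the translator.
 */
static int walk_pedit_keys(const struct tc_action *a, pedit_key_cb parse_one_key)
{
	int i, err;

	if (!is_tcf_pedit(a))
		return -EOPNOTSUPP;

	for (i = 0; i < tcf_pedit_nkeys(a); i++) {
		u32 htype = tcf_pedit_htype(a, i);  /* e.g. TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 */
		u32 cmd = tcf_pedit_cmd(a, i);      /* TCA_PEDIT_KEY_EX_CMD_SET / _ADD */
		u32 mask = tcf_pedit_mask(a, i);    /* mask/val as carried in the pedit key */
		u32 val = tcf_pedit_val(a, i);
		u32 offset = tcf_pedit_offset(a, i);

		err = parse_one_key(htype, cmd, mask, val, offset);
		if (err)
			return err;
	}
	return 0;
}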

The 10th patch is in a sense the core of the series: it translates from the
pedit representation of a set of header modification elements to the FW API
that expresses the same thing.
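
(As an illustration of that translation, each "set" element ends up encoded in
the set_action_in layout this series adds to mlx5_ifc.h. Below is a minimal,
hedged sketch that encodes one action rewriting the outer IP TTL; the real
driver derives offset/length/data from the pedit mask and the field's bit
width, so the encoding here is simplified.)

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Sketch: fill one entry of the actions array that is later passed to
 * mlx5_modify_header_alloc(). This entry sets the outer IP TTL.
 */
static void encode_set_ttl(void *action, u8 new_ttl)
{
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
	MLX5_SET(set_action_in, action, offset, 0);	/* start bit within the field */
	MLX5_SET(set_action_in, action, length, 8);	/* TTL is an 8-bit field */
	MLX5_SET(set_action_in, action, data, new_ttl);	/* new value to write */
}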

Once a set of HW modifications is established, we register it with the FW
and get a modify header ID. When this ID is used with an action during
packet steering, the HW applies the header modifications to the packet.
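
(Concretely, with the driver helpers added by this series — declared in
mlx5_core.h — this looks roughly like the sketch below: allocate a modify
header context from the FW and attach the returned ID to the rule's flow_act.
A simplified sketch only; 'actions'/'num_actions' are assumed to have been
built as above, and rule teardown is only hinted at in the comments.)

static int attach_mod_hdr(struct mlx5_core_dev *mdev,
			  struct mlx5_flow_act *flow_act,
			  int num_actions, void *actions)
{
	u32 mod_hdr_id;
	int err;

	/* register the set of modifications with the FW, get back an ID */
	err = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
				       num_actions, actions, &mod_hdr_id);
	if (err)
		return err;

	/* reference the ID from the steering action of the rule */
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act->modify_id = mod_hdr_id;

	/* ...mlx5_add_flow_rules(ft, spec, flow_act, &dest, 1)...
	 * and mlx5_modify_header_dealloc(mdev, mod_hdr_id) on teardown.
	 */
	return 0;
}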

Patches 11 and 12 implement the above logic as an offload of the pedit action
for the NIC and E-Switch use cases.

I'd like to thank Elijah Shakkour <elijahs@mellanox.com> for implementing
and helping me test this functionality on a HW simulator, before it could
be done with FW.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e944e97a d7e75a32
@@ -279,6 +279,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
 	case MLX5_CMD_OP_DESTROY_DCT:
 	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
 	case MLX5_CMD_OP_DEALLOC_PD:
 	case MLX5_CMD_OP_DEALLOC_UAR:
 	case MLX5_CMD_OP_DETACH_FROM_MCG:
@@ -305,8 +307,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
 	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
-	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
-	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
+	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
 		return MLX5_CMD_STAT_OK;
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -363,6 +364,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
 	case MLX5_CMD_OP_SET_RATE_LIMIT:
 	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
+	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
 	case MLX5_CMD_OP_ALLOC_PD:
 	case MLX5_CMD_OP_ALLOC_UAR:
 	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -414,10 +419,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
-	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
-	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
-	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
-	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
+	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
 		*status = MLX5_DRIVER_STATUS_ABORTED;
 		*synd = MLX5_DRIVER_SYND;
 		return -EIO;
@@ -501,6 +503,12 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
 	MLX5_COMMAND_STR_CASE(ALLOC_PD);
 	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
 	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
@@ -576,12 +584,8 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
 	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
-	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
-	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
-	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
-	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
-	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
-	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
+	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
+	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
 	default: return "unknown command opcode";
 	}
 }
...
@@ -174,13 +174,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
 				 enum arfs_type type)
 {
 	struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
-	struct mlx5_flow_destination dest;
 	struct mlx5e_tir *tir = priv->indir_tir;
+	struct mlx5_flow_destination dest;
+	MLX5_DECLARE_FLOW_ACT(flow_act);
 	struct mlx5_flow_spec *spec;
 	int err = 0;
@@ -469,15 +465,11 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
 					      struct arfs_rule *arfs_rule)
 {
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
 	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
 	struct arfs_tuple *tuple = &arfs_rule->tuple;
 	struct mlx5_flow_handle *rule = NULL;
 	struct mlx5_flow_destination dest;
+	MLX5_DECLARE_FLOW_ACT(flow_act);
 	struct arfs_table *arfs_table;
 	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_table *ft;
...
@@ -159,14 +159,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 				 enum mlx5e_vlan_rule_type rule_type,
 				 u16 vid, struct mlx5_flow_spec *spec)
 {
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
 	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
 	struct mlx5_flow_destination dest;
 	struct mlx5_flow_handle **rule_p;
+	MLX5_DECLARE_FLOW_ACT(flow_act);
 	int err = 0;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -659,11 +655,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
 			u16 etype,
 			u8 proto)
 {
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
+	MLX5_DECLARE_FLOW_ACT(flow_act);
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
 	int err = 0;
@@ -848,13 +840,9 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 				  struct mlx5e_l2_rule *ai, int type)
 {
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
 	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
 	struct mlx5_flow_destination dest;
+	MLX5_DECLARE_FLOW_ACT(flow_act);
 	struct mlx5_flow_spec *spec;
 	int err = 0;
 	u8 *mc_dmac;
...
@@ -285,8 +285,8 @@ enum {
 	SET_VLAN_INSERT = BIT(1)
 };
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x40
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x4000
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000
 struct mlx5_encap_entry {
 	struct hlist_node encap_hlist;
@@ -308,6 +308,7 @@ struct mlx5_esw_flow_attr {
 	u16 vlan;
 	bool vlan_handled;
 	struct mlx5_encap_entry *encap;
+	u32 mod_hdr_id;
 };
 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
...
@@ -68,8 +68,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	}
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 		counter = mlx5_fc_create(esw->dev, true);
-		if (IS_ERR(counter))
-			return ERR_CAST(counter);
+		if (IS_ERR(counter)) {
+			rule = ERR_CAST(counter);
+			goto err_counter_alloc;
+		}
 		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 		dest[i].counter = counter;
 		i++;
@@ -86,17 +88,25 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
 		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
-	if (attr->encap)
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+		flow_act.modify_id = attr->mod_hdr_id;
+
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
 		flow_act.encap_id = attr->encap->encap_id;
 	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
 				   spec, &flow_act, dest, i);
 	if (IS_ERR(rule))
-		mlx5_fc_destroy(esw->dev, counter);
+		goto err_add_rule;
 	else
 		esw->offloads.num_flows++;
 	return rule;
+
+err_add_rule:
+	mlx5_fc_destroy(esw->dev, counter);
+err_counter_alloc:
+	return rule;
 }
@@ -106,12 +116,10 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
 {
 	struct mlx5_fc *counter = NULL;
-	if (!IS_ERR(rule)) {
-		counter = mlx5_flow_rule_counter(rule);
-		mlx5_del_flow_rules(rule);
-		mlx5_fc_destroy(esw->dev, counter);
-		esw->offloads.num_flows--;
-	}
+	counter = mlx5_flow_rule_counter(rule);
+	mlx5_del_flow_rules(rule);
+	mlx5_fc_destroy(esw->dev, counter);
+	esw->offloads.num_flows--;
 }
 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
...
@@ -249,6 +249,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
 	MLX5_SET(flow_context, in_flow_context, action, fte->action);
 	MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
+	MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
 	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
 				      match_value);
 	memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
@@ -515,3 +516,69 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
 	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
+
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+			     u8 namespace, u8 num_actions,
+			     void *modify_actions, u32 *modify_header_id)
+{
+	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
+	int max_actions, actions_size, inlen, err;
+	void *actions_in;
+	u8 table_type;
+	u32 *in;
+
+	switch (namespace) {
+	case MLX5_FLOW_NAMESPACE_FDB:
+		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
+		table_type = FS_FT_FDB;
+		break;
+	case MLX5_FLOW_NAMESPACE_KERNEL:
+		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
+		table_type = FS_FT_NIC_RX;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (num_actions > max_actions) {
+		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
+			       num_actions, max_actions);
+		return -EOPNOTSUPP;
+	}
+
+	actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
+	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
+
+	in = kzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(alloc_modify_header_context_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
+	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
+	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
+
+	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
+	memcpy(actions_in, modify_actions, actions_size);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
+	kfree(in);
+	return err;
+}
+
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
+	u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
+		 modify_header_id);
+
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
...
@@ -476,6 +476,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
 	fte->index = index;
 	fte->action = flow_act->action;
 	fte->encap_id = flow_act->encap_id;
+	fte->modify_id = flow_act->modify_id;
 	return fte;
 }
...
@@ -152,6 +152,7 @@ struct fs_fte {
 	u32 index;
 	u32 action;
 	u32 encap_id;
+	u32 modify_id;
 	enum fs_fte_status status;
 	struct mlx5_fc *counter;
 };
...
@@ -141,6 +141,11 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev,
 		     u32 *encap_id);
 void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
+
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+			     u8 namespace, u8 num_actions,
+			     void *modify_actions, u32 *modify_header_id);
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);
 bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
 int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
...
@@ -134,8 +134,13 @@ struct mlx5_flow_act {
 	u32 action;
 	u32 flow_tag;
 	u32 encap_id;
+	u32 modify_id;
 };
+
+#define MLX5_DECLARE_FLOW_ACT(name) \
+	struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+				     MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
 /* Single destination per rule.
  * Group ID is implied by the match criteria.
  */
@@ -156,5 +161,4 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
 void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
 			  u64 *bytes, u64 *packets, u64 *lastuse);
-
 #endif
...
@@ -227,6 +227,8 @@ enum {
 	MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
 	MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
 	MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+	MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
+	MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
 	MLX5_CMD_OP_MAX
 };
@@ -302,7 +304,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8         reserved_at_20[0x2];
 	u8         log_max_ft_size[0x6];
-	u8         reserved_at_28[0x10];
+	u8         log_max_modify_header_context[0x8];
+	u8         max_modify_header_actions[0x8];
 	u8         max_ft_level[0x8];
 	u8         reserved_at_40[0x20];
@@ -2190,6 +2193,7 @@ enum {
 	MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
 	MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
 	MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
+	MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
 };
 struct mlx5_ifc_flow_context_bits {
@@ -2211,7 +2215,9 @@ struct mlx5_ifc_flow_context_bits {
 	u8         encap_id[0x20];
-	u8         reserved_at_e0[0x120];
+	u8         modify_header_id[0x20];
+	u8         reserved_at_100[0x100];
 	struct mlx5_ifc_fte_match_param_bits match_value;
@@ -4534,6 +4540,109 @@ struct mlx5_ifc_dealloc_encap_header_in_bits {
 	u8         reserved_60[0x20];
 };
+
+struct mlx5_ifc_set_action_in_bits {
+	u8         action_type[0x4];
+	u8         field[0xc];
+	u8         reserved_at_10[0x3];
+	u8         offset[0x5];
+	u8         reserved_at_18[0x3];
+	u8         length[0x5];
+	u8         data[0x20];
+};
+
+struct mlx5_ifc_add_action_in_bits {
+	u8         action_type[0x4];
+	u8         field[0xc];
+	u8         reserved_at_10[0x10];
+	u8         data[0x20];
+};
+
+union mlx5_ifc_set_action_in_add_action_in_auto_bits {
+	struct mlx5_ifc_set_action_in_bits set_action_in;
+	struct mlx5_ifc_add_action_in_bits add_action_in;
+	u8         reserved_at_0[0x40];
+};
+
+enum {
+	MLX5_ACTION_TYPE_SET = 0x1,
+	MLX5_ACTION_TYPE_ADD = 0x2,
+};
+
+enum {
+	MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 = 0x1,
+	MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0 = 0x2,
+	MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE = 0x3,
+	MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16 = 0x4,
+	MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0 = 0x5,
+	MLX5_ACTION_IN_FIELD_OUT_IP_DSCP = 0x6,
+	MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS = 0x7,
+	MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT = 0x8,
+	MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT = 0x9,
+	MLX5_ACTION_IN_FIELD_OUT_IP_TTL = 0xa,
+	MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT = 0xb,
+	MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT = 0xc,
+	MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96 = 0xd,
+	MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64 = 0xe,
+	MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32 = 0xf,
+	MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0 = 0x10,
+	MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96 = 0x11,
+	MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64 = 0x12,
+	MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32 = 0x13,
+	MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14,
+	MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15,
+	MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
+};
+
+struct mlx5_ifc_alloc_modify_header_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+	u8         syndrome[0x20];
+	u8         modify_header_id[0x20];
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_modify_header_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+	u8         reserved_at_40[0x20];
+	u8         table_type[0x8];
+	u8         reserved_at_68[0x10];
+	u8         num_of_actions[0x8];
+	union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0];
+};
+
+struct mlx5_ifc_dealloc_modify_header_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+	u8         syndrome[0x20];
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_modify_header_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+	u8         modify_header_id[0x20];
+	u8         reserved_at_60[0x20];
+};
+
 struct mlx5_ifc_query_dct_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
...
@@ -2,6 +2,7 @@
 #define __NET_TC_PED_H
 #include <net/act_api.h>
+#include <linux/tc_act/tc_pedit.h>
 struct tcf_pedit_key_ex {
 	enum pedit_header_type htype;
@@ -17,4 +18,48 @@ struct tcf_pedit {
 };
 #define to_pedit(a) ((struct tcf_pedit *)a)
+
+static inline bool is_tcf_pedit(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	if (a->ops && a->ops->type == TCA_ACT_PEDIT)
+		return true;
+#endif
+	return false;
+}
+
+static inline int tcf_pedit_nkeys(const struct tc_action *a)
+{
+	return to_pedit(a)->tcfp_nkeys;
+}
+
+static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
+{
+	if (to_pedit(a)->tcfp_keys_ex)
+		return to_pedit(a)->tcfp_keys_ex[index].htype;
+
+	return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+}
+
+static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
+{
+	if (to_pedit(a)->tcfp_keys_ex)
+		return to_pedit(a)->tcfp_keys_ex[index].cmd;
+
+	return __PEDIT_CMD_MAX;
+}
+
+static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
+{
+	return to_pedit(a)->tcfp_keys[index].mask;
+}
+
+static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
+{
+	return to_pedit(a)->tcfp_keys[index].val;
+}
+
+static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
+{
+	return to_pedit(a)->tcfp_keys[index].off;
+}
 #endif /* __NET_TC_PED_H */