Commit 137f3d50 authored by Jianbo Liu, committed by Jakub Kicinski

net/mlx5: Support matching on l4_type for ttc_table

Replace matching on the TCP and UDP protocols with the new l4_type
field, which is parsed by steering, for the ttc_table. Matching on
l4_type is enabled by the outer_l4_type or inner_l4_type bit in the
nic_rx or port_sel flow table capabilities, and it is used only if the
pcc_ifa2 bit in the HCA capabilities is set.
Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20240402133043.56322-10-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8c54c89a
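Before the diff itself, a minimal, self-contained C sketch of the classification choice this patch introduces, modelled on the new mlx5_fs_ttc_set_match_proto() helper further down: when the l4_type capability is usable, TCP and UDP rules match on the steering-parsed l4_type field, while everything else still matches on ip_protocol. The struct and helper names below are illustrative stand-ins rather than the driver's API; only the MLX5_PACKET_L4_TYPE_* semantics come from the patch.

/* Simplified model of the l4_type vs. ip_protocol selection; not driver code. */
#include <stdio.h>
#include <netinet/in.h>   /* IPPROTO_TCP, IPPROTO_UDP, IPPROTO_ICMP */

enum packet_l4_type {     /* mirrors MLX5_PACKET_L4_TYPE_{NONE,TCP,UDP} */
	PACKET_L4_TYPE_NONE,
	PACKET_L4_TYPE_TCP,
	PACKET_L4_TYPE_UDP,
};

struct match {                       /* stand-in for the fte_match_set_lyr_2_4 fields */
	int use_l4_type;             /* criteria: match on the parsed l4_type ...     */
	enum packet_l4_type l4_type;
	int use_ip_protocol;         /* ... or fall back to the ip_protocol field     */
	unsigned char ip_protocol;
};

static void set_match_proto(struct match *m, unsigned char proto, int use_l4_type)
{
	if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
		m->use_l4_type = 1;
		m->l4_type = (proto == IPPROTO_TCP) ? PACKET_L4_TYPE_TCP
						    : PACKET_L4_TYPE_UDP;
	} else {
		m->use_ip_protocol = 1;
		m->ip_protocol = proto;
	}
}

int main(void)
{
	struct match m1 = { 0 }, m2 = { 0 };

	set_match_proto(&m1, IPPROTO_UDP, 1);   /* capability set: match l4_type */
	printf("l4_type match: %d (UDP=%d)\n", m1.use_l4_type,
	       m1.l4_type == PACKET_L4_TYPE_UDP);

	set_match_proto(&m2, IPPROTO_ICMP, 1);  /* non-TCP/UDP still uses ip_protocol */
	printf("ip_protocol match: %d proto=%d\n", m2.use_ip_protocol, m2.ip_protocol);
	return 0;
}

Splitting TCP/UDP off this way is also why the flow-group layouts in fs_ttc.c below gain a dedicated "TCP UDP group" ahead of the former L4 group.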
@@ -896,8 +896,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
 	int tt;
 
 	memset(ttc_params, 0, sizeof(*ttc_params));
-	ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
-						 MLX5_FLOW_NAMESPACE_KERNEL);
+	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
 	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
 	ft_attr->prio = MLX5E_NIC_PRIO;
 
@@ -920,8 +919,7 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
 	int tt;
 
 	memset(ttc_params, 0, sizeof(*ttc_params));
-	ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
-						 MLX5_FLOW_NAMESPACE_KERNEL);
+	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
 	ft_attr->level = MLX5E_TTC_FT_LEVEL;
 	ft_attr->prio = MLX5E_NIC_PRIO;
 
...
@@ -835,8 +835,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
 
 	memset(ttc_params, 0, sizeof(*ttc_params));
 
-	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
-						 MLX5_FLOW_NAMESPACE_KERNEL);
+	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
 	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
 		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 		ttc_params->dests[tt].tir_num =
...
@@ -449,13 +449,11 @@ static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
 static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
 					  struct ttc_params *ttc_params)
 {
-	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
 	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
 	struct mlx5_flow_table_attr *ft_attr;
 	int tt;
 
-	ttc_params->ns = mlx5_get_flow_namespace(dev,
-						 MLX5_FLOW_NAMESPACE_PORT_SEL);
+	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
 	ft_attr = &ttc_params->ft_attr;
 	ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
 
@@ -470,13 +468,11 @@ static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
 static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
 					  struct ttc_params *ttc_params)
 {
-	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
 	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
 	struct mlx5_flow_table_attr *ft_attr;
 	int tt;
 
-	ttc_params->ns = mlx5_get_flow_namespace(dev,
-						 MLX5_FLOW_NAMESPACE_PORT_SEL);
+	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
 	ft_attr = &ttc_params->ft_attr;
 	ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
 
...
@@ -9,21 +9,24 @@
 #include "mlx5_core.h"
 #include "lib/fs_ttc.h"
 
-#define MLX5_TTC_NUM_GROUPS	3
-#define MLX5_TTC_GROUP1_SIZE	(BIT(3) + MLX5_NUM_TUNNEL_TT)
-#define MLX5_TTC_GROUP2_SIZE	BIT(1)
-#define MLX5_TTC_GROUP3_SIZE	BIT(0)
-#define MLX5_TTC_TABLE_SIZE	(MLX5_TTC_GROUP1_SIZE +\
-				 MLX5_TTC_GROUP2_SIZE +\
-				 MLX5_TTC_GROUP3_SIZE)
-
-#define MLX5_INNER_TTC_NUM_GROUPS	3
-#define MLX5_INNER_TTC_GROUP1_SIZE	BIT(3)
-#define MLX5_INNER_TTC_GROUP2_SIZE	BIT(1)
-#define MLX5_INNER_TTC_GROUP3_SIZE	BIT(0)
-#define MLX5_INNER_TTC_TABLE_SIZE	(MLX5_INNER_TTC_GROUP1_SIZE +\
-					 MLX5_INNER_TTC_GROUP2_SIZE +\
-					 MLX5_INNER_TTC_GROUP3_SIZE)
+#define MLX5_TTC_MAX_NUM_GROUPS		4
+#define MLX5_TTC_GROUP_TCPUDP_SIZE	(MLX5_TT_IPV6_UDP + 1)
+
+struct mlx5_fs_ttc_groups {
+	bool use_l4_type;
+	int num_groups;
+	int group_size[MLX5_TTC_MAX_NUM_GROUPS];
+};
+
+static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
+{
+	int i, sz = 0;
+
+	for (i = 0; i < groups->num_groups; i++)
+		sz += groups->group_size[i];
+
+	return sz;
+}
 
 /* L3/L4 traffic type classifier */
 struct mlx5_ttc_table {
@@ -138,6 +141,53 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = {
 	},
 };
 
+enum TTC_GROUP_TYPE {
+	TTC_GROUPS_DEFAULT = 0,
+	TTC_GROUPS_USE_L4_TYPE = 1,
+};
+
+static const struct mlx5_fs_ttc_groups ttc_groups[] = {
+	[TTC_GROUPS_DEFAULT] = {
+		.num_groups = 3,
+		.group_size = {
+			BIT(3) + MLX5_NUM_TUNNEL_TT,
+			BIT(1),
+			BIT(0),
+		},
+	},
+	[TTC_GROUPS_USE_L4_TYPE] = {
+		.use_l4_type = true,
+		.num_groups = 4,
+		.group_size = {
+			MLX5_TTC_GROUP_TCPUDP_SIZE,
+			BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
+			BIT(1),
+			BIT(0),
+		},
+	},
+};
+
+static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
+	[TTC_GROUPS_DEFAULT] = {
+		.num_groups = 3,
+		.group_size = {
+			BIT(3),
+			BIT(1),
+			BIT(0),
+		},
+	},
+	[TTC_GROUPS_USE_L4_TYPE] = {
+		.use_l4_type = true,
+		.num_groups = 4,
+		.group_size = {
+			MLX5_TTC_GROUP_TCPUDP_SIZE,
+			BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE,
+			BIT(1),
+			BIT(0),
+		},
+	},
+};
+
 u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
 {
 	return ttc_tunnel_rules[tt].proto;
@@ -188,9 +238,29 @@ static u8 mlx5_etype_to_ipv(u16 ethertype)
 	return 0;
 }
 
+static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
+					u8 proto, bool use_l4_type)
+{
+	int l4_type;
+
+	if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
+		if (proto == IPPROTO_TCP)
+			l4_type = MLX5_PACKET_L4_TYPE_TCP;
+		else
+			l4_type = MLX5_PACKET_L4_TYPE_UDP;
+
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type);
+	} else {
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
+	}
+}
+
 static struct mlx5_flow_handle *
 mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
-		       struct mlx5_flow_destination *dest, u16 etype, u8 proto)
+		       struct mlx5_flow_destination *dest, u16 etype, u8 proto,
+		       bool use_l4_type)
 {
 	int match_ipv_outer =
 		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
@@ -207,8 +277,13 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
 
 	if (proto) {
 		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
+		mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
+							 spec->match_criteria,
+							 outer_headers),
+					    MLX5_ADDR_OF(fte_match_param,
+							 spec->match_value,
+							 outer_headers),
+					    proto, use_l4_type);
 	}
 
 	ipv = mlx5_etype_to_ipv(etype);
@@ -234,7 +309,8 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
 
 static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
 					 struct ttc_params *params,
-					 struct mlx5_ttc_table *ttc)
+					 struct mlx5_ttc_table *ttc,
+					 bool use_l4_type)
 {
 	struct mlx5_flow_handle **trules;
 	struct mlx5_ttc_rule *rules;
@@ -251,7 +327,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
 			continue;
 		rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
 						    ttc_rules[tt].etype,
-						    ttc_rules[tt].proto);
+						    ttc_rules[tt].proto,
+						    use_l4_type);
 		if (IS_ERR(rule->rule)) {
 			err = PTR_ERR(rule->rule);
 			rule->rule = NULL;
@@ -273,7 +350,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
 		trules[tt] = mlx5_generate_ttc_rule(dev, ft,
 						    &params->tunnel_dests[tt],
 						    ttc_tunnel_rules[tt].etype,
-						    ttc_tunnel_rules[tt].proto);
+						    ttc_tunnel_rules[tt].proto,
+						    use_l4_type);
 		if (IS_ERR(trules[tt])) {
 			err = PTR_ERR(trules[tt]);
 			trules[tt] = NULL;
@@ -289,7 +367,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
 }
 
 static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
-					bool use_ipv)
+					bool use_ipv,
+					const struct mlx5_fs_ttc_groups *groups)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	int ix = 0;
@@ -297,7 +376,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
 	int err;
 	u8 *mc;
 
-	ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
+	ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
 	if (!ttc->g)
 		return -ENOMEM;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -307,16 +386,31 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
 		return -ENOMEM;
 	}
 
-	/* L4 Group */
 	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
 	if (use_ipv)
 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
 	else
 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+
+	/* TCP UDP group */
+	if (groups->use_l4_type) {
+		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type);
+		MLX5_SET_CFG(in, start_flow_index, ix);
+		ix += groups->group_size[ttc->num_groups];
+		MLX5_SET_CFG(in, end_flow_index, ix - 1);
+		ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+		if (IS_ERR(ttc->g[ttc->num_groups]))
+			goto err;
+		ttc->num_groups++;
+
+		MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0);
+	}
+
+	/* L4 Group */
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5_TTC_GROUP1_SIZE;
+	ix += groups->group_size[ttc->num_groups];
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
 	if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -326,7 +420,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
 	/* L3 Group */
 	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5_TTC_GROUP2_SIZE;
+	ix += groups->group_size[ttc->num_groups];
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
 	if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -336,7 +430,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
 	/* Any Group */
 	memset(in, 0, inlen);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5_TTC_GROUP3_SIZE;
+	ix += groups->group_size[ttc->num_groups];
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
 	if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -358,7 +452,7 @@ static struct mlx5_flow_handle *
 mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
 			     struct mlx5_flow_table *ft,
 			     struct mlx5_flow_destination *dest,
-			     u16 etype, u8 proto)
+			     u16 etype, u8 proto, bool use_l4_type)
 {
 	MLX5_DECLARE_FLOW_ACT(flow_act);
 	struct mlx5_flow_handle *rule;
@@ -379,8 +473,13 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
 
 	if (proto) {
 		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
-		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
+		mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
+							 spec->match_criteria,
+							 inner_headers),
+					    MLX5_ADDR_OF(fte_match_param,
+							 spec->match_value,
+							 inner_headers),
+					    proto, use_l4_type);
 	}
 
 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
@@ -395,7 +494,8 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
 
 static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
 						struct ttc_params *params,
-						struct mlx5_ttc_table *ttc)
+						struct mlx5_ttc_table *ttc,
+						bool use_l4_type)
 {
 	struct mlx5_ttc_rule *rules;
 	struct mlx5_flow_table *ft;
@@ -413,7 +513,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
 		rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
 							  &params->dests[tt],
 							  ttc_rules[tt].etype,
-							  ttc_rules[tt].proto);
+							  ttc_rules[tt].proto,
+							  use_l4_type);
 		if (IS_ERR(rule->rule)) {
 			err = PTR_ERR(rule->rule);
 			rule->rule = NULL;
@@ -430,7 +531,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
 	return err;
 }
 
-static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
+static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc,
+					      const struct mlx5_fs_ttc_groups *groups)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	int ix = 0;
@@ -438,8 +540,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
 	int err;
 	u8 *mc;
 
-	ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g),
-			 GFP_KERNEL);
+	ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
 	if (!ttc->g)
 		return -ENOMEM;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -449,13 +550,28 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
 		return -ENOMEM;
 	}
 
-	/* L4 Group */
 	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
 	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+
+	/* TCP UDP group */
+	if (groups->use_l4_type) {
+		MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type);
+		MLX5_SET_CFG(in, start_flow_index, ix);
+		ix += groups->group_size[ttc->num_groups];
+		MLX5_SET_CFG(in, end_flow_index, ix - 1);
+		ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+		if (IS_ERR(ttc->g[ttc->num_groups]))
+			goto err;
+		ttc->num_groups++;
+
+		MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0);
+	}
+
+	/* L4 Group */
+	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5_INNER_TTC_GROUP1_SIZE;
+	ix += groups->group_size[ttc->num_groups];
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
 	if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -465,7 +581,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
 	/* L3 Group */
 	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5_INNER_TTC_GROUP2_SIZE;
+	ix += groups->group_size[ttc->num_groups];
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
 	if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -475,7 +591,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
 	/* Any Group */
 	memset(in, 0, inlen);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5_INNER_TTC_GROUP3_SIZE;
+	ix += groups->group_size[ttc->num_groups];
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
 	if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -496,27 +612,47 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
 struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
 						   struct ttc_params *params)
 {
+	const struct mlx5_fs_ttc_groups *groups;
+	struct mlx5_flow_namespace *ns;
 	struct mlx5_ttc_table *ttc;
+	bool use_l4_type;
 	int err;
 
 	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
 	if (!ttc)
 		return ERR_PTR(-ENOMEM);
 
+	switch (params->ns_type) {
+	case MLX5_FLOW_NAMESPACE_PORT_SEL:
+		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+			MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
+		break;
+	case MLX5_FLOW_NAMESPACE_KERNEL:
+		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+			MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	ns = mlx5_get_flow_namespace(dev, params->ns_type);
+	groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+			       &inner_ttc_groups[TTC_GROUPS_DEFAULT];
+
 	WARN_ON_ONCE(params->ft_attr.max_fte);
-	params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE;
-	ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+	params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+	ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
 	if (IS_ERR(ttc->t)) {
 		err = PTR_ERR(ttc->t);
 		kvfree(ttc);
 		return ERR_PTR(err);
 	}
 
-	err = mlx5_create_inner_ttc_table_groups(ttc);
+	err = mlx5_create_inner_ttc_table_groups(ttc, groups);
 	if (err)
 		goto destroy_ft;
 
-	err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc);
+	err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type);
 	if (err)
 		goto destroy_ft;
 
@@ -549,27 +685,47 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
 	bool match_ipv_outer =
 		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
 					  ft_field_support.outer_ip_version);
+	const struct mlx5_fs_ttc_groups *groups;
+	struct mlx5_flow_namespace *ns;
 	struct mlx5_ttc_table *ttc;
+	bool use_l4_type;
 	int err;
 
 	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
 	if (!ttc)
 		return ERR_PTR(-ENOMEM);
 
+	switch (params->ns_type) {
+	case MLX5_FLOW_NAMESPACE_PORT_SEL:
+		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+			MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
+		break;
+	case MLX5_FLOW_NAMESPACE_KERNEL:
+		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+			MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	ns = mlx5_get_flow_namespace(dev, params->ns_type);
+	groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+			       &ttc_groups[TTC_GROUPS_DEFAULT];
+
 	WARN_ON_ONCE(params->ft_attr.max_fte);
-	params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE;
-	ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+	params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+	ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
 	if (IS_ERR(ttc->t)) {
 		err = PTR_ERR(ttc->t);
 		kvfree(ttc);
 		return ERR_PTR(err);
 	}
 
-	err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
+	err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
 	if (err)
 		goto destroy_ft;
 
-	err = mlx5_generate_ttc_table_rules(dev, params, ttc);
+	err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type);
 	if (err)
 		goto destroy_ft;
 
...
@@ -40,7 +40,7 @@ struct mlx5_ttc_rule {
 struct mlx5_ttc_table;
 
 struct ttc_params {
-	struct mlx5_flow_namespace *ns;
+	enum mlx5_flow_namespace_type ns_type;
 	struct mlx5_flow_table_attr ft_attr;
 	struct mlx5_flow_destination dests[MLX5_NUM_TT];
 	DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
...
@@ -1336,6 +1336,9 @@ enum mlx5_qcam_feature_groups {
 #define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
 	MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
 
+#define MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, ft_field_support_2_nic_receive.cap)
+
 #define MLX5_CAP_ESW(mdev, cap) \
 	MLX5_GET(e_switch_cap, \
 		 mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
@@ -1359,6 +1362,9 @@ enum mlx5_qcam_feature_groups {
 #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
 	MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
 
+#define MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(mdev, cap) \
+	MLX5_CAP_PORT_SELECTION(mdev, ft_field_support_2_port_selection.cap)
+
 #define MLX5_CAP_ODP(mdev, cap)\
 	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
 
...
@@ -416,7 +416,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
 
 /* Table 2170 - Flow Table Fields Supported 2 Format */
 struct mlx5_ifc_flow_table_fields_supported_2_bits {
-	u8         reserved_at_0[0xe];
+	u8         reserved_at_0[0x2];
+	u8         inner_l4_type[0x1];
+	u8         outer_l4_type[0x1];
+	u8         reserved_at_4[0xa];
 	u8         bth_opcode[0x1];
 	u8         reserved_at_f[0x1];
 	u8         tunnel_header_0_1[0x1];
@@ -525,6 +528,12 @@ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
 	u8         reserved_at_0[0x80];
 };
 
+enum {
+	MLX5_PACKET_L4_TYPE_NONE,
+	MLX5_PACKET_L4_TYPE_TCP,
+	MLX5_PACKET_L4_TYPE_UDP,
+};
+
 struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
 	u8         smac_47_16[0x20];
 
@@ -550,7 +559,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
 	u8         tcp_sport[0x10];
 	u8         tcp_dport[0x10];
 
-	u8         reserved_at_c0[0x10];
+	u8         l4_type[0x2];
+	u8         reserved_at_c2[0xe];
 	u8         ipv4_ihl[0x4];
 	u8         reserved_at_c4[0x4];
 
@@ -846,7 +856,11 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
 
 	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
 
-	u8         reserved_at_e00[0x700];
+	u8         reserved_at_e00[0x600];
+
+	struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive;
+
+	u8         reserved_at_1480[0x80];
 
 	struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
 
@@ -876,7 +890,9 @@ struct mlx5_ifc_port_selection_cap_bits {
 
 	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
 
-	u8         reserved_at_400[0x7c00];
+	struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_port_selection;
+
+	u8         reserved_at_480[0x7b80];
 };
 
 enum {
@@ -2004,7 +2020,13 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8         reserved_at_3a0[0x10];
 	u8         max_rqt_vhca_id[0x10];
 
-	u8         reserved_at_3c0[0x440];
+	u8         reserved_at_3c0[0x20];
+
+	u8         reserved_at_3e0[0x10];
+	u8         pcc_ifa2[0x1];
+	u8         reserved_at_3f1[0xf];
+
+	u8         reserved_at_400[0x400];
 };
 
 enum mlx5_ifc_flow_destination_type {
...
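As a closing note on the group layout introduced above, here is a self-contained sketch of the sizing arithmetic that replaces the old MLX5_TTC_TABLE_SIZE macros: the table size is now the sum of the per-layout group sizes (mlx5_fs_ttc_table_size() in the patch), and the l4_type layout only carves the TCP/UDP entries out of the former L4 group. The numeric values used for MLX5_NUM_TUNNEL_TT and MLX5_TTC_GROUP_TCPUDP_SIZE are assumptions for illustration only; the group-size expressions themselves are taken from the patch.

#include <stdio.h>

#define BIT(n)                     (1u << (n))
#define MLX5_NUM_TUNNEL_TT         6   /* assumed count of tunnel traffic types */
#define MLX5_TTC_GROUP_TCPUDP_SIZE 4   /* assumed value of MLX5_TT_IPV6_UDP + 1 */

struct fs_ttc_groups {                 /* mirrors struct mlx5_fs_ttc_groups */
	int num_groups;
	int group_size[4];
};

static int table_size(const struct fs_ttc_groups *g)
{
	int i, sz = 0;

	for (i = 0; i < g->num_groups; i++)
		sz += g->group_size[i];
	return sz;
}

int main(void)
{
	/* default layout: L4 / L3 / any, as in the old macros */
	struct fs_ttc_groups def = {
		.num_groups = 3,
		.group_size = { BIT(3) + MLX5_NUM_TUNNEL_TT, BIT(1), BIT(0) },
	};
	/* l4_type layout: TCP/UDP group split out of the former L4 group */
	struct fs_ttc_groups l4 = {
		.num_groups = 4,
		.group_size = { MLX5_TTC_GROUP_TCPUDP_SIZE,
				BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
				BIT(1), BIT(0) },
	};

	/* both layouts cover the same total number of flow table entries */
	printf("default: %d, l4_type: %d\n", table_size(&def), table_size(&l4));
	return 0;
}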