Commit ce2b6eb4 authored by David S. Miller

Merge tag 'mlx5-updates-2021-12-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 software steering: new features and optimizations

This patch series brings various SW steering features, optimizations, and
debuggability-focused improvements.

 1) Expose debugfs for dumping the SW steering resources
 2) Remove unused fields
 3) Support matching on new fields
 4) Optimize steering for RX-only/TX-only rules
 5) Make software steering the default steering mechanism when
    available; applies only to switchdev-mode FDB

From Yevgeny Kliteynik and Muhammad Sammar:

 - Patch 1 fixes an error flow in creating matchers
 - Patch 2 fixes the lower-case macro prefix "mlx5_" to "MLX5_"
 - Patch 3 removes unused struct member in mlx5dr_matcher
 - Patch 4 renames the list field in the matcher struct to list_node, to reflect
   that this field is a list node stored on another struct's list
 - Patch 5 adds a check for a valid flex parser ID value
 - Patch 6 adds the missing reserved fields to dr_match_param and aligns it to
   the format defined by the HW spec
 - Patch 7 adds support for dumping SW steering (SMFS) resources using debugfs
   in CSV format: the domain and its tables, matchers and rules (see the
   illustrative debugfs sketch right after this commit message)
 - Patch 8 adds support for a new destination type - UPLINK
 - Patch 9 adds WARN_ON_ONCE on refcount checks in SW steering object destructors
 - Patches 10, 11, 12 add misc5 flow table match parameters and add support for
   matching on tunnel headers 0 and 1
 - Patch 13 adds support for matching on geneve_tlv_option_0_exist field
 - Patch 14 implements a performance optimization for empty or RX/TX-only
   matchers by splitting the RX and TX matcher handling: the matcher connection
   in the matchers chain is split into two separate lists (RX-only and TX-only),
   which solves the use case of many RX-only or TX-only rules creating a long
   chain of RX/TX-only paths without actual rules
 - Patch 15 ignores modify TTL if the device doesn't support it, instead of
   adding an unsupported action
 - Patch 16 sets SMFS as the default steering mode
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 20a9013e aa36c948
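Patch 7's dump implementation is not fully visible below (one file's diff is collapsed; only the dr_dbg.h declarations appear). For orientation only, here is a minimal, hypothetical sketch of the debugfs/seq_file pattern such a CSV dump typically follows; the my_dump_* names, the context struct and the record layout are illustrative and are not taken from the actual implementation:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical dump context; the real code walks the domain's tables,
 * matchers and rules under a debug mutex and emits one CSV record per
 * object.
 */
struct my_dump_ctx {
	struct dentry *dir;
	u32 domain_id;
};

static int my_dump_show(struct seq_file *m, void *unused)
{
	struct my_dump_ctx *ctx = m->private;

	/* CSV record: <record type>,<object id>,<attributes...> */
	seq_printf(m, "domain,%u\n", ctx->domain_id);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_dump);

static void my_dump_init(struct my_dump_ctx *ctx, struct dentry *parent)
{
	ctx->dir = debugfs_create_dir("steering", parent);
	debugfs_create_file("dump", 0444, ctx->dir, ctx, &my_dump_fops);
}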
...@@ -104,7 +104,8 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o ...@@ -104,7 +104,8 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_ste.o steering/dr_send.o \ steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \ steering/dr_ste_v0.o steering/dr_ste_v1.o \
steering/dr_cmd.o steering/dr_fw.o \ steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o steering/dr_action.o steering/fs_dr.o \
steering/dr_dbg.o
# #
# SF device # SF device
# #
......
...@@ -451,7 +451,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev, ...@@ -451,7 +451,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
list_for_each_entry(dst, &fte->node.children, node.list) { list_for_each_entry(dst, &fte->node.children, node.list) {
if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue; continue;
if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT && if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++; num_encap++;
num_fwd_destinations++; num_fwd_destinations++;
......
...@@ -1525,7 +1525,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, ...@@ -1525,7 +1525,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2) struct mlx5_flow_destination *d2)
{ {
if (d1->type == d2->type) { if (d1->type == d2->type) {
if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
d1->vport.num == d2->vport.num && d1->vport.num == d2->vport.num &&
d1->vport.flags == d2->vport.flags && d1->vport.flags == d2->vport.flags &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ? ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
...@@ -3082,6 +3083,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) ...@@ -3082,6 +3083,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev; steering->dev = dev;
dev->priv.steering = steering; dev->priv.steering = steering;
if (mlx5_fs_dr_is_supported(dev))
steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs", steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
sizeof(struct mlx5_flow_group), 0, sizeof(struct mlx5_flow_group), 0,
0, NULL); 0, NULL);
......
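For readers skimming the fs_core.c hunk above: Patch 16's default-mode selection boils down to the helper below. pick_default_steering_mode() is an illustrative name used for this note only, not a function added by the series:

static enum mlx5_flow_steering_mode
pick_default_steering_mode(struct mlx5_core_dev *dev)
{
	/* SMFS (SW-managed flow steering) becomes the default whenever the
	 * device supports it; otherwise keep the FW-managed DMFS mode.
	 */
	return mlx5_fs_dr_is_supported(dev) ? MLX5_FLOW_STEERING_MODE_SMFS :
					      MLX5_FLOW_STEERING_MODE_DMFS;
}

If the previous behavior is needed, the mode can presumably still be overridden at runtime through the driver's existing flow_steering_mode devlink parameter.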
...@@ -203,7 +203,7 @@ struct mlx5_ft_underlay_qp { ...@@ -203,7 +203,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn; u32 qpn;
}; };
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00 #define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_e00
/* Calculate the fte_match_param length and without the reserved length. /* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last. * Make sure the reserved field is the last.
*/ */
......
...@@ -1560,6 +1560,12 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action) ...@@ -1560,6 +1560,12 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL; return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
} }
static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
{
return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
}
static int dr_actions_convert_modify_header(struct mlx5dr_action *action, static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions, u32 max_hw_actions,
u32 num_sw_actions, u32 num_sw_actions,
...@@ -1591,8 +1597,13 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, ...@@ -1591,8 +1597,13 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
if (ret) if (ret)
return ret; return ret;
if (!(*modify_ttl)) if (!(*modify_ttl) &&
*modify_ttl = dr_action_modify_check_is_ttl_modify(sw_action); dr_action_modify_check_is_ttl_modify(sw_action)) {
if (dr_action_modify_ttl_ignore(dmn))
continue;
*modify_ttl = true;
}
/* Convert SW action to HW action */ /* Convert SW action to HW action */
ret = dr_action_modify_sw_to_hw(dmn, ret = dr_action_modify_sw_to_hw(dmn,
...@@ -1631,7 +1642,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, ...@@ -1631,7 +1642,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
* modify actions doesn't exceeds the limit * modify actions doesn't exceeds the limit
*/ */
hw_idx++; hw_idx++;
if ((num_sw_actions + hw_idx - i) >= max_hw_actions) { if (hw_idx >= max_hw_actions) {
mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n"); mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n");
return -EINVAL; return -EINVAL;
} }
...@@ -1642,6 +1653,10 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, ...@@ -1642,6 +1653,10 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
hw_idx++; hw_idx++;
} }
/* if the resulting HW actions list is empty, add NOP action */
if (!hw_idx)
hw_idx++;
*num_hw_actions = hw_idx; *num_hw_actions = hw_idx;
return 0; return 0;
...@@ -1792,7 +1807,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, ...@@ -1792,7 +1807,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
int mlx5dr_action_destroy(struct mlx5dr_action *action) int mlx5dr_action_destroy(struct mlx5dr_action *action)
{ {
if (refcount_read(&action->refcount) > 1) if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
return -EBUSY; return -EBUSY;
switch (action->action_type) { switch (action->action_type) {
......
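To spell out the intent of the dr_action.c change above (Patch 15): modify_ttl_is_ignored() below is a commented mirror of dr_action_modify_ttl_ignore() from the hunk, added here only as annotation:

static bool modify_ttl_is_ignored(struct mlx5dr_domain *dmn)
{
	/* A TTL-modify SW action is silently skipped (rather than being
	 * emitted as an unsupported HW action) only when the device can
	 * neither recalculate the checksum in the STE after a TTL change
	 * nor modify IPv4 TTL in the FDB.
	 */
	return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
	       !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
}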
...@@ -132,6 +132,13 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, ...@@ -132,6 +132,13 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new); caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
/* geneve_tlv_option_0_exist is the indication of
* STE support for lookup type flex_parser_ok
*/
caps->flex_parser_ok_bits_supp =
MLX5_CAP_FLOWTABLE(mdev,
flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) { if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1); caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
...@@ -152,7 +159,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, ...@@ -152,7 +159,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->flex_parser_id_mpls_over_gre = caps->flex_parser_id_mpls_over_gre =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre); MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED) if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
caps->flex_parser_id_mpls_over_udp = caps->flex_parser_id_mpls_over_udp =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label); MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
...@@ -599,7 +606,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev, ...@@ -599,7 +606,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
for (i = 0; i < fte->dests_size; i++) { for (i = 0; i < fte->dests_size; i++) {
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue; continue;
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT && if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++; num_encap++;
num_fwd_destinations++; num_fwd_destinations++;
...@@ -724,12 +732,19 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, ...@@ -724,12 +732,19 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = fte->dest_arr[i].ft_id; id = fte->dest_arr[i].ft_id;
break; break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT: case MLX5_FLOW_DESTINATION_TYPE_VPORT:
if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
id = fte->dest_arr[i].vport.num; id = fte->dest_arr[i].vport.num;
MLX5_SET(dest_format_struct, in_dests, MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid, destination_eswitch_owner_vhca_id_valid,
!!(fte->dest_arr[i].vport.flags & !!(fte->dest_arr[i].vport.flags &
MLX5_FLOW_DEST_VPORT_VHCA_ID)); MLX5_FLOW_DEST_VPORT_VHCA_ID));
} else {
id = 0;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid, 1);
}
MLX5_SET(dest_format_struct, in_dests, MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id, destination_eswitch_owner_vhca_id,
fte->dest_arr[i].vport.vhca_id); fte->dest_arr[i].vport.vhca_id);
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
struct mlx5dr_dbg_dump_info {
struct mutex dbg_mutex; /* protect dbg lists */
struct dentry *steering_debugfs;
struct dentry *fdb_debugfs;
};
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn);
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl);
void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl);
void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule);
void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule);
...@@ -395,7 +395,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) ...@@ -395,7 +395,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
} }
dr_domain_init_csum_recalc_fts(dmn); dr_domain_init_csum_recalc_fts(dmn);
mlx5dr_dbg_init_dump(dmn);
return dmn; return dmn;
uninit_caps: uninit_caps:
...@@ -431,11 +431,12 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags) ...@@ -431,11 +431,12 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{ {
if (refcount_read(&dmn->refcount) > 1) if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
return -EBUSY; return -EBUSY;
/* make sure resources are not used by the hardware */ /* make sure resources are not used by the hardware */
mlx5dr_cmd_sync_steering(dmn->mdev); mlx5dr_cmd_sync_steering(dmn->mdev);
mlx5dr_dbg_uninit_dump(dmn);
dr_domain_uninit_csum_recalc_fts(dmn); dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn); dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn); dr_domain_caps_uninit(dmn);
......
...@@ -5,11 +5,6 @@ ...@@ -5,11 +5,6 @@
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES) #define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
struct mlx5dr_rule_action_member {
struct mlx5dr_action *action;
struct list_head list;
};
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx, static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *new_last_ste, struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list, struct list_head *miss_list,
...@@ -979,14 +974,36 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, ...@@ -979,14 +974,36 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false; return false;
} }
} }
if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
s_idx = offsetof(struct mlx5dr_match_param, misc5);
e_idx = min(s_idx + sizeof(param->misc5), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contains a value not specified by mask\n");
return false;
}
}
return true; return true;
} }
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule, static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule) struct mlx5dr_rule_rx_tx *nic_rule)
{ {
/* Check if this nic rule was actually created, or was it skipped
* and only the other type of the RX/TX nic rule was created.
*/
if (!nic_rule->last_rule_ste)
return 0;
mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn); mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
dr_rule_clean_rule_members(rule, nic_rule); dr_rule_clean_rule_members(rule, nic_rule);
nic_rule->nic_matcher->rules--;
if (!nic_rule->nic_matcher->rules)
mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
nic_rule->nic_matcher);
mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn); mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
return 0; return 0;
...@@ -1003,6 +1020,8 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule) ...@@ -1003,6 +1020,8 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{ {
struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn; struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
mlx5dr_dbg_rule_del(rule);
switch (dmn->type) { switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX: case MLX5DR_DOMAIN_TYPE_NIC_RX:
dr_rule_destroy_rule_nic(rule, &rule->rx); dr_rule_destroy_rule_nic(rule, &rule->rx);
...@@ -1091,24 +1110,28 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, ...@@ -1091,24 +1110,28 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_lock(nic_dmn); mlx5dr_domain_nic_lock(nic_dmn);
ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
if (ret)
goto free_hw_ste;
ret = mlx5dr_matcher_select_builders(matcher, ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher, nic_matcher,
dr_rule_get_ipv(&param->outer), dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner)); dr_rule_get_ipv(&param->inner));
if (ret) if (ret)
goto free_hw_ste; goto remove_from_nic_tbl;
/* Set the tag values inside the ste array */ /* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr); ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
if (ret) if (ret)
goto free_hw_ste; goto remove_from_nic_tbl;
/* Set the actions values/addresses inside the ste array */ /* Set the actions values/addresses inside the ste array */
ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions, ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
num_actions, hw_ste_arr, num_actions, hw_ste_arr,
&new_hw_ste_arr_sz); &new_hw_ste_arr_sz);
if (ret) if (ret)
goto free_hw_ste; goto remove_from_nic_tbl;
cur_htbl = nic_matcher->s_htbl; cur_htbl = nic_matcher->s_htbl;
...@@ -1155,6 +1178,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, ...@@ -1155,6 +1178,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (htbl) if (htbl)
mlx5dr_htbl_put(htbl); mlx5dr_htbl_put(htbl);
nic_matcher->rules++;
mlx5dr_domain_nic_unlock(nic_dmn); mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr); kfree(hw_ste_arr);
...@@ -1168,6 +1193,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, ...@@ -1168,6 +1193,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
list_del(&ste_info->send_list); list_del(&ste_info->send_list);
kfree(ste_info); kfree(ste_info);
} }
remove_from_nic_tbl:
mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste: free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn); mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr); kfree(hw_ste_arr);
...@@ -1257,6 +1286,8 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher, ...@@ -1257,6 +1286,8 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
if (ret) if (ret)
goto remove_action_members; goto remove_action_members;
INIT_LIST_HEAD(&rule->dbg_node);
mlx5dr_dbg_rule_add(rule);
return rule; return rule;
remove_action_members: remove_action_members:
......
...@@ -719,6 +719,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bo ...@@ -719,6 +719,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bo
spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr); spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);
spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr); spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
spec->geneve_tlv_option_0_exist =
IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr); spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);
spec->outer_ipv6_flow_label = spec->outer_ipv6_flow_label =
...@@ -880,6 +882,26 @@ static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, ...@@ -880,6 +882,26 @@ static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec,
IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr); IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
} }
static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
{
spec->macsec_tag_0 =
IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
spec->macsec_tag_1 =
IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
spec->macsec_tag_2 =
IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
spec->macsec_tag_3 =
IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
spec->tunnel_header_0 =
IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
spec->tunnel_header_1 =
IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
spec->tunnel_header_2 =
IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
spec->tunnel_header_3 =
IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
}
void mlx5dr_ste_copy_param(u8 match_criteria, void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param, struct mlx5dr_match_param *set_param,
struct mlx5dr_match_parameters *mask, struct mlx5dr_match_parameters *mask,
...@@ -966,6 +988,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria, ...@@ -966,6 +988,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} }
dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr); dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
} }
param_location += sizeof(struct mlx5dr_match_misc4);
if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
if (mask->match_sz < param_location +
sizeof(struct mlx5dr_match_misc5)) {
memcpy(tail_param, data + param_location,
mask->match_sz - param_location);
buff = tail_param;
} else {
buff = data + param_location;
}
dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
}
} }
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
...@@ -1180,6 +1216,21 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -1180,6 +1216,21 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask); ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
} }
void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
return;
sb->rx = rx;
sb->caps = caps;
sb->inner = inner;
ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
}
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
...@@ -1269,6 +1320,16 @@ void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -1269,6 +1320,16 @@ void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_flex_parser_1_init(sb, mask); ste_ctx->build_flex_parser_1_init(sb, mask);
} }
void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
ste_ctx->build_tnl_header_0_1_init(sb, mask);
}
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = { static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0, [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1, [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
......
...@@ -135,12 +135,14 @@ struct mlx5dr_ste_ctx { ...@@ -135,12 +135,14 @@ struct mlx5dr_ste_ctx {
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe); void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
void DR_STE_CTX_BUILDER(tnl_geneve); void DR_STE_CTX_BUILDER(tnl_geneve);
void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt); void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt_exist);
void DR_STE_CTX_BUILDER(register_0); void DR_STE_CTX_BUILDER(register_0);
void DR_STE_CTX_BUILDER(register_1); void DR_STE_CTX_BUILDER(register_1);
void DR_STE_CTX_BUILDER(src_gvmi_qpn); void DR_STE_CTX_BUILDER(src_gvmi_qpn);
void DR_STE_CTX_BUILDER(flex_parser_0); void DR_STE_CTX_BUILDER(flex_parser_0);
void DR_STE_CTX_BUILDER(flex_parser_1); void DR_STE_CTX_BUILDER(flex_parser_1);
void DR_STE_CTX_BUILDER(tnl_gtpu); void DR_STE_CTX_BUILDER(tnl_gtpu);
void DR_STE_CTX_BUILDER(tnl_header_0_1);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0); void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1); void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
......
...@@ -80,6 +80,7 @@ enum { ...@@ -80,6 +80,7 @@ enum {
DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18, DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f, DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30, DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
DR_STE_V0_LU_TYPE_TUNNEL_HEADER = 0x34,
DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE, DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
}; };
...@@ -1704,7 +1705,7 @@ static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id, ...@@ -1704,7 +1705,7 @@ static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
u32 id = *misc4_field_id; u32 id = *misc4_field_id;
u8 *parser_ptr; u8 *parser_ptr;
if (parser_is_used[id]) if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
return; return;
parser_is_used[id] = true; parser_is_used[id] = true;
...@@ -1875,6 +1876,27 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, ...@@ -1875,6 +1876,27 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag; sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
} }
static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
return 0;
}
static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER;
dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
}
struct mlx5dr_ste_ctx ste_ctx_v0 = { struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */ /* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init, .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
...@@ -1903,6 +1925,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = { ...@@ -1903,6 +1925,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init, .build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init, .build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init, .build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
.build_tnl_header_0_1_init = &dr_ste_v0_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init, .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init, .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,
......
...@@ -47,6 +47,7 @@ enum { ...@@ -47,6 +47,7 @@ enum {
DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f, DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f, DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110, DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111, DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112, DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113, DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
...@@ -1713,6 +1714,27 @@ dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, ...@@ -1713,6 +1714,27 @@ dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag; sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
} }
static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
return 0;
}
static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
}
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value, static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
u8 *tag) u8 *tag)
...@@ -1833,7 +1855,7 @@ static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id, ...@@ -1833,7 +1855,7 @@ static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
u32 id = *misc4_field_id; u32 id = *misc4_field_id;
u8 *parser_ptr; u8 *parser_ptr;
if (parser_is_used[id]) if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
return; return;
parser_is_used[id] = true; parser_is_used[id] = true;
...@@ -1921,6 +1943,32 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, ...@@ -1921,6 +1943,32 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag; sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
} }
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
struct mlx5dr_match_misc *misc = &value->misc;
if (misc->geneve_tlv_option_0_exist) {
MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
misc->geneve_tlv_option_0_exist = 0;
}
return 0;
}
static void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
}
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
u8 *tag) u8 *tag)
...@@ -2020,12 +2068,14 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { ...@@ -2020,12 +2068,14 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
.build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init, .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
.build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
.build_register_0_init = &dr_ste_v1_build_register_0_init, .build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init, .build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
.build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init, .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init, .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init, .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
.build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init, .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init, .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
......
...@@ -3,69 +3,66 @@ ...@@ -3,69 +3,66 @@
#include "dr_types.h" #include "dr_types.h"
int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_table_rx_tx *nic_tbl,
struct mlx5dr_action *action) struct mlx5dr_action *action)
{ {
struct mlx5dr_matcher *last_matcher = NULL; struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
struct mlx5dr_htbl_connect_info info; struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl; struct mlx5dr_ste_htbl *last_htbl;
int ret; int ret;
if (action && action->action_type != DR_ACTION_TYP_FT) if (!list_empty(&nic_tbl->nic_matcher_list))
return -EOPNOTSUPP; last_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
struct mlx5dr_matcher_rx_tx,
mlx5dr_domain_lock(tbl->dmn); list_node);
if (!list_empty(&tbl->matcher_list))
last_matcher = list_last_entry(&tbl->matcher_list,
struct mlx5dr_matcher,
matcher_list);
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX || if (last_nic_matcher)
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) { last_htbl = last_nic_matcher->e_anchor;
if (last_matcher)
last_htbl = last_matcher->rx.e_anchor;
else else
last_htbl = tbl->rx.s_anchor; last_htbl = nic_tbl->s_anchor;
tbl->rx.default_icm_addr = action ? if (action)
nic_tbl->default_icm_addr =
nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr : action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
tbl->rx.nic_dmn->default_icm_addr; action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
else
nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
info.type = CONNECT_MISS; info.type = CONNECT_MISS;
info.miss_icm_addr = tbl->rx.default_icm_addr; info.miss_icm_addr = nic_tbl->default_icm_addr;
ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn, ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_tbl->nic_dmn,
tbl->rx.nic_dmn, last_htbl, &info, true);
last_htbl, if (ret)
&info, true); mlx5dr_dbg(dmn, "Failed to set NIC RX/TX miss action, ret %d\n", ret);
if (ret) {
mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);
goto out;
}
}
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX || return ret;
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) { }
if (last_matcher)
last_htbl = last_matcher->tx.e_anchor;
else
last_htbl = tbl->tx.s_anchor;
tbl->tx.default_icm_addr = action ? int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr : struct mlx5dr_action *action)
tbl->tx.nic_dmn->default_icm_addr; {
int ret;
info.type = CONNECT_MISS; if (action && action->action_type != DR_ACTION_TYP_FT)
info.miss_icm_addr = tbl->tx.default_icm_addr; return -EOPNOTSUPP;
ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn, mlx5dr_domain_lock(tbl->dmn);
tbl->tx.nic_dmn,
last_htbl, &info, true); if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
if (ret) { tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret); ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->rx, action);
if (ret)
goto out; goto out;
} }
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->tx, action);
if (ret)
goto out;
} }
/* Release old action */ /* Release old action */
...@@ -122,6 +119,8 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn, ...@@ -122,6 +119,8 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_htbl_connect_info info; struct mlx5dr_htbl_connect_info info;
int ret; int ret;
INIT_LIST_HEAD(&nic_tbl->nic_matcher_list);
nic_tbl->default_icm_addr = nic_dmn->default_icm_addr; nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
...@@ -266,6 +265,8 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u ...@@ -266,6 +265,8 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
if (ret) if (ret)
goto uninit_tbl; goto uninit_tbl;
INIT_LIST_HEAD(&tbl->dbg_node);
mlx5dr_dbg_tbl_add(tbl);
return tbl; return tbl;
uninit_tbl: uninit_tbl:
...@@ -281,9 +282,10 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl) ...@@ -281,9 +282,10 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
{ {
int ret; int ret;
if (refcount_read(&tbl->refcount) > 1) if (WARN_ON_ONCE(refcount_read(&tbl->refcount) > 1))
return -EBUSY; return -EBUSY;
mlx5dr_dbg_tbl_del(tbl);
ret = dr_table_destroy_sw_owned_tbl(tbl); ret = dr_table_destroy_sw_owned_tbl(tbl);
if (ret) if (ret)
return ret; return ret;
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */ /* Copyright (c) 2019 Mellanox Technologies */
#include <linux/mlx5/vport.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "fs_core.h" #include "fs_core.h"
#include "fs_cmd.h" #include "fs_cmd.h"
...@@ -194,6 +195,15 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain, ...@@ -194,6 +195,15 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
dest_attr->vport.vhca_id); dest_attr->vport.vhca_id);
} }
static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
dest_attr->vport.vhca_id);
}
static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain, static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst) struct mlx5_flow_rule *dst)
{ {
...@@ -218,7 +228,8 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai ...@@ -218,7 +228,8 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai
static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst) static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
{ {
return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT && return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
} }
...@@ -411,8 +422,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, ...@@ -411,8 +422,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
fs_dr_actions[fs_dr_num_actions++] = tmp_action; fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action; term_actions[num_term_actions++].dest = tmp_action;
break; break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT: case MLX5_FLOW_DESTINATION_TYPE_VPORT:
tmp_action = create_vport_action(domain, dst); tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
create_vport_action(domain, dst) :
create_uplink_action(domain, dst);
if (!tmp_action) { if (!tmp_action) {
err = -ENOMEM; err = -ENOMEM;
goto free_actions; goto free_actions;
......
...@@ -447,6 +447,14 @@ struct mlx5_ifc_ste_flex_parser_1_bits { ...@@ -447,6 +447,14 @@ struct mlx5_ifc_ste_flex_parser_1_bits {
u8 flex_parser_4[0x20]; u8 flex_parser_4[0x20];
}; };
struct mlx5_ifc_ste_flex_parser_ok_bits {
u8 flex_parser_3[0x20];
u8 flex_parser_2[0x20];
u8 flex_parsers_ok[0x8];
u8 reserved_at_48[0x18];
u8 flex_parser_0[0x20];
};
struct mlx5_ifc_ste_flex_parser_tnl_bits { struct mlx5_ifc_ste_flex_parser_tnl_bits {
u8 flex_parser_tunneling_header_63_32[0x20]; u8 flex_parser_tunneling_header_63_32[0x20];
...@@ -490,6 +498,14 @@ struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits { ...@@ -490,6 +498,14 @@ struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
u8 reserved_at_40[0x40]; u8 reserved_at_40[0x40];
}; };
struct mlx5_ifc_ste_tunnel_header_bits {
u8 tunnel_header_0[0x20];
u8 tunnel_header_1[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_ste_general_purpose_bits { struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20]; u8 general_purpose_lookup_field[0x20];
......
...@@ -1117,6 +1117,7 @@ enum { ...@@ -1117,6 +1117,7 @@ enum {
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3, MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4, MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5, MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
}; };
enum { enum {
......
...@@ -372,7 +372,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits { ...@@ -372,7 +372,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 reserved_at_37[0x9]; u8 reserved_at_37[0x9];
u8 geneve_tlv_option_0_data[0x1]; u8 geneve_tlv_option_0_data[0x1];
u8 reserved_at_41[0x4]; u8 geneve_tlv_option_0_exist[0x1];
u8 reserved_at_42[0x3];
u8 outer_first_mpls_over_udp[0x4]; u8 outer_first_mpls_over_udp[0x4];
u8 outer_first_mpls_over_gre[0x4]; u8 outer_first_mpls_over_gre[0x4];
u8 inner_first_mpls[0x4]; u8 inner_first_mpls[0x4];
...@@ -551,7 +552,8 @@ struct mlx5_ifc_fte_match_set_misc_bits { ...@@ -551,7 +552,8 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 bth_opcode[0x8]; u8 bth_opcode[0x8];
u8 geneve_vni[0x18]; u8 geneve_vni[0x18];
u8 reserved_at_d8[0x7]; u8 reserved_at_d8[0x6];
u8 geneve_tlv_option_0_exist[0x1];
u8 geneve_oam[0x1]; u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc]; u8 reserved_at_e0[0xc];
...@@ -670,6 +672,26 @@ struct mlx5_ifc_fte_match_set_misc4_bits { ...@@ -670,6 +672,26 @@ struct mlx5_ifc_fte_match_set_misc4_bits {
u8 reserved_at_100[0x100]; u8 reserved_at_100[0x100];
}; };
struct mlx5_ifc_fte_match_set_misc5_bits {
u8 macsec_tag_0[0x20];
u8 macsec_tag_1[0x20];
u8 macsec_tag_2[0x20];
u8 macsec_tag_3[0x20];
u8 tunnel_header_0[0x20];
u8 tunnel_header_1[0x20];
u8 tunnel_header_2[0x20];
u8 tunnel_header_3[0x20];
u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits { struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20]; u8 pa_h[0x20];
...@@ -811,7 +833,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { ...@@ -811,7 +833,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 fdb_to_vport_reg_c_id[0x8]; u8 fdb_to_vport_reg_c_id[0x8];
u8 reserved_at_8[0xd]; u8 reserved_at_8[0xd];
u8 fdb_modify_header_fwd_to_table[0x1]; u8 fdb_modify_header_fwd_to_table[0x1];
u8 reserved_at_16[0x1]; u8 fdb_ipv4_ttl_modify[0x1];
u8 flow_source[0x1]; u8 flow_source[0x1];
u8 reserved_at_18[0x2]; u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1]; u8 multi_fdb_encap[0x1];
...@@ -1291,7 +1313,7 @@ enum { ...@@ -1291,7 +1313,7 @@ enum {
enum { enum {
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3, MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4, MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5, MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
...@@ -1839,7 +1861,9 @@ struct mlx5_ifc_fte_match_param_bits { ...@@ -1839,7 +1861,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4; struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
u8 reserved_at_c00[0x400]; struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
u8 reserved_at_e00[0x200];
}; };
enum { enum {
...@@ -5977,6 +6001,7 @@ enum { ...@@ -5977,6 +6001,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5, MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_5 = 0x6,
}; };
struct mlx5_ifc_query_flow_group_out_bits { struct mlx5_ifc_query_flow_group_out_bits {
......
...@@ -252,7 +252,7 @@ enum mlx5_ib_device_query_context_attrs { ...@@ -252,7 +252,7 @@ enum mlx5_ib_device_query_context_attrs {
MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
}; };
#define MLX5_IB_DW_MATCH_PARAM 0x90 #define MLX5_IB_DW_MATCH_PARAM 0xA0
struct mlx5_ib_match_params { struct mlx5_ib_match_params {
__u32 match_params[MLX5_IB_DW_MATCH_PARAM]; __u32 match_params[MLX5_IB_DW_MATCH_PARAM];
......