Commit 765d1216 authored by David S. Miller

Merge tag 'mlx5-fixes-2022-05-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-05-17

This series provides bug fixes to the mlx5 driver.
Please pull and let me know if there is any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 23dd4581 16d42d31
@@ -23,7 +23,7 @@ struct mlx5_ct_fs_smfs_matcher {
 };
 
 struct mlx5_ct_fs_smfs_matchers {
-        struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+        struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
         struct list_head used;
 };
@@ -44,7 +44,8 @@ struct mlx5_ct_fs_smfs_rule {
 };
 
 static inline void
-mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
+                          bool gre)
 {
         void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
@@ -77,7 +78,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
                 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
                          ntohs(MLX5_CT_TCP_FLAGS_MASK));
-        } else {
+        } else if (!gre) {
                 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
                 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
         }
@@ -87,7 +88,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
 
 static struct mlx5dr_matcher *
 mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
-                               bool tcp, u32 priority)
+                               bool tcp, bool gre, u32 priority)
 {
         struct mlx5dr_matcher *dr_matcher;
         struct mlx5_flow_spec *spec;
@@ -96,7 +97,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
         if (!spec)
                 return ERR_PTR(-ENOMEM);
 
-        mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
+        mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
         spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
 
         dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
@@ -108,7 +109,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
 }
 
 static struct mlx5_ct_fs_smfs_matcher *
-mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
 {
         struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
         struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
@@ -119,7 +120,7 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
         int prio;
 
         matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
-        smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
+        smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];
 
         if (refcount_inc_not_zero(&smfs_matcher->ref))
                 return smfs_matcher;
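
[Review note] The new index arithmetic is worth spelling out: tcp and gre can never both be set for the same tuple (a connection is TCP, UDP, or GRE), so ipv4 * 3 + tcp * 2 + gre packs the six valid combinations densely into the enlarged smfs_matchers[6] array. A sketch of the resulting layout (the enum names below are hypothetical, for illustration only):

        /* Illustration only, not part of the patch: slot assignment produced
         * by ipv4 * 3 + tcp * 2 + gre, with tcp and gre mutually exclusive.
         */
        enum {
                CT_FS_SMFS_MATCHER_IPV6_UDP = 0, /* ipv4 = 0, tcp = 0, gre = 0 */
                CT_FS_SMFS_MATCHER_IPV6_GRE = 1, /* ipv4 = 0, tcp = 0, gre = 1 */
                CT_FS_SMFS_MATCHER_IPV6_TCP = 2, /* ipv4 = 0, tcp = 1, gre = 0 */
                CT_FS_SMFS_MATCHER_IPV4_UDP = 3, /* ipv4 = 1, tcp = 0, gre = 0 */
                CT_FS_SMFS_MATCHER_IPV4_GRE = 4, /* ipv4 = 1, tcp = 0, gre = 1 */
                CT_FS_SMFS_MATCHER_IPV4_TCP = 5, /* ipv4 = 1, tcp = 1, gre = 0 */
        };

The tcp = 1, gre = 1 combination never reaches this code, so six entries suffice and no slot aliases.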
@@ -145,11 +146,11 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
         }
 
         tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
-        dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+        dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
         if (IS_ERR(dr_matcher)) {
                 netdev_warn(fs->netdev,
-                            "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
-                            nat, ipv4, tcp, PTR_ERR(dr_matcher));
+                            "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+                            nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
                 smfs_matcher = ERR_CAST(dr_matcher);
                 goto out_unlock;
@@ -222,16 +223,17 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
 static inline bool
 mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
 {
-#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
-        const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
-                               DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
-        const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
-        const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
-        const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
-        const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
+#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+        const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
+        const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+        const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+        const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
+        const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
+        const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+        const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
 
         return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
-                used_keys == ipv6_udp);
+                used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
 }
 
 static bool
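
[Review note] A GRE tuple advertises neither a PORTS nor a TCP dissector key, which is exactly what the two new ipv4_gre/ipv6_gre cases accept; before this patch every accepted combination required the PORTS bit, so GRE masks were rejected. An illustration of the bitmask an IPv4/GRE conntrack entry would present (sketch only):

        /* Illustration only: used_keys for an IPv4/GRE tuple. */
        u32 used_keys = BIT(FLOW_DISSECTOR_KEY_BASIC) |
                        BIT(FLOW_DISSECTOR_KEY_CONTROL) |
                        BIT(FLOW_DISSECTOR_KEY_META) |
                        BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);

        /* == ipv4_gre above, so mlx5_tc_ct_valid_used_dissector_keys()
         * now returns true for this mask.
         */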
@@ -254,20 +256,24 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
         flow_rule_match_control(flow_rule, &control);
         flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
         flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
-        flow_rule_match_ports(flow_rule, &ports);
-        flow_rule_match_tcp(flow_rule, &tcp);
+        if (basic.key->ip_proto != IPPROTO_GRE)
+                flow_rule_match_ports(flow_rule, &ports);
+        if (basic.key->ip_proto == IPPROTO_TCP)
+                flow_rule_match_tcp(flow_rule, &tcp);
 
         if (basic.mask->n_proto != htons(0xFFFF) ||
             (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
             basic.mask->ip_proto != 0xFF ||
-            (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
+            (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+             basic.key->ip_proto != IPPROTO_GRE)) {
                 ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
                        ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
                        basic.key->ip_proto, basic.mask->ip_proto);
                 return false;
         }
 
-        if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
+        if (basic.key->ip_proto != IPPROTO_GRE &&
+            (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
                 ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
                        ports.mask->src, ports.mask->dst);
                 return false;
@@ -291,7 +297,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
         struct mlx5dr_action *actions[5];
         struct mlx5dr_rule *rule;
         int num_actions = 0, err;
-        bool nat, tcp, ipv4;
+        bool nat, tcp, ipv4, gre;
 
         if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
                 return ERR_PTR(-EOPNOTSUPP);
@@ -314,15 +320,17 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
         ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
         tcp = MLX5_GET(fte_match_param, spec->match_value,
                        outer_headers.ip_protocol) == IPPROTO_TCP;
+        gre = MLX5_GET(fte_match_param, spec->match_value,
+                       outer_headers.ip_protocol) == IPPROTO_GRE;
 
-        smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+        smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
         if (IS_ERR(smfs_matcher)) {
                 err = PTR_ERR(smfs_matcher);
                 goto err_matcher;
         }
 
         rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
-                                     MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
+                                     spec->flow_context.flow_source);
         if (!rule) {
                 err = -EINVAL;
                 goto err_create;
...
@@ -14,19 +14,26 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
         bool busy = false;
         int work_done = 0;
 
+        rcu_read_lock();
+
         ch_stats->poll++;
 
         work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
         busy |= work_done == budget;
         busy |= rq->post_wqes(rq);
 
-        if (busy)
-                return budget;
+        if (busy) {
+                work_done = budget;
+                goto out;
+        }
 
         if (unlikely(!napi_complete_done(napi, work_done)))
-                return work_done;
+                goto out;
 
         mlx5e_cq_arm(&rq->cq);
+
+out:
+        rcu_read_unlock();
         return work_done;
 }
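
[Review note] With the hunk applied, the poll routine has a single exit path and the RCU read-side section is balanced on every return. Assembled for readability (sketch only, reconstructed from the diff above; declarations unchanged):

        rcu_read_lock();

        ch_stats->poll++;

        work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
        busy |= work_done == budget;
        busy |= rq->post_wqes(rq);

        if (busy) {
                work_done = budget;     /* ask NAPI to poll again */
                goto out;
        }

        if (unlikely(!napi_complete_done(napi, work_done)))
                goto out;

        mlx5e_cq_arm(&rq->cq);

out:
        rcu_read_unlock();
        return work_done;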
...
@@ -3864,6 +3864,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
         if (netdev->features & NETIF_F_NTUPLE)
                 netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
 
+        features &= ~NETIF_F_GRO_HW;
+        if (netdev->features & NETIF_F_GRO_HW)
+                netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+
         return features;
 }
@@ -3896,6 +3900,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                 }
         }
 
+        if (params->xdp_prog) {
+                if (features & NETIF_F_LRO) {
+                        netdev_warn(netdev, "LRO is incompatible with XDP\n");
+                        features &= ~NETIF_F_LRO;
+                }
+                if (features & NETIF_F_GRO_HW) {
+                        netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
+                        features &= ~NETIF_F_GRO_HW;
+                }
+        }
+
+        if (priv->xsk.refcnt) {
+                if (features & NETIF_F_GRO_HW) {
+                        netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
+                                    priv->xsk.refcnt);
+                        features &= ~NETIF_F_GRO_HW;
+                }
+        }
+
         if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
                 features &= ~NETIF_F_RXHASH;
                 if (netdev->features & NETIF_F_RXHASH)
@@ -4850,10 +4873,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
         netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
         netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
 
-        if (!!MLX5_CAP_GEN(mdev, shampo) &&
-            mlx5e_check_fragmented_striding_rq_cap(mdev))
-                netdev->hw_features |= NETIF_F_GRO_HW;
-
         if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
                 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
                 netdev->hw_enc_features |= NETIF_F_TSO;
...
@@ -2663,28 +2663,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
         clean_tree(&root_ns->ns.node);
 }
 
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
-{
-        struct mlx5_flow_steering *steering = dev->priv.steering;
-
-        cleanup_root_ns(steering->root_ns);
-        cleanup_root_ns(steering->fdb_root_ns);
-        steering->fdb_root_ns = NULL;
-        kfree(steering->fdb_sub_ns);
-        steering->fdb_sub_ns = NULL;
-        cleanup_root_ns(steering->port_sel_root_ns);
-        cleanup_root_ns(steering->sniffer_rx_root_ns);
-        cleanup_root_ns(steering->sniffer_tx_root_ns);
-        cleanup_root_ns(steering->rdma_rx_root_ns);
-        cleanup_root_ns(steering->rdma_tx_root_ns);
-        cleanup_root_ns(steering->egress_root_ns);
-        mlx5_cleanup_fc_stats(dev);
-        kmem_cache_destroy(steering->ftes_cache);
-        kmem_cache_destroy(steering->fgs_cache);
-        mlx5_ft_pool_destroy(dev);
-        kfree(steering);
-}
-
 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
 {
         struct fs_prio *prio;
@@ -3086,42 +3064,27 @@ static int init_egress_root_ns(struct mlx5_flow_steering *steering)
         return err;
 }
 
-int mlx5_init_fs(struct mlx5_core_dev *dev)
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
 {
-        struct mlx5_flow_steering *steering;
-        int err = 0;
-
-        err = mlx5_init_fc_stats(dev);
-        if (err)
-                return err;
-
-        err = mlx5_ft_pool_init(dev);
-        if (err)
-                return err;
-
-        steering = kzalloc(sizeof(*steering), GFP_KERNEL);
-        if (!steering) {
-                err = -ENOMEM;
-                goto err;
-        }
-        steering->dev = dev;
-        dev->priv.steering = steering;
+        struct mlx5_flow_steering *steering = dev->priv.steering;
 
-        if (mlx5_fs_dr_is_supported(dev))
-                steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
-        else
-                steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+        cleanup_root_ns(steering->root_ns);
+        cleanup_root_ns(steering->fdb_root_ns);
+        steering->fdb_root_ns = NULL;
+        kfree(steering->fdb_sub_ns);
+        steering->fdb_sub_ns = NULL;
+        cleanup_root_ns(steering->port_sel_root_ns);
+        cleanup_root_ns(steering->sniffer_rx_root_ns);
+        cleanup_root_ns(steering->sniffer_tx_root_ns);
+        cleanup_root_ns(steering->rdma_rx_root_ns);
+        cleanup_root_ns(steering->rdma_tx_root_ns);
+        cleanup_root_ns(steering->egress_root_ns);
+}
 
-        steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
-                                                sizeof(struct mlx5_flow_group), 0,
-                                                0, NULL);
-        steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
-                                                 0, NULL);
-        if (!steering->ftes_cache || !steering->fgs_cache) {
-                err = -ENOMEM;
-                goto err;
-        }
+int mlx5_fs_core_init(struct mlx5_core_dev *dev)
+{
+        struct mlx5_flow_steering *steering = dev->priv.steering;
+        int err = 0;
 
         if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
               (MLX5_CAP_GEN(dev, nic_flow_table))) ||
@@ -3180,8 +3143,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
         }
 
         return 0;
+
+err:
+        mlx5_fs_core_cleanup(dev);
+        return err;
+}
+
+void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+{
+        struct mlx5_flow_steering *steering = dev->priv.steering;
+
+        kmem_cache_destroy(steering->ftes_cache);
+        kmem_cache_destroy(steering->fgs_cache);
+        kfree(steering);
+        mlx5_ft_pool_destroy(dev);
+        mlx5_cleanup_fc_stats(dev);
+}
+
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+{
+        struct mlx5_flow_steering *steering;
+        int err = 0;
+
+        err = mlx5_init_fc_stats(dev);
+        if (err)
+                return err;
+
+        err = mlx5_ft_pool_init(dev);
+        if (err)
+                goto err;
+
+        steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+        if (!steering) {
+                err = -ENOMEM;
+                goto err;
+        }
+
+        steering->dev = dev;
+        dev->priv.steering = steering;
+
+        if (mlx5_fs_dr_is_supported(dev))
+                steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+        else
+                steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
+        steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+                                                sizeof(struct mlx5_flow_group), 0,
+                                                0, NULL);
+        steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+                                                 0, NULL);
+        if (!steering->ftes_cache || !steering->fgs_cache) {
+                err = -ENOMEM;
+                goto err;
+        }
+
+        return 0;
+
 err:
-        mlx5_cleanup_fs(dev);
+        mlx5_fs_core_free(dev);
         return err;
 }
...
@@ -298,8 +298,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
                                  enum mlx5_flow_steering_mode mode);
 
-int mlx5_init_fs(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
+void mlx5_fs_core_free(struct mlx5_core_dev *dev);
+int mlx5_fs_core_init(struct mlx5_core_dev *dev);
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
 
 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
...
@@ -8,7 +8,8 @@
 enum {
         MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
         MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
-        MLX5_FW_RESET_FLAGS_PENDING_COMP
+        MLX5_FW_RESET_FLAGS_PENDING_COMP,
+        MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
 };
 
 struct mlx5_fw_reset {
@@ -208,7 +209,10 @@ static void poll_sync_reset(struct timer_list *t)
         if (fatal_error) {
                 mlx5_core_warn(dev, "Got Device Reset\n");
-                queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+                if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+                        queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+                else
+                        mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
                 return;
         }
@@ -433,9 +437,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
         struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
         struct mlx5_eqe *eqe = data;
 
+        if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+                return NOTIFY_DONE;
+
         switch (eqe->sub_type) {
         case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
                 queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
                 break;
         case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
                 mlx5_sync_reset_events_handle(fw_reset, eqe);
@@ -479,6 +486,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
         mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
 }
 
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
+{
+        struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+        set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
+        cancel_work_sync(&fw_reset->fw_live_patch_work);
+        cancel_work_sync(&fw_reset->reset_request_work);
+        cancel_work_sync(&fw_reset->reset_reload_work);
+        cancel_work_sync(&fw_reset->reset_now_work);
+        cancel_work_sync(&fw_reset->reset_abort_work);
+}
+
 int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
 {
         struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
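
[Review note] mlx5_drain_fw_reset() is the usual two-step quiesce: publish the flag, then flush. Once DROP_NEW_REQUESTS is visible, poll_sync_reset() and fw_reset_event_notifier() (both patched above) refuse to queue new work, so the cancel_work_sync() calls cannot race with a re-queue. Caller-side ordering, per the remove_one() hunk later in this series (sketch only):

        /* Sketch of the teardown ordering this helper enables. */
        mlx5_drain_fw_reset(dev);       /* no fw_reset work queued or running past this point */
        devlink_unregister(devlink);    /* safe: drained work can no longer call devlink APIs */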
...
@@ -16,6 +16,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
 int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
 int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
...
@@ -938,6 +938,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
                 goto err_sf_table_cleanup;
         }
 
+        err = mlx5_fs_core_alloc(dev);
+        if (err) {
+                mlx5_core_err(dev, "Failed to alloc flow steering\n");
+                goto err_fs;
+        }
+
         dev->dm = mlx5_dm_create(dev);
         if (IS_ERR(dev->dm))
                 mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -948,6 +954,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 
         return 0;
 
+err_fs:
+        mlx5_sf_table_cleanup(dev);
 err_sf_table_cleanup:
         mlx5_sf_hw_table_cleanup(dev);
 err_sf_hw_table_cleanup:
@@ -985,6 +993,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
         mlx5_hv_vhca_destroy(dev->hv_vhca);
         mlx5_fw_tracer_destroy(dev->tracer);
         mlx5_dm_cleanup(dev);
+        mlx5_fs_core_free(dev);
         mlx5_sf_table_cleanup(dev);
         mlx5_sf_hw_table_cleanup(dev);
         mlx5_vhca_event_cleanup(dev);
@@ -1191,7 +1200,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
                 goto err_tls_start;
         }
 
-        err = mlx5_init_fs(dev);
+        err = mlx5_fs_core_init(dev);
         if (err) {
                 mlx5_core_err(dev, "Failed to init flow steering\n");
                 goto err_fs;
@@ -1236,7 +1245,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 err_vhca:
         mlx5_vhca_event_stop(dev);
 err_set_hca:
-        mlx5_cleanup_fs(dev);
+        mlx5_fs_core_cleanup(dev);
 err_fs:
         mlx5_accel_tls_cleanup(dev);
 err_tls_start:
@@ -1265,7 +1274,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
         mlx5_ec_cleanup(dev);
         mlx5_sf_hw_table_destroy(dev);
         mlx5_vhca_event_stop(dev);
-        mlx5_cleanup_fs(dev);
+        mlx5_fs_core_cleanup(dev);
         mlx5_accel_ipsec_cleanup(dev);
         mlx5_accel_tls_cleanup(dev);
         mlx5_fpga_device_stop(dev);
@@ -1618,6 +1627,10 @@ static void remove_one(struct pci_dev *pdev)
         struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
         struct devlink *devlink = priv_to_devlink(dev);
 
+        /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
+         * fw_reset before unregistering the devlink.
+         */
+        mlx5_drain_fw_reset(dev);
+
         devlink_unregister(devlink);
         mlx5_sriov_disable(pdev);
         mlx5_crdump_disable(dev);
...
@@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
         return 0;
 }
 
+static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
+                                        struct mlx5dr_ste_actions_attr *attr,
+                                        bool rx_rule,
+                                        bool *recalc_cs_required)
+{
+        *recalc_cs_required = false;
+
+        /* if device supports csum recalculation - no adjustment needed */
+        if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
+                return;
+
+        /* no adjustment needed on TX rules */
+        if (!rx_rule)
+                return;
+
+        if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
+                /* Ignore the modify TTL action.
+                 * It is always kept as last HW action.
+                 */
+                attr->modify_actions--;
+                return;
+        }
+
+        if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+                /* Due to a HW bug on some devices, modifying TTL on RX flows
+                 * will cause an incorrect checksum calculation. In such cases
+                 * we will use a FW table to recalculate the checksum.
+                 */
+                *recalc_cs_required = true;
+}
+
 static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
                                      struct mlx5dr_action *actions[],
                                      int last_idx)
@@ -650,8 +681,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                 case DR_ACTION_TYP_MODIFY_HDR:
                         attr.modify_index = action->rewrite->index;
                         attr.modify_actions = action->rewrite->num_of_actions;
-                        recalc_cs_required = action->rewrite->modify_ttl &&
-                                             !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
+                        if (action->rewrite->modify_ttl)
+                                dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
+                                                            &recalc_cs_required);
                         break;
                 case DR_ACTION_TYP_L2_TO_TNL_L2:
                 case DR_ACTION_TYP_L2_TO_TNL_L3:
@@ -732,12 +764,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
         *new_hw_ste_arr_sz = nic_matcher->num_of_builders;
         last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
 
-        /* Due to a HW bug in some devices, modifying TTL on RX flows will
-         * cause an incorrect checksum calculation. In this case we will
-         * use a FW table to recalculate.
-         */
-        if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
-            rx_rule && recalc_cs_required && dest_action) {
+        if (recalc_cs_required && dest_action) {
                 ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
                 if (ret) {
                         mlx5dr_err(dmn,
@@ -842,7 +869,8 @@ struct mlx5dr_action *
 mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
                                    struct mlx5dr_action_dest *dests,
                                    u32 num_of_dests,
-                                   bool ignore_flow_level)
+                                   bool ignore_flow_level,
+                                   u32 flow_source)
 {
         struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
         struct mlx5dr_action **ref_actions;
@@ -914,7 +942,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
                                       reformat_req,
                                       &action->dest_tbl->fw_tbl.id,
                                       &action->dest_tbl->fw_tbl.group_id,
-                                      ignore_flow_level);
+                                      ignore_flow_level,
+                                      flow_source);
         if (ret)
                 goto free_action;
@@ -1556,12 +1585,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
         return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
 }
 
-static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
-{
-        return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
-               !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
-}
-
 static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
                                             u32 max_hw_actions,
                                             u32 num_sw_actions,
@@ -1573,6 +1596,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
         const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
         const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
         struct mlx5dr_domain *dmn = action->rewrite->dmn;
+        __be64 *modify_ttl_sw_action = NULL;
         int ret, i, hw_idx = 0;
         __be64 *sw_action;
         __be64 hw_action;
@@ -1585,8 +1609,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
         action->rewrite->allow_rx = 1;
         action->rewrite->allow_tx = 1;
 
-        for (i = 0; i < num_sw_actions; i++) {
-                sw_action = &sw_actions[i];
+        for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
+                /* modify TTL is handled separately, as a last action */
+                if (i == num_sw_actions) {
+                        sw_action = modify_ttl_sw_action;
+                        modify_ttl_sw_action = NULL;
+                } else {
+                        sw_action = &sw_actions[i];
+                }
 
                 ret = dr_action_modify_check_field_limitation(action,
                                                               sw_action);
@@ -1595,10 +1625,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
                 if (!(*modify_ttl) &&
                     dr_action_modify_check_is_ttl_modify(sw_action)) {
-                        if (dr_action_modify_ttl_ignore(dmn))
-                                continue;
+                        modify_ttl_sw_action = sw_action;
                         *modify_ttl = true;
+                        continue;
                 }
 
                 /* Convert SW action to HW action */
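
[Review note] The loop now runs one extra pass to flush the stashed TTL modify, guaranteeing it becomes the last HW action, which is what lets dr_action_modify_ttl_adjust() above drop it by simply decrementing attr->modify_actions. A hypothetical trace of the new iteration order:

        /* Hypothetical trace: num_sw_actions = 3, sw_actions[1] modifies TTL.
         *
         * i = 0: sw_actions[0] converted to a HW action
         * i = 1: TTL modify detected -> stashed in modify_ttl_sw_action, continue
         * i = 2: sw_actions[2] converted to a HW action
         * i = 3: i == num_sw_actions and modify_ttl_sw_action != NULL
         *        -> TTL modify converted last, pointer cleared, loop terminates
         */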
...
@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
                             bool reformat_req,
                             u32 *tbl_id,
                             u32 *group_id,
-                            bool ignore_flow_level)
+                            bool ignore_flow_level,
+                            u32 flow_source)
 {
         struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
         struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
         fte_info.val = val;
         fte_info.dest_arr = dest;
         fte_info.ignore_flow_level = ignore_flow_level;
+        fte_info.flow_context.flow_source = flow_source;
 
         ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
         if (ret) {
...
@@ -420,7 +420,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
          * encapsulation. The reason for that is that we support
          * modify headers for outer headers only
          */
-        if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+        if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
                 dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
                 dr_ste_v0_set_rewrite_actions(last_ste,
                                               attr->modify_actions,
@@ -513,7 +513,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
                 }
         }
 
-        if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+        if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
                 if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
                         dr_ste_v0_arr_init_next(&last_ste,
                                                 added_stes,
...
@@ -1461,7 +1461,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
                             bool reformat_req,
                             u32 *tbl_id,
                             u32 *group_id,
-                            bool ignore_flow_level);
+                            bool ignore_flow_level,
+                            u32 flow_source);
 void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
                               u32 group_id);
 
 #endif /* _DR_TYPES_H_ */
@@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
         } else if (num_term_actions > 1) {
                 bool ignore_flow_level =
                         !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+                u32 flow_source = fte->flow_context.flow_source;
 
                 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
                     fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                 tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
                                                                 term_actions,
                                                                 num_term_actions,
-                                                                ignore_flow_level);
+                                                                ignore_flow_level,
+                                                                flow_source);
                 if (!tmp_action) {
                         err = -EOPNOTSUPP;
                         goto free_actions;
...
@@ -99,7 +99,8 @@ struct mlx5dr_action *
 mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
                                    struct mlx5dr_action_dest *dests,
                                    u32 num_of_dests,
-                                   bool ignore_flow_level);
+                                   bool ignore_flow_level,
+                                   u32 flow_source);
 
 struct mlx5dr_action *mlx5dr_action_create_drop(void);