Commit 109e2f5b authored by David S. Miller

Merge branch 'mlx5-fixes' into main

Tariq Toukan says:

====================
mlx5 fixes 2024-06-27

This patchset provides fixes from the team to the mlx5 core and EN
drivers.

The first 3 patches by Daniel replace use of the buggy max_num_eqs cap
field with the newly introduced 24-bit max_num_eqs_24b field.

Patch 4 by Chris decouples ingress ACL creation from a specific flow,
so it can be invoked by other flows when needed.

Patch 5 by Jianbo fixes a possible missing cleanup of QoS objects.

Patches 6 and 7 by Leon fix the IPsec stats logic to better reflect the
traffic.

Series generated against:
commit 02ea3120 ("octeontx2-pf: Fix coverity and klockwork issues in octeon PF driver")

V2:
Fixed wrong cited SHA in patch 6.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a6458ab7 e562f2d4
@@ -989,7 +989,12 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
 	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
 	struct net *net = dev_net(x->xso.dev);
+	u64 trailer_packets = 0, trailer_bytes = 0;
+	u64 replay_packets = 0, replay_bytes = 0;
+	u64 auth_packets = 0, auth_bytes = 0;
+	u64 success_packets, success_bytes;
 	u64 packets, bytes, lastuse;
+	size_t headers;
 
 	lockdep_assert(lockdep_is_held(&x->lock) ||
 		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
@@ -999,26 +1004,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
 		return;
 
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
-		mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
-		x->stats.integrity_failed += packets;
-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
-
-		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+		mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
+				     &auth_packets, &lastuse);
+		x->stats.integrity_failed += auth_packets;
+		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);
+
+		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
+				     &trailer_packets, &lastuse);
+		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
 	}
 
 	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
 		return;
 
-	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
-	x->curlft.packets += packets;
-	x->curlft.bytes += bytes;
-
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
-		mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
-		x->stats.replay += packets;
-		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+		mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
+				     &replay_packets, &lastuse);
+		x->stats.replay += replay_packets;
+		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
 	}
+
+	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
+	success_packets = packets - auth_packets - trailer_packets - replay_packets;
+	x->curlft.packets += success_packets;
+	/* NIC counts all bytes passed through flow steering and doesn't have
+	 * an ability to count payload data size which is needed for SA.
+	 *
+	 * To overcome HW limitation, let's approximate the payload size
+	 * by removing always available headers.
+	 */
+	headers = sizeof(struct ethhdr);
+	if (sa_entry->attrs.family == AF_INET)
+		headers += sizeof(struct iphdr);
+	else
+		headers += sizeof(struct ipv6hdr);
+
+	success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
+	x->curlft.bytes += success_bytes - headers * success_packets;
 }
 
 static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
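The byte-count approximation above is easy to sanity-check with plain
numbers. Below is a minimal, self-contained C sketch of the same
arithmetic; the sample counter values and the fixed header sizes are
invented for illustration (the driver reads them from cached hardware
flow counters and sizeof() on the real header structs).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stand-ins for the cached flow-counter reads. */
	uint64_t packets = 1000, bytes = 1500000;
	uint64_t auth_packets = 5, auth_bytes = 7500;
	uint64_t trailer_packets = 2, trailer_bytes = 3000;
	uint64_t replay_packets = 3, replay_bytes = 4500;
	/* ethhdr (14) + iphdr (20), i.e. the IPv4 case. */
	uint64_t headers = 14 + 20;

	/* Successful traffic is the total minus every error counter. */
	uint64_t success_packets = packets - auth_packets -
				   trailer_packets - replay_packets;
	uint64_t success_bytes = bytes - auth_bytes -
				 trailer_bytes - replay_bytes;

	/* Approximate the SA payload by stripping the always-present
	 * headers once per successful packet: 990 packets and
	 * 1451340 bytes with these inputs.
	 */
	printf("packets: %llu, bytes: %llu\n",
	       (unsigned long long)success_packets,
	       (unsigned long long)(success_bytes -
				    headers * success_packets));
	return 0;
}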
@@ -5868,6 +5868,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 		kfree(priv->htb_qos_sq_stats[i]);
 	kvfree(priv->htb_qos_sq_stats);
 
+	if (priv->mqprio_rl) {
+		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+		mlx5e_mqprio_rl_free(priv->mqprio_rl);
+	}
+
 	memset(priv, 0, sizeof(*priv));
 }
 
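Patch 5's fix follows a common kernel teardown idiom: an optionally
allocated object must be cleaned up and then freed, behind a NULL
check, before the owner is wiped. A hedged, self-contained C sketch of
that idiom; all names here are illustrative stand-ins, not the
driver's:

#include <stdlib.h>
#include <string.h>

struct rl { int token_bucket; };        /* stand-in for mlx5e_mqprio_rl */
struct priv { struct rl *mqprio_rl; };  /* stand-in for mlx5e_priv */

static void rl_cleanup(struct rl *rl) { rl->token_bucket = 0; /* undo state */ }
static void rl_free(struct rl *rl) { free(rl); }

static void priv_cleanup(struct priv *priv)
{
	/* The rate-limit object only exists if mqprio was configured,
	 * so guard the teardown; cleanup then free, never just free.
	 */
	if (priv->mqprio_rl) {
		rl_cleanup(priv->mqprio_rl);
		rl_free(priv->mqprio_rl);
	}
	memset(priv, 0, sizeof(*priv)); /* no stale pointer survives */
}

int main(void)
{
	struct priv p = { .mqprio_rl = malloc(sizeof(struct rl)) };

	priv_cleanup(&p); /* safe whether or not mqprio_rl was set */
	return 0;
}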
@@ -1197,9 +1197,7 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
 	if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
 		return 1;
 
-	max_dev_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
-		      MLX5_CAP_GEN(dev, max_num_eqs) :
-		      1 << MLX5_CAP_GEN(dev, log_max_eq);
+	max_dev_eqs = mlx5_max_eq_cap_get(dev);
 
 	num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
 			max_dev_eqs - MLX5_MAX_ASYNC_EQS);
@@ -6,6 +6,9 @@
 #include "helper.h"
 #include "ofld.h"
 
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
 static bool
 esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
 				 const struct mlx5_vport *vport)
@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
 						struct mlx5_vport *vport)
 {
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *flow_rule;
+	bool created = false;
 	int err = 0;
 
+	if (!vport->ingress.acl) {
+		err = acl_ingress_ofld_setup(esw, vport);
+		if (err)
+			return err;
+		created = true;
+	}
+
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
 	flow_act.fg = vport->ingress.offloads.drop_grp;
 	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
 	if (IS_ERR(flow_rule)) {
 		err = PTR_ERR(flow_rule);
-		goto out;
+		goto err_out;
 	}
 
 	vport->ingress.offloads.drop_rule = flow_rule;
-out:
+	return 0;
+
+err_out:
+	/* Only destroy ingress acl created in this function. */
+	if (created)
+		esw_acl_ingress_ofld_cleanup(esw, vport);
 	return err;
 }
@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
 	}
 }
 
-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
-			       struct mlx5_vport *vport)
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
 	int num_ftes = 0;
 	int err;
 
-	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
-		return 0;
-
 	esw_acl_ingress_allow_rule_destroy(vport);
 
 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
@@ -347,6 +359,15 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 	return err;
 }
 
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
+		return 0;
+
+	return acl_ingress_ofld_setup(esw, vport);
+}
+
 void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
 				  struct mlx5_vport *vport)
 {
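The shape of patch 4 is worth calling out: the drop-rule path now
creates the ingress ACL on demand and, on failure, tears down only what
it created itself. A minimal C sketch of that "created" flag pattern,
with invented names standing in for the eswitch helpers:

#include <stdbool.h>
#include <stdlib.h>

struct vport { void *acl; };              /* stand-in for mlx5_vport */

static int acl_setup(struct vport *v)     /* stand-in for acl_ingress_ofld_setup */
{
	v->acl = malloc(1);
	return v->acl ? 0 : -1;
}

static void acl_cleanup(struct vport *v)  /* stand-in for the ofld cleanup */
{
	free(v->acl);
	v->acl = NULL;
}

static int rule_add(struct vport *v)      /* stand-in for mlx5_add_flow_rules */
{
	return v->acl ? 0 : -1;
}

static int drop_rule_create(struct vport *v)
{
	bool created = false;
	int err;

	if (!v->acl) {            /* table may not exist yet: create on demand */
		err = acl_setup(v);
		if (err)
			return err;
		created = true;
	}

	err = rule_add(v);
	if (err && created)       /* undo only the setup this function did */
		acl_cleanup(v);
	return err;
}

int main(void)
{
	struct vport v = { .acl = NULL };

	return drop_rule_create(&v); /* works with or without a pre-existing ACL */
}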
@@ -4600,20 +4600,26 @@ mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
 		return -EOPNOTSUPP;
 	}
 
+	if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Device doesn't support getting the max number of EQs");
+		return -EOPNOTSUPP;
+	}
+
 	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
 	if (!query_ctx)
 		return -ENOMEM;
 
 	mutex_lock(&esw->state_lock);
 	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
-					    MLX5_CAP_GENERAL);
+					    MLX5_CAP_GENERAL_2);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
 		goto out;
 	}
 
 	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
-	max_eqs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_eqs);
+	max_eqs = MLX5_GET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b);
 	if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
 		*max_io_eqs = 0;
 	else
@@ -4644,6 +4650,12 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
 		return -EOPNOTSUPP;
 	}
 
+	if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Device doesn't support changing the max number of EQs");
+		return -EOPNOTSUPP;
+	}
+
 	if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) {
 		NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
 		return -EINVAL;
@@ -4655,17 +4667,17 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
 
 	mutex_lock(&esw->state_lock);
 	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
-					    MLX5_CAP_GENERAL);
+					    MLX5_CAP_GENERAL_2);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
 		goto out;
 	}
 
 	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
-	MLX5_SET(cmd_hca_cap, hca_caps, max_num_eqs, max_eqs);
+	MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs);
 
 	err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
-					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
 	if (err)
 		NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");
 
@@ -383,4 +383,14 @@ static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vpo
 			  : vport;
 }
 
+static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
+{
+	if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b))
+		return MLX5_CAP_GEN_2(dev, max_num_eqs_24b);
+
+	if (MLX5_CAP_GEN(dev, max_num_eqs))
+		return MLX5_CAP_GEN(dev, max_num_eqs);
+
+	return 1 << MLX5_CAP_GEN(dev, log_max_eq);
+}
+
 #endif /* __MLX5_CORE_H__ */
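The new helper encodes a three-way precedence among capability fields:
prefer the 24-bit cap, fall back to the legacy 8-bit cap, and finally
to the log2 cap. A small standalone C model of that precedence, where
plain integers replace the MLX5_CAP_GEN*() reads and all values are
made up:

#include <stdio.h>

static int max_eq_cap(unsigned int max_num_eqs_24b,
		      unsigned int max_num_eqs,
		      unsigned int log_max_eq)
{
	if (max_num_eqs_24b)         /* new 24-bit field wins if present */
		return max_num_eqs_24b;
	if (max_num_eqs)             /* then the legacy field */
		return max_num_eqs;
	return 1 << log_max_eq;      /* oldest devices: log2 encoding */
}

int main(void)
{
	printf("%d\n", max_eq_cap(1024, 0, 0)); /* new cap: 1024 */
	printf("%d\n", max_eq_cap(0, 255, 7));  /* legacy cap: 255 */
	printf("%d\n", max_eq_cap(0, 0, 7));    /* 1 << 7 = 128 */
	return 0;
}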
@@ -711,9 +711,7 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
 
 int mlx5_irq_table_create(struct mlx5_core_dev *dev)
 {
-	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
-		      MLX5_CAP_GEN(dev, max_num_eqs) :
-		      1 << MLX5_CAP_GEN(dev, log_max_eq);
+	int num_eqs = mlx5_max_eq_cap_get(dev);
 	int total_vec;
 	int pcif_vec;
 	int req_vec;
@@ -2029,7 +2029,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8	   pcc_ifa2[0x1];
 	u8	   reserved_at_3f1[0xf];
 
-	u8	   reserved_at_400[0x400];
+	u8	   reserved_at_400[0x40];
+
+	u8	   reserved_at_440[0x8];
+	u8	   max_num_eqs_24b[0x18];
+	u8	   reserved_at_460[0x3a0];
 };
 
 enum mlx5_ifc_flow_destination_type {
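When a reserved span in mlx5_ifc.h is split like this, the new fields
must cover exactly the old 0x400 bits, or every later field in
mlx5_ifc_cmd_hca_cap_2_bits would shift. A one-line compile-time check
(not part of the patch) confirms the arithmetic:

/* 0x40 + 0x8 + 0x18 + 0x3a0 == 0x400, so offsets after the split are
 * unchanged.
 */
_Static_assert(0x40 + 0x8 + 0x18 + 0x3a0 == 0x400,
	       "max_num_eqs_24b split must preserve cmd_hca_cap_2 layout");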