Commit 0b4f5add authored by David S. Miller

Merge branch 'mlx5-fixes'

Tariq Toukan says:

====================
mlx5 fixes 24-05-22

This patchset provides bug fixes to mlx5 core and Eth drivers.

Series generated against:
commit 9c91c7fa ("net: mana: Fix the extra HZ in mana_hwc_send_request")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 128d54fb 83fea49f
...
@@ -102,8 +102,14 @@ static inline void
 mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 {
         int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+        struct udphdr *udphdr;
 
-        udp_hdr(skb)->len = htons(payload_len);
+        if (skb->encapsulation)
+                udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+        else
+                udphdr = udp_hdr(skb);
+
+        udphdr->len = htons(payload_len);
 }
 
 struct mlx5e_accel_tx_state {
...
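Review note: the hunk above fixes UDP GSO for encapsulated traffic. `udp_hdr()` returns the outer UDP header, so for tunnelled packets the segment length must be written through `skb_inner_transport_header()` instead. Below is a minimal userspace sketch of that selection logic; `struct skb_model` and its fields are illustrative stand-ins for the skb accessors, not kernel types.

```c
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h> /* htons(), ntohs() */

struct udp_hdr_model { uint16_t source, dest, len, check; };

struct skb_model {
	int encapsulation;               /* models skb->encapsulation */
	struct udp_hdr_model *outer_udp; /* what udp_hdr(skb) returns */
	struct udp_hdr_model *inner_udp; /* skb_inner_transport_header(skb) */
	uint16_t gso_size;               /* skb_shinfo(skb)->gso_size */
};

static void udp_gso_set_len(struct skb_model *skb)
{
	uint16_t payload_len = skb->gso_size + sizeof(struct udp_hdr_model);

	/* The bug: always writing through the outer header corrupts
	 * tunnelled UDP GSO. The fix picks the inner header when the
	 * packet is encapsulated. */
	struct udp_hdr_model *udph =
		skb->encapsulation ? skb->inner_udp : skb->outer_udp;

	udph->len = htons(payload_len);
}

int main(void)
{
	struct udp_hdr_model outer = {0}, inner = {0};
	struct skb_model skb = { 1, &outer, &inner, 1400 };

	udp_gso_set_len(&skb);
	printf("inner len = %u, outer len = %u\n",
	       ntohs(inner.len), ntohs(outer.len)); /* 1408, 0 */
	return 0;
}
```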
...
@@ -750,8 +750,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 err_fs_ft:
         if (rx->allow_tunnel_mode)
                 mlx5_eswitch_unblock_encap(mdev);
-        mlx5_del_flow_rules(rx->status.rule);
-        mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+        mlx5_ipsec_rx_status_destroy(ipsec, rx);
 err_add:
         mlx5_destroy_flow_table(rx->ft.status);
 err_fs_ft_status:
...
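Review note: the error path above now unwinds through `mlx5_ipsec_rx_status_destroy()` instead of open-coding the two teardown calls, so the cleanup stays symmetric with the setup helper. A compilable sketch of that create/destroy pairing idiom follows; all names (`status_create`, `tables_create`, `struct ctx`) are hypothetical, not the driver's API.

```c
#include <stdio.h>

struct ctx { int status_up; };

/* Paired helpers: destroy undoes exactly what create set up. */
static int status_create(struct ctx *c)   { c->status_up = 1; return 0; }
static void status_destroy(struct ctx *c) { c->status_up = 0; }
static int tables_create(struct ctx *c)   { (void)c; return -1; /* force failure */ }

static int rx_create_sketch(struct ctx *c)
{
	int err = status_create(c);

	if (err)
		return err;

	err = tables_create(c);
	if (err)
		goto err_status;
	return 0;

err_status:
	/* The patch's point: call the paired destroy helper rather than
	 * open-coding individual teardown steps, which can drift out of
	 * sync with what the create path actually allocated. */
	status_destroy(c);
	return err;
}

int main(void)
{
	struct ctx c = {0};

	printf("rx_create_sketch: %d, status_up: %d\n",
	       rx_create_sketch(&c), c.status_up); /* -1, 0 */
	return 0;
}
```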
...
@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
         if (!x || !x->xso.offload_handle)
                 goto out_disable;
 
-        if (xo->inner_ipproto) {
-                /* Cannot support tunnel packet over IPsec tunnel mode
-                 * because we cannot offload three IP header csum
-                 */
-                if (x->props.mode == XFRM_MODE_TUNNEL)
-                        goto out_disable;
-
-                /* Only support UDP or TCP L4 checksum */
-                if (xo->inner_ipproto != IPPROTO_UDP &&
-                    xo->inner_ipproto != IPPROTO_TCP)
-                        goto out_disable;
-        }
+        /* Only support UDP or TCP L4 checksum */
+        if (xo->inner_ipproto &&
+            xo->inner_ipproto != IPPROTO_UDP &&
+            xo->inner_ipproto != IPPROTO_TCP)
+                goto out_disable;
 
         return features;
...
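Review note: the rewrite folds the `xo->inner_ipproto` guard into a single condition and deliberately drops the tunnel-mode restriction. For the UDP/TCP test itself the nested and folded forms are equivalent, which the quick userspace check below verifies exhaustively over all protocol numbers (sketch only, not driver code).

```c
#include <assert.h>
#include <netinet/in.h> /* IPPROTO_UDP, IPPROTO_TCP */

/* Old shape: test only runs inside the inner_ipproto guard. */
static int old_form(int inner_ipproto)
{
	if (inner_ipproto) {
		if (inner_ipproto != IPPROTO_UDP &&
		    inner_ipproto != IPPROTO_TCP)
			return 1; /* disable offload */
	}
	return 0;
}

/* New shape: guard folded into one condition. */
static int new_form(int inner_ipproto)
{
	return inner_ipproto &&
	       inner_ipproto != IPPROTO_UDP &&
	       inner_ipproto != IPPROTO_TCP;
}

int main(void)
{
	for (int proto = 0; proto < 256; proto++)
		assert(old_form(proto) == new_form(proto));
	return 0;
}
```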
...
@@ -3886,7 +3886,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
                 mlx5e_fold_sw_stats64(priv, stats);
         }
 
-        stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+        stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
 
         stats->rx_length_errors =
                 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
...
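Review note: `rx_out_of_buffer` counts packets the NIC dropped for lack of posted RX buffers, which matches the documented semantics of `rx_missed_errors` in `struct rtnl_link_stats64` better than `rx_dropped`. Both counters are exposed in sysfs, so the effect is easy to observe; a small reader sketch follows, with the interface name `eth0` as an assumption to adjust for your mlx5 netdev.

```c
#include <stdio.h>

static long read_stat(const char *ifname, const char *stat)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/net/%s/statistics/%s", ifname, stat);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	const char *ifname = "eth0"; /* assumption: your mlx5 interface */

	printf("rx_dropped:       %ld\n", read_stat(ifname, "rx_dropped"));
	printf("rx_missed_errors: %ld\n", read_stat(ifname, "rx_missed_errors"));
	return 0;
}
```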
...
@@ -1186,6 +1186,9 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
         ts_stats->err = 0;
         ts_stats->lost = 0;
 
+        if (!ptp)
+                goto out;
+
         /* Aggregate stats across all TCs */
         for (i = 0; i < ptp->num_tc; i++) {
                 struct mlx5e_ptp_cq_stats *stats =
...
@@ -1214,6 +1217,7 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
                 }
         }
 
+out:
         mutex_unlock(&priv->state_lock);
 }
...
...
@@ -153,6 +153,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
 
         *hopbyhop = 0;
         if (skb->encapsulation) {
-                ihs = skb_inner_tcp_all_headers(skb);
+                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+                        ihs = skb_inner_transport_offset(skb) +
+                              sizeof(struct udphdr);
+                else
+                        ihs = skb_inner_tcp_all_headers(skb);
                 stats->tso_inner_packets++;
                 stats->tso_inner_bytes += skb->len - ihs;
...
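Review note: for encapsulated UDP GSO, the inline header size (`ihs`) now ends right after the fixed 8-byte inner UDP header, whereas TCP still takes the full inner TCP header (options included) via `skb_inner_tcp_all_headers()`. A back-of-the-envelope sketch with an assumed VXLAN-style layout; all offsets below are illustrative, not taken from the driver.

```c
#include <stdio.h>

#define UDP_HDR_LEN 8 /* UDP header is fixed-size; no options */

int main(void)
{
	/* Assumed layout: outer Eth(14) + IP(20) + UDP(8) + VXLAN(8)
	 * + inner Eth(14) + inner IP(20) = 84 bytes to the inner
	 * transport header. */
	unsigned int inner_transport_offset = 14 + 20 + 8 + 8 + 14 + 20;

	unsigned int udp_ihs = inner_transport_offset + UDP_HDR_LEN;
	unsigned int tcp_ihs = inner_transport_offset + 20 + 12; /* TCP + options */

	printf("UDP GSO ihs: %u bytes\n", udp_ihs); /* 92 */
	printf("TCP GSO ihs: %u bytes\n", tcp_ihs); /* 116 */
	return 0;
}
```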
...
@@ -719,6 +719,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
         struct mlx5_core_dev *dev;
         u8 mode;
 #endif
+        bool roce_support;
         int i;
 
         for (i = 0; i < ldev->ports; i++)
...
@@ -743,6 +744,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
                 if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
                         return false;
 #endif
+        roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
+        for (i = 1; i < ldev->ports; i++)
+                if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
+                        return false;
+
         return true;
 }
...
@@ -910,8 +916,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
         } else if (roce_lag) {
                 dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
                 mlx5_rescan_drivers_locked(dev0);
-                for (i = 1; i < ldev->ports; i++)
-                        mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+                for (i = 1; i < ldev->ports; i++) {
+                        if (mlx5_get_roce_state(ldev->pf[i].dev))
+                                mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+                }
         } else if (shared_fdb) {
                 int i;
...
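Review note: the prerequisite check above now refuses LAG when the ports disagree on RoCE state, and `mlx5_do_bond()` only enables RoCE on ports that actually report it. A plain-C model of the consistency predicate, with hypothetical names standing in for the per-port RoCE state query:

```c
#include <assert.h>
#include <stdbool.h>

/* All ports must agree with port 0's RoCE state, mirroring the
 * check added to mlx5_lag_check_prereq(). */
static bool roce_states_consistent(const bool *roce_state, int ports)
{
	for (int i = 1; i < ports; i++)
		if (roce_state[i] != roce_state[0])
			return false;
	return true;
}

int main(void)
{
	bool all_on[] = { true, true };
	bool mixed[]  = { true, false };

	assert(roce_states_consistent(all_on, 2));
	assert(!roce_states_consistent(mixed, 2));
	return 0;
}
```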
...
@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
 
 static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
 {
-        /* Feature is currently implemented for PFs only */
-        if (!mlx5_core_is_pf(dev))
-                return false;
-
         /* Honor the SW implementation limit */
         if (host_buses > MLX5_SD_MAX_GROUP_SZ)
                 return false;
...
@@ -162,6 +158,14 @@ static int sd_init(struct mlx5_core_dev *dev)
         bool sdm;
         int err;
 
+        /* Feature is currently implemented for PFs only */
+        if (!mlx5_core_is_pf(dev))
+                return 0;
+
+        /* Block on embedded CPU PFs */
+        if (mlx5_core_is_ecpf(dev))
+                return 0;
+
         if (!MLX5_CAP_MCAM_REG(dev, mpir))
                 return 0;
...
...
@@ -10308,9 +10308,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
         u8         mfrl[0x1];
         u8         regs_39_to_32[0x8];
 
-        u8         regs_31_to_10[0x16];
+        u8         regs_31_to_11[0x15];
         u8         mtmp[0x1];
-        u8         regs_8_to_0[0x9];
+        u8         regs_9_to_0[0xa];
 };
 
 struct mlx5_ifc_mcam_access_reg_bits1 {
...
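Review note: the bitfield fix relocates `mtmp` from bit 9 to bit 10 of the MCAM access-register mask while keeping the covered range at exactly 32 bits (mlx5_ifc structs list fields MSB first). The arithmetic visible in the diff can be checked directly:

```c
#include <assert.h>

int main(void)
{
	/* Old layout: regs_31_to_10[0x16] + mtmp[0x1] + regs_8_to_0[0x9]
	 * puts mtmp at bit 9. 0x16 = 22 bits covering 31..10. */
	assert(0x16 + 0x1 + 0x9 == 32);

	/* New layout: regs_31_to_11[0x15] + mtmp[0x1] + regs_9_to_0[0xa]
	 * puts mtmp at bit 10. 0x15 = 21 bits covering 31..11. */
	assert(0x15 + 0x1 + 0xa == 32);
	return 0;
}
```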