Commit 854e9bf5 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2024-09-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2024-09-25

* tag 'mlx5-fixes-2024-09-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix crash caused by calling __xfrm_state_delete() twice
  net/mlx5e: SHAMPO, Fix overflow of hd_per_wq
  net/mlx5: HWS, changed E2BIG error to a negative return code
  net/mlx5: HWS, fixed double-free in error flow of creating SQ
  net/mlx5: Fix wrong reserved field in hca_cap_2 in mlx5_ifc
  net/mlx5e: Fix NULL deref in mlx5e_tir_builder_alloc()
  net/mlx5: Added cond_resched() to crdump collection
  net/mlx5: Fix error path in multi-packet WQE transmit
====================

Link: https://patch.msgid.link/20240925202013.45374-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents e5e3f369 7b124695
@@ -627,7 +627,7 @@ struct mlx5e_shampo_hd {
 	struct mlx5e_dma_info *info;
 	struct mlx5e_frag_page *pages;
 	u16 curr_page_index;
-	u16 hd_per_wq;
+	u32 hd_per_wq;
 	u16 hd_per_wqe;
 	unsigned long *bitmap;
 	u16 pi;
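For context on the SHAMPO hunk: hd_per_wq counts headers across the whole work queue, so it scales with both headers-per-WQE and the number of WQEs, and the product can exceed what a u16 holds. A minimal userspace sketch of the truncation (the ring sizes below are hypothetical, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical ring geometry: 512 headers per WQE, 256 WQEs. */
	uint32_t hd_per_wqe = 512;
	uint32_t wq_size = 256;

	uint16_t as_u16 = hd_per_wqe * wq_size;	/* wraps: 131072 % 65536 == 0 */
	uint32_t as_u32 = hd_per_wqe * wq_size;	/* 131072, as intended */

	printf("u16 hd_per_wq: %u, u32 hd_per_wq: %u\n",
	       (unsigned int)as_u16, (unsigned int)as_u32);
	return 0;
}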
@@ -23,6 +23,9 @@ struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify)
 	struct mlx5e_tir_builder *builder;
 
 	builder = kvzalloc(sizeof(*builder), GFP_KERNEL);
+	if (!builder)
+		return NULL;
+
 	builder->modify = modify;
 
 	return builder;
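This hunk restores the standard allocation-failure check: kvzalloc() returns NULL under memory pressure, and the old code dereferenced the result unconditionally. A userspace model of the same pattern, with calloc() standing in for kvzalloc() and all names hypothetical:

#include <stdlib.h>

struct builder {
	int modify;
};

static struct builder *builder_alloc(int modify)
{
	struct builder *b = calloc(1, sizeof(*b));	/* may fail */

	if (!b)
		return NULL;	/* the added check: no NULL deref below */
	b->modify = modify;
	return b;
}

int main(void)
{
	struct builder *b = builder_alloc(1);

	if (!b)
		return 1;	/* callers must handle the NULL, too */
	free(b);
	return 0;
}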
@@ -67,7 +67,6 @@ static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
 		return;
 
 	spin_lock_bh(&x->lock);
-	xfrm_state_check_expire(x);
 	if (x->km.state == XFRM_STATE_EXPIRED) {
 		sa_entry->attrs.drop = true;
 		spin_unlock_bh(&x->lock);
@@ -75,6 +74,13 @@ static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
 		mlx5e_accel_ipsec_fs_modify(sa_entry);
 		return;
 	}
+
+	if (x->km.state != XFRM_STATE_VALID) {
+		spin_unlock_bh(&x->lock);
+		return;
+	}
+
+	xfrm_state_check_expire(x);
 	spin_unlock_bh(&x->lock);
 
 	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
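The shape of the IPsec crash: xfrm_state_check_expire() may delete an expired state, and the old code ran it on every invocation of the work item, including on a state that was no longer valid, so __xfrm_state_delete() could run a second time. The fix bails out unless the state is still XFRM_STATE_VALID. A minimal model of that guard (the enum and function names below are hypothetical, not the xfrm API):

#include <stdio.h>

enum state { ST_VALID, ST_DEAD };

static void check_expire(enum state *s)
{
	/* Models xfrm_state_check_expire(): may delete the state. */
	*s = ST_DEAD;
	printf("state deleted\n");	/* deleting twice is the crash */
}

static void handle_limits(enum state *s)
{
	if (*s != ST_VALID)	/* the fix: never re-check a dead state */
		return;
	check_expire(s);
}

int main(void)
{
	enum state s = ST_VALID;

	handle_limits(&s);	/* deletes once */
	handle_limits(&s);	/* now returns early instead of re-deleting */
	return 0;
}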
@@ -642,7 +642,6 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	return;
 
 err_unmap:
-	mlx5e_dma_unmap_wqe_err(sq, 1);
 	sq->stats->dropped++;
 	dev_kfree_skb_any(skb);
 	mlx5e_tx_flush(sq);
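A plausible reading of this fix: err_unmap is reached when the new DMA mapping fails, i.e. before anything for this packet was recorded in the SQ's DMA fifo, so unmapping one entry actually released a previous, still-in-flight mapping. The model below is illustrative only (the fifo, dma_push() and unmap_err() names are hypothetical, not the driver's implementation):

#include <stdio.h>

#define FIFO_SZ 8

static int fifo[FIFO_SZ];
static unsigned int pc;	/* producer counter */

static void dma_push(int handle) { fifo[pc++ % FIFO_SZ] = handle; }

static void unmap_err(unsigned int n)	/* must only pop what was pushed */
{
	while (n--)
		printf("unmapping handle %d\n", fifo[--pc % FIFO_SZ]);
}

int main(void)
{
	dma_push(41);	/* mapping that belongs to an earlier WQE */
	/* the new mapping fails here, before any dma_push(42) ... */
	unmap_err(1);	/* BUG: releases handle 41, still in use */
	return 0;
}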
@@ -24,6 +24,11 @@
 	pci_write_config_dword((dev)->pdev, (dev)->vsc_addr + (offset), (val))
 #define VSC_MAX_RETRIES 2048
 
+/* Reading VSC registers can take relatively long time.
+ * Yield the cpu every 128 registers read.
+ */
+#define VSC_GW_READ_BLOCK_COUNT 128
+
 enum {
 	VSC_CTRL_OFFSET = 0x4,
 	VSC_COUNTER_OFFSET = 0x8,
@@ -273,6 +278,7 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
 {
 	unsigned int next_read_addr = 0;
 	unsigned int read_addr = 0;
+	unsigned int count = 0;
 
 	while (read_addr < length) {
 		if (mlx5_vsc_gw_read_fast(dev, read_addr, &next_read_addr,
@@ -280,6 +286,10 @@ int mlx5_vsc_gw_read_block_fast(struct mlx5_core_dev *dev, u32 *data,
 			return read_addr;
 
 		read_addr = next_read_addr;
+		if (++count == VSC_GW_READ_BLOCK_COUNT) {
+			cond_resched();
+			count = 0;
+		}
 	}
 	return length;
 }
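The crdump change applies a common kernel pattern: a long register-polling loop calls cond_resched() every fixed number of iterations so it does not monopolize the CPU. A userspace analogue with sched_yield() standing in for cond_resched() (the iteration counts are illustrative):

#include <sched.h>
#include <stdio.h>

#define READ_BLOCK_COUNT 128	/* yield after this many reads */

int main(void)
{
	unsigned int count = 0;

	for (unsigned int reg = 0; reg < 100000; reg++) {
		/* ... one slow register read would happen here ... */
		if (++count == READ_BLOCK_COUNT) {
			sched_yield();	/* kernel code uses cond_resched() */
			count = 0;
		}
	}
	printf("collection done\n");
	return 0;
}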
@@ -33,7 +33,7 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
 	 * and let the usual match creation path handle it,
 	 * both for good and bad flows.
 	 */
-	if (ret == E2BIG) {
+	if (ret == -E2BIG) {
 		is_complex = true;
 		mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
 	} else {
@@ -1845,7 +1845,7 @@ hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
 		return 0;
 	}
 
-	return E2BIG;
+	return -E2BIG;
 }
 
 static void
@@ -1931,7 +1931,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
 	/* Find the match definer layout for header layout match union */
 	ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
 	if (ret) {
-		if (ret == E2BIG)
+		if (ret == -E2BIG)
 			mlx5hws_dbg(ctx,
 				    "Failed to create match definer from header layout - E2BIG\n");
 		else
@@ -675,7 +675,7 @@ static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
 	if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) {
 		ret = mlx5hws_definer_mt_init(ctx, matcher->mt);
 		if (ret) {
-			if (ret == E2BIG)
+			if (ret == -E2BIG)
 				mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n");
 			return ret;
 		}
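The three HWS hunks above fix the same bug class: kernel functions report failure as a negative errno, so returning the positive constant E2BIG, or comparing against it, never matches the real negative return and the intended fallback path is silently skipped. A small demonstration of the convention (find_best_fit() is a made-up stand-in, not a driver function):

#include <errno.h>
#include <stdio.h>

static int find_best_fit(int fields)
{
	if (fields > 8)
		return -E2BIG;	/* failure: negative errno */
	return 0;		/* success */
}

int main(void)
{
	int ret = find_best_fit(16);

	if (ret == E2BIG)
		printf("positive compare matched (unreachable)\n");
	if (ret == -E2BIG)
		printf("negative compare matched, ret=%d\n", ret);
	return 0;
}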
@@ -653,6 +653,12 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
 	return err;
 }
 
+static void hws_send_ring_destroy_sq(struct mlx5_core_dev *mdev,
+				     struct mlx5hws_send_ring_sq *sq)
+{
+	mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
 static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
 {
 	void *in, *sqc;
@@ -696,7 +702,7 @@ static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
 	err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
 	if (err)
-		hws_send_ring_close_sq(sq);
+		hws_send_ring_destroy_sq(mdev, sq);
 
 	return err;
 }
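The SQ fix narrows cleanup ownership: calling the full hws_send_ring_close_sq() from this error path released resources that the caller's own error path releases again, producing a double free, so the new hws_send_ring_destroy_sq() undoes only the firmware SQ object. A userspace model of the ownership rule (all names hypothetical):

#include <stdlib.h>

struct ring {
	void *buf;	/* allocated by the opener, freed by the opener */
	int sq_created;
};

static void ring_destroy_sq(struct ring *r)	/* narrow undo: SQ object only */
{
	r->sq_created = 0;
}

static void ring_close(struct ring *r)		/* full teardown */
{
	ring_destroy_sq(r);
	free(r->buf);
}

static int ring_create_sq_rdy(struct ring *r)
{
	r->sq_created = 1;
	/* Pretend moving the SQ to ready failed. Old code did a full
	 * ring_close() here, freeing buf that the caller frees too.
	 */
	ring_destroy_sq(r);
	return -1;
}

int main(void)
{
	struct ring r = { malloc(64), 0 };

	if (ring_create_sq_rdy(&r))
		free(r.buf);	/* opener's error path: frees buf exactly once */
	else
		ring_close(&r);	/* normal teardown */
	return 0;
}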
@@ -2138,7 +2138,7 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8	   ts_cqe_metadata_size2wqe_counter[0x5];
 	u8	   reserved_at_250[0x10];
 
-	u8	   reserved_at_260[0x120];
+	u8	   reserved_at_260[0x20];
 
 	u8	   format_select_dw_gtpu_dw_0[0x8];
 	u8	   format_select_dw_gtpu_dw_1[0x8];
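The mlx5_ifc fix is pure offset arithmetic: in these layouts the field name encodes the starting bit offset, so each width must bridge exactly to the next field's offset. reserved_at_250 is 0x10 bits wide and ends at 0x260, so the gap before format_select_dw_gtpu_dw_0 must be 0x280 - 0x260 = 0x20 bits; the stray 0x120 shifted every later field by 0x100 bits. The check below restates that arithmetic (the 0x280 target offset is inferred from the surrounding fields in this hunk):

#include <assert.h>

int main(void)
{
	/* Offsets and widths from the hunk above, in bits. */
	unsigned int reserved_at_250 = 0x250, width_250 = 0x10;
	unsigned int reserved_at_260 = 0x260, width_fixed = 0x20;
	unsigned int width_broken = 0x120;

	assert(reserved_at_250 + width_250 == reserved_at_260);
	assert(reserved_at_260 + width_fixed == 0x280);	  /* next field lands right */
	assert(reserved_at_260 + width_broken != 0x280);  /* old value overshoots by 0x100 */
	return 0;
}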