Commit e7e0004a authored by Maxim Mikityanskiy, committed by Saeed Mahameed

net/mlx5e: Don't trigger IRQ multiple times on XSK wakeup to avoid WQ overruns

The XSK wakeup function triggers NAPI by posting a NOP WQE to a special
XSK ICOSQ. When the application floods the driver with wakeup requests
by calling sendto() in a certain pattern that ends up in
mlx5e_trigger_irq, the XSK ICOSQ may overflow.

Multiple NOPs are not required and won't accelerate the process, so
avoid posting a second NOP if there is one already on the way. This way
we also avoid increasing the queue size (which might not help anyway).
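
The fix amounts to a single-outstanding-doorbell handshake: the wakeup
path sets a "pending" bit before posting the NOP, and the completion
path clears it only after a completion was actually polled. Below is a
minimal userspace sketch of that pattern, assuming C11 atomics in place
of the kernel's test_and_set_bit()/clear_bit() on xskicosq.state; the
wakeup()/napi_poll() helpers and the wqes_posted counter are
hypothetical stand-ins for illustration, not driver code.

/* Sketch of the dedup handshake (illustrative, not driver code). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag pending = ATOMIC_FLAG_INIT; /* analogue of MLX5E_SQ_STATE_PENDING_XSK_TX */
static int wqes_posted;                        /* NOPs sitting in the queue */

/* Producer (wakeup path): only the first caller posts a NOP; later
 * callers see the flag already set and return, so at most one NOP is
 * ever outstanding and the queue cannot overflow.
 */
static void wakeup(void)
{
	if (atomic_flag_test_and_set(&pending))
		return;         /* a NOP is already on the way */
	wqes_posted++;          /* stands in for mlx5e_trigger_irq() */
}

/* Consumer (NAPI path): clear the flag only after a completion was
 * polled, mirroring "don't clear the flag if nothing was polled".
 */
static void napi_poll(void)
{
	if (wqes_posted) {      /* stands in for mlx5e_poll_ico_cq() > 0 */
		wqes_posted--;
		atomic_flag_clear(&pending);
	}
}

int main(void)
{
	wakeup();
	wakeup();               /* deduplicated: no second NOP is posted */
	printf("outstanding NOPs: %d\n", wqes_posted); /* 1, not 2 */
	napi_poll();
	wakeup();               /* allowed again after completion */
	printf("outstanding NOPs: %d\n", wqes_posted); /* 1 */
	return 0;
}

Because the bit is cleared only after a completion is observed, any
burst of sendto() calls collapses into at most one queued NOP, which is
what bounds the ICOSQ occupancy.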

Fixes: db05815b ("net/mlx5e: Add XSK zero-copy support")
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 70840b66
@@ -367,6 +367,7 @@ enum {
 	MLX5E_SQ_STATE_AM,
 	MLX5E_SQ_STATE_TLS,
 	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
+	MLX5E_SQ_STATE_PENDING_XSK_TX,
 };
 
 struct mlx5e_sq_wqe_info {
@@ -960,7 +961,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
...
@@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
 		return 0;
 
+	if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
+		return 0;
+
 	spin_lock(&c->xskicosq_lock);
 	mlx5e_trigger_irq(&c->xskicosq);
 	spin_unlock(&c->xskicosq_lock);
...
@@ -589,7 +589,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 	return !!err;
 }
 
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
 	struct mlx5_cqe64 *cqe;
@@ -597,11 +597,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 	int i;
 
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return;
+		return 0;
 
 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
 	if (likely(!cqe))
-		return;
+		return 0;
 
 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
 	 * otherwise a cq overrun may occur
@@ -650,6 +650,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 	sq->cc = sqcc;
 
 	mlx5_cqwq_update_db_record(&cq->wq);
+
+	return i;
 }
 
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
...
@@ -152,7 +152,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 						  mlx5e_post_rx_wqes,
 						  rq);
 	if (xsk_open) {
-		mlx5e_poll_ico_cq(&c->xskicosq.cq);
+		if (mlx5e_poll_ico_cq(&c->xskicosq.cq))
+			/* Don't clear the flag if nothing was polled to prevent
+			 * queueing more WQEs and overflowing XSKICOSQ.
+			 */
+			clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state);
 		busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
 		busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
 	}
...