Commit 01f0f425 authored by Eric Dumazet, committed by David S. Miller

mlx4: do not fire tasklet unless necessary

All rx and tx netdev interrupts are handled respectively by
mlx4_en_rx_irq() and mlx4_en_tx_irq(), which simply schedule a NAPI poll.
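
For reference, these handlers do almost nothing in hard-IRQ context. A minimal sketch of their shape (simplified, not the exact driver code; the real mlx4_en_rx_irq() also handles the port-down case by re-arming the CQ instead of scheduling NAPI):

static void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);

	/* all real work is deferred to the NAPI poll loop */
	napi_schedule_irqoff(&cq->napi);
}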

But mlx4_eq_int() also fired the tasklet that services items queued via
mlx4_add_cq_to_tasklet(). Since mlx4_add_cq_to_tasklet() is only invoked
when a user-space CQE is handled, the tasklet usually ran with nothing to do.
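
The tasklet handler itself just drains that list. Roughly, as a simplified sketch of mlx4_cq_tasklet_cb() (the real function also drops the CQ reference taken at queue time and wakes up anyone waiting to free the CQ):

void mlx4_cq_tasklet_cb(unsigned long data)
{
	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
	struct mlx4_cq *cq, *tmp;
	unsigned long flags;

	/* splice out everything queued so far, then run the
	 * completion callbacks without holding the lock
	 */
	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(cq, tmp, &ctx->process_list, tasklet_ctx.list) {
		list_del_init(&cq->tasklet_ctx.list);
		cq->tasklet_ctx.comp(cq);	/* user-space CQ completion */
	}
}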

This is very confusing, as "mpstat -I SCPU ..." shows a huge number of
tasklet invocations.

This patch avoids this overhead by firing the tasklet directly from
mlx4_add_cq_to_tasklet() when the first item is queued, removing four
atomic operations per IRQ.
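
This is safe because tasklet_schedule() on an already-scheduled tasklet is effectively a no-op (the TASKLET_STATE_SCHED bit dedups it), so the kick is only needed on the empty-to-non-empty transition of the list; any CQ added before the tasklet runs is picked up by the same run. The generic shape of the idiom, with hypothetical names (kick_ctx, queue_and_kick) used purely for illustration:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct kick_ctx {			/* hypothetical illustration type */
	spinlock_t lock;
	struct list_head list;		/* items awaiting the tasklet */
	struct tasklet_struct task;
};

static void queue_and_kick(struct kick_ctx *ctx, struct list_head *item)
{
	unsigned long flags;
	bool kick;

	spin_lock_irqsave(&ctx->lock, flags);
	kick = list_empty(&ctx->list);	/* first item since the last drain? */
	list_add_tail(item, &ctx->list);
	if (kick)			/* otherwise a run is already pending */
		tasklet_schedule(&ctx->task);
	spin_unlock_irqrestore(&ctx->lock, flags);
}
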
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tariq Toukan <tariqt@mellanox.com>
Cc: Saeed Mahameed <saeedm@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 99d5ceee
drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -81,8 +81,9 @@ void mlx4_cq_tasklet_cb(unsigned long data)
 
 static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
 {
-	unsigned long flags;
 	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+	unsigned long flags;
+	bool kick;
 
 	spin_lock_irqsave(&tasklet_ctx->lock, flags);
 	/* When migrating CQs between EQs will be implemented, please note
@@ -92,7 +93,10 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
 	 */
 	if (list_empty_careful(&cq->tasklet_ctx.list)) {
 		atomic_inc(&cq->refcount);
+		kick = list_empty(&tasklet_ctx->list);
 		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+		if (kick)
+			tasklet_schedule(&tasklet_ctx->task);
 	}
 	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
 }
drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -494,7 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_eqe *eqe;
-	int cqn = -1;
+	int cqn;
 	int eqes_found = 0;
 	int set_ci = 0;
 	int port;
@@ -840,13 +840,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
 	eq_set_ci(eq, 1);
 
-	/* cqn is 24bit wide but is initialized such that its higher bits
-	 * are ones too. Thus, if we got any event, cqn's high bits should be off
-	 * and we need to schedule the tasklet.
-	 */
-	if (!(cqn & ~0xffffff))
-		tasklet_schedule(&eq->tasklet_ctx.task);
-
 	return eqes_found;
 }