Commit 81bfa206 authored by Ariel Levkovich, committed by Saeed Mahameed

net/mlx5: Use a single IRQ for all async EQs

This patch modifies the IRQ allocation so that all async EQs are
assigned to the same IRQ, leaving more IRQs available for
completion EQs.

The change relies on the IRQ sharing and EQ polling budget support
introduced in previous patches: when the shared interrupt is
triggered, the kernel serially calls the handler of each sharing EQ
with a bounded budget of EQEs to poll, so that no single EQ can
starve the others.
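
As a rough illustration of that sharing scheme (a sketch only, not the
driver's actual code; EQ_POLL_BUDGET, demo_eq and poll_eqe are invented
names), the shared handler serially drains each EQ on the vector under
a bounded EQE budget:

#include <linux/interrupt.h>
#include <linux/list.h>

/* Sketch: several EQs hang off one IRQ; the handler grants each at
 * most EQ_POLL_BUDGET event-queue entries per interrupt, so a busy
 * EQ cannot starve the others sharing the vector. */
#define EQ_POLL_BUDGET 64

struct demo_eq {
	struct list_head list;                 /* linkage on the shared vector */
	bool (*poll_eqe)(struct demo_eq *eq);  /* consume one EQE; false if empty */
};

static irqreturn_t demo_shared_irq_handler(int irq, void *ctx)
{
	struct list_head *eqs = ctx;   /* all EQs sharing this vector */
	struct demo_eq *eq;

	list_for_each_entry(eq, eqs, list) {
		int budget = EQ_POLL_BUDGET;

		/* Serially service each sharing EQ, bounded by the budget. */
		while (budget-- && eq->poll_eqe(eq))
			;
	}
	return IRQ_HANDLED;
}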
Signed-off-by: Ariel Levkovich <lariel@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent cf49f41d
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1557,7 +1557,7 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_PFAULT_IDX,
+		.irq_index = 0,
 		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 		.nent = MLX5_IB_NUM_PF_EQE,
 		.nb = &eq->irq_nb,
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -250,7 +250,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	struct mlx5_cq_table *cq_table = &eq->cq_table;
 	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
 	struct mlx5_priv *priv = &dev->priv;
-	u8 vecidx = param->index;
+	u8 vecidx = param->irq_index;
 	__be64 *pas;
 	void *eqc;
 	int inlen;
@@ -435,8 +435,9 @@ static int create_async_eq(struct mlx5_core_dev *dev,
 	int err;

 	mutex_lock(&eq_table->lock);
-	if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
-		err = -ENOSPC;
+	/* Async EQs must share irq index 0 */
+	if (param->irq_index != 0) {
+		err = -EINVAL;
 		goto unlock;
 	}
@@ -540,7 +541,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_CMD_IDX,
+		.irq_index = 0,
 		.mask = 1ull << MLX5_EVENT_TYPE_CMD,
 		.nent = MLX5_NUM_CMD_EQE,
 		.nb = &table->cmd_eq.irq_nb,
@@ -555,7 +556,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_ASYNC_IDX,
+		.irq_index = 0,
 		.mask = gather_async_events_mask(dev),
 		.nent = MLX5_NUM_ASYNC_EQE,
 		.nb = &table->async_eq.irq_nb,
@@ -568,7 +569,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_PAGEREQ_IDX,
+		.irq_index = 0,
 		.mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
 		.nent = /* TODO: sriov max_vf + */ 1,
 		.nb = &table->pages_eq.irq_nb,
@@ -731,7 +732,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	ncomp_eqs = table->num_comp_eqs;
 	nent = MLX5_COMP_EQ_SIZE;
 	for (i = 0; i < ncomp_eqs; i++) {
-		int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
+		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
 		struct mlx5_eq_param param = {};

 		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
@@ -748,7 +749,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
 		param = (struct mlx5_eq_param) {
-			.index = vecidx,
+			.irq_index = vecidx,
 			.mask = 0,
 			.nent = nent,
 			.nb = &eq->irq_nb,
@@ -800,7 +801,7 @@ EXPORT_SYMBOL(mlx5_comp_vectors_count);
 struct cpumask *
 mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 {
-	int vecidx = vector + MLX5_EQ_VEC_COMP_BASE;
+	int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;

 	return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
 					  vecidx);
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -45,7 +45,7 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
 int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
 {
-	return table->nvec - MLX5_EQ_VEC_COMP_BASE;
+	return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
 }

 static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
@@ -81,24 +81,14 @@ static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)

 static void irq_set_name(char *name, int vecidx)
 {
-	switch (vecidx) {
-	case MLX5_EQ_CMD_IDX:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_cmd_eq");
-		break;
-	case MLX5_EQ_ASYNC_IDX:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async_eq");
-		break;
-	case MLX5_EQ_PAGEREQ_IDX:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_pages_eq");
-		break;
-	case MLX5_EQ_PFAULT_IDX:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_ib_page_fault_eq");
-		break;
-	default:
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
-			 vecidx - MLX5_EQ_VEC_COMP_BASE);
-		break;
+	if (vecidx == 0) {
+		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
+		return;
 	}
+
+	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
+		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
+	return;
 }

 static int request_irqs(struct mlx5_core_dev *dev, int nvec)
@@ -159,7 +149,7 @@ static int irq_set_rmap(struct mlx5_core_dev *mdev)
 		goto err_out;
 	}

-	vecidx = MLX5_EQ_VEC_COMP_BASE;
+	vecidx = MLX5_IRQ_VEC_COMP_BASE;
 	for (; vecidx < irq_table->nvec; vecidx++) {
 		err = irq_cpu_rmap_add(irq_table->rmap,
 				       pci_irq_vector(mdev->pdev, vecidx));
@@ -182,7 +172,7 @@ static int irq_set_rmap(struct mlx5_core_dev *mdev)
 static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
-	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+	int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
 	struct mlx5_irq *irq;
 	int irqn;
@@ -205,7 +195,7 @@ static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
 static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
-	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+	int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
 	struct mlx5_irq *irq;
 	int irqn;
@@ -279,16 +269,16 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
 	int err;

 	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
-	       MLX5_EQ_VEC_COMP_BASE;
+	       MLX5_IRQ_VEC_COMP_BASE;
 	nvec = min_t(int, nvec, num_eqs);
-	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
+	if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
 		return -ENOMEM;

 	table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
 	if (!table->irq)
 		return -ENOMEM;

-	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
+	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
 				     nvec, PCI_IRQ_MSIX);
 	if (nvec < 0) {
 		err = nvec;
...@@ -4,17 +4,7 @@ ...@@ -4,17 +4,7 @@
#ifndef MLX5_CORE_EQ_H #ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H #define MLX5_CORE_EQ_H
enum { #define MLX5_IRQ_VEC_COMP_BASE 1
MLX5_EQ_PAGEREQ_IDX = 0,
MLX5_EQ_CMD_IDX = 1,
MLX5_EQ_ASYNC_IDX = 2,
/* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */
MLX5_EQ_PFAULT_IDX = 3,
MLX5_EQ_MAX_ASYNC_EQS,
/* completion eqs vector indices start here */
MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS,
};
#define MLX5_NUM_CMD_EQE (32) #define MLX5_NUM_CMD_EQE (32)
#define MLX5_NUM_ASYNC_EQE (0x1000) #define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80) #define MLX5_NUM_SPARE_EQE (0x80)
...@@ -23,7 +13,7 @@ struct mlx5_eq; ...@@ -23,7 +13,7 @@ struct mlx5_eq;
struct mlx5_core_dev; struct mlx5_core_dev;
struct mlx5_eq_param { struct mlx5_eq_param {
u8 index; u8 irq_index;
int nent; int nent;
u64 mask; u64 mask;
struct notifier_block *nb; struct notifier_block *nb;
......
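
The net effect of the header change above: MLX5_IRQ_VEC_COMP_BASE drops
from 4 to 1, so MSI-X vector 0 carries all async EQs and completion EQ
i maps to vector i + 1. A small sketch of the resulting arithmetic
(illustrative helpers, not part of the driver):

#define MLX5_IRQ_VEC_COMP_BASE 1  /* vector 0 = shared async IRQ */

/* Completion EQ i sits on MSI-X vector i + 1. */
static inline int comp_eq_to_vector(int comp_eq_index)
{
	return comp_eq_index + MLX5_IRQ_VEC_COMP_BASE;
}

/* Everything past the single async vector is free for completion EQs;
 * before this patch, 4 vectors were reserved for async EQs. */
static inline int num_comp_vectors(int nvec_total)
{
	return nvec_total - MLX5_IRQ_VEC_COMP_BASE;
}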