Commit 3ac7afdb authored by Saeed Mahameed

net/mlx5: Move CQ completion and event forwarding logic to eq.c

Since the CQ tree is now per-EQ, CQ completion and event forwarding have
become an implementation detail of the EQ logic. This patch moves that
logic to eq.c and makes those functions static.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Gal Pressman <galp@mellanox.com>
parent f105b45b
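The helpers being moved follow a lookup-and-hold pattern: the per-EQ table lock protects only the radix-tree lookup, a reference is taken on the CQ before the lock is dropped, and the handler releases it with mlx5_cq_put() when done, so a CQ cannot be freed while an event for it is still being dispatched. Below is a minimal userspace sketch of that pattern; the toy_* names, the flat array standing in for the radix tree, and the pthread/stdatomic primitives are illustrative assumptions, not the mlx5 code itself.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define TABLE_SIZE 64	/* toy stand-in for the kernel radix tree */

	struct toy_cq {
		atomic_int refcount;	/* the table owns one reference */
		unsigned int cqn;
	};

	struct toy_cq_table {
		pthread_mutex_t lock;
		struct toy_cq *slots[TABLE_SIZE];
	};

	/* caller must eventually call toy_cq_put() on the returned cq */
	static struct toy_cq *toy_cq_get(struct toy_cq_table *table, unsigned int cqn)
	{
		struct toy_cq *cq = NULL;

		pthread_mutex_lock(&table->lock);
		if (cqn < TABLE_SIZE)
			cq = table->slots[cqn];
		if (cq)	/* take the reference while the lock is still held */
			atomic_fetch_add(&cq->refcount, 1);
		pthread_mutex_unlock(&table->lock);
		return cq;
	}

	static void toy_cq_put(struct toy_cq *cq)
	{
		/* dropping the last reference frees the object */
		if (atomic_fetch_sub(&cq->refcount, 1) == 1)
			free(cq);
	}

	static void toy_cq_completion(struct toy_cq_table *table, unsigned int cqn)
	{
		struct toy_cq *cq = toy_cq_get(table, cqn);

		if (!cq) {
			fprintf(stderr, "completion event for bogus CQ 0x%x\n", cqn);
			return;
		}
		printf("completion on CQ 0x%x\n", cq->cqn);
		toy_cq_put(cq);	/* balance the hold taken in toy_cq_get() */
	}

	int main(void)
	{
		struct toy_cq_table table = { .lock = PTHREAD_MUTEX_INITIALIZER };
		struct toy_cq *cq = malloc(sizeof(*cq));

		atomic_init(&cq->refcount, 1);	/* table's own reference */
		cq->cqn = 5;
		table.slots[5] = cq;

		toy_cq_completion(&table, 5);	/* found: dispatch, then put */
		toy_cq_completion(&table, 9);	/* bogus CQN: warn and bail */
		return 0;
	}

With the real helpers now static in eq.c, the EQ interrupt handler is their only caller, which is what allows the prototypes to be dropped from driver.h below.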
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -85,51 +85,6 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
 	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
 }
 
-/* caller must eventually call mlx5_cq_put on the returned cq */
-static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
-{
-	struct mlx5_cq_table *table = &eq->cq_table;
-	struct mlx5_core_cq *cq = NULL;
-
-	spin_lock(&table->lock);
-	cq = radix_tree_lookup(&table->tree, cqn);
-	if (likely(cq))
-		mlx5_cq_hold(cq);
-	spin_unlock(&table->lock);
-
-	return cq;
-}
-
-void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn)
-{
-	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
-
-	if (unlikely(!cq)) {
-		mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
-		return;
-	}
-
-	++cq->arm_sn;
-
-	cq->comp(cq);
-
-	mlx5_cq_put(cq);
-}
-
-void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
-{
-	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
-
-	if (unlikely(!cq)) {
-		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
-		return;
-	}
-
-	cq->event(cq, event_type);
-
-	mlx5_cq_put(cq);
-}
-
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			u32 *in, int inlen)
 {
...
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -393,6 +393,51 @@ static void general_event_handler(struct mlx5_core_dev *dev,
 	}
 }
 
+/* caller must eventually call mlx5_cq_put on the returned cq */
+static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
+{
+	struct mlx5_cq_table *table = &eq->cq_table;
+	struct mlx5_core_cq *cq = NULL;
+
+	spin_lock(&table->lock);
+	cq = radix_tree_lookup(&table->tree, cqn);
+	if (likely(cq))
+		mlx5_cq_hold(cq);
+	spin_unlock(&table->lock);
+
+	return cq;
+}
+
+static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
+{
+	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+	if (unlikely(!cq)) {
+		mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	++cq->arm_sn;
+
+	cq->comp(cq);
+
+	mlx5_cq_put(cq);
+}
+
+static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
+{
+	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+	if (unlikely(!cq)) {
+		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	cq->event(cq, event_type);
+
+	mlx5_cq_put(cq);
+}
+
 static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 {
 	struct mlx5_eq *eq = eq_ptr;
@@ -415,7 +460,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 		switch (eqe->type) {
 		case MLX5_EVENT_TYPE_COMP:
 			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
-			mlx5_cq_completion(eq, cqn);
+			mlx5_eq_cq_completion(eq, cqn);
 			break;
 		case MLX5_EVENT_TYPE_DCT_DRAINED:
 			rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
@@ -472,7 +517,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
 			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
 				       cqn, eqe->data.cq_err.syndrome);
-			mlx5_cq_event(eq, cqn, eqe->type);
+			mlx5_eq_cq_event(eq, cqn, eqe->type);
 			break;
 		case MLX5_EVENT_TYPE_PAGE_REQUEST:
...
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1049,12 +1049,10 @@ int mlx5_eq_init(struct mlx5_core_dev *dev);
 void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_eq *eq, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-void mlx5_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name,
 		       enum mlx5_eq_type type);
...