Commit fdb09ed1 authored by Boshi Yu, committed by Leon Romanovsky

RDMA/erdma: Unify the names related to doorbell records

Two different names are currently used for the doorbell records: db_info and
db_record. Unify them: use dbrec for the CPU address of the doorbell record
and dbrec_dma for its DMA address throughout the driver.
Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240311113821.22482-3-boshiyu@alibaba-inc.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent f0697bf0
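The rename follows one pattern throughout the driver: each queue keeps a dbrec pointer (CPU address of its doorbell record) next to a dbrec_dma handle (DMA address), and both are filled by a single dma_pool_zalloc() call against dev->db_pool. A minimal sketch of that pattern is shown below; struct demo_queue and demo_queue_init() are hypothetical names used only for illustration, and the register write is borrowed from the cmdq SQ path in this patch.

struct demo_queue {
	u64 *dbrec;		/* CPU address of the doorbell record */
	dma_addr_t dbrec_dma;	/* DMA address of the same record */
};

static int demo_queue_init(struct erdma_dev *dev, struct demo_queue *q)
{
	/* One allocation returns both the CPU pointer and the DMA handle. */
	q->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &q->dbrec_dma);
	if (!q->dbrec)
		return -ENOMEM;

	/* Tell the device where the doorbell record lives in host memory. */
	erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG, q->dbrec_dma);

	return 0;
}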
@@ -33,8 +33,8 @@ struct erdma_eq {
 	atomic64_t notify_num;
 
 	void __iomem *db;
-	u64 *db_record;
-	dma_addr_t db_record_dma_addr;
+	u64 *dbrec;
+	dma_addr_t dbrec_dma;
 };
 
 struct erdma_cmdq_sq {
@@ -49,8 +49,8 @@ struct erdma_cmdq_sq {
 	u16 wqebb_cnt;
 
-	u64 *db_record;
-	dma_addr_t db_record_dma_addr;
+	u64 *dbrec;
+	dma_addr_t dbrec_dma;
 };
 
 struct erdma_cmdq_cq {
@@ -63,8 +63,8 @@ struct erdma_cmdq_cq {
 	u32 ci;
 	u32 cmdsn;
 
-	u64 *db_record;
-	dma_addr_t db_record_dma_addr;
+	u64 *dbrec;
+	dma_addr_t dbrec_dma;
 
 	atomic64_t armed_num;
 };
...
@@ -14,7 +14,7 @@ static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
 		      FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
 		      FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
 
-	*cmdq->cq.db_record = db_data;
+	*cmdq->cq.dbrec = db_data;
 	writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
 
 	atomic64_inc(&cmdq->cq.armed_num);
@@ -25,7 +25,7 @@ static void kick_cmdq_db(struct erdma_cmdq *cmdq)
 	struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
 	u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
 
-	*cmdq->sq.db_record = db_data;
+	*cmdq->sq.dbrec = db_data;
 	writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
 }
@@ -98,9 +98,8 @@ static int erdma_cmdq_sq_init(struct erdma_dev *dev)
 	if (!sq->qbuf)
 		return -ENOMEM;
 
-	sq->db_record = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
-					&sq->db_record_dma_addr);
-	if (!sq->db_record)
+	sq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &sq->dbrec_dma);
+	if (!sq->dbrec)
 		goto err_out;
 
 	spin_lock_init(&sq->lock);
@@ -110,8 +109,7 @@ static int erdma_cmdq_sq_init(struct erdma_dev *dev)
 	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
 			  lower_32_bits(sq->qbuf_dma_addr));
 	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
-	erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG,
-			  sq->db_record_dma_addr);
+	erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG, sq->dbrec_dma);
 
 	return 0;
@@ -136,9 +134,8 @@ static int erdma_cmdq_cq_init(struct erdma_dev *dev)
 	spin_lock_init(&cq->lock);
 
-	cq->db_record = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
-					&cq->db_record_dma_addr);
-	if (!cq->db_record)
+	cq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &cq->dbrec_dma);
+	if (!cq->dbrec)
 		goto err_out;
 
 	atomic64_set(&cq->armed_num, 0);
@@ -147,8 +144,7 @@ static int erdma_cmdq_cq_init(struct erdma_dev *dev)
 			  upper_32_bits(cq->qbuf_dma_addr));
 	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
 			  lower_32_bits(cq->qbuf_dma_addr));
-	erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG,
-			  cq->db_record_dma_addr);
+	erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG, cq->dbrec_dma);
 
 	return 0;
@@ -175,9 +171,8 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
 	atomic64_set(&eq->event_num, 0);
 
 	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
-	eq->db_record = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
-					&eq->db_record_dma_addr);
-	if (!eq->db_record)
+	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+	if (!eq->dbrec)
 		goto err_out;
 
 	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
@@ -185,8 +180,7 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
 	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
 			  lower_32_bits(eq->qbuf_dma_addr));
 	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
-	erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG,
-			  eq->db_record_dma_addr);
+	erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
 
 	return 0;
@@ -231,15 +225,13 @@ int erdma_cmdq_init(struct erdma_dev *dev)
 	dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
 			  cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
 
-	dma_pool_free(dev->db_pool, cmdq->cq.db_record,
-		      cmdq->cq.db_record_dma_addr);
+	dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
 
 err_destroy_sq:
 	dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
 			  cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
 
-	dma_pool_free(dev->db_pool, cmdq->sq.db_record,
-		      cmdq->sq.db_record_dma_addr);
+	dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
 
 	return err;
 }
@@ -260,20 +252,17 @@ void erdma_cmdq_destroy(struct erdma_dev *dev)
 	dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
 			  cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
 
-	dma_pool_free(dev->db_pool, cmdq->eq.db_record,
-		      cmdq->eq.db_record_dma_addr);
+	dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);
 
 	dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
 			  cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
 
-	dma_pool_free(dev->db_pool, cmdq->sq.db_record,
-		      cmdq->sq.db_record_dma_addr);
+	dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
 
 	dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
 			  cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
 
-	dma_pool_free(dev->db_pool, cmdq->cq.db_record,
-		      cmdq->cq.db_record_dma_addr);
+	dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
 }
 
 static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
...
@@ -26,7 +26,7 @@ static void notify_cq(struct erdma_cq *cq, u8 solcitied)
 		      FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
 		      FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);
 
-	*cq->kern_cq.db_record = db_data;
+	*cq->kern_cq.dbrec = db_data;
 	writeq(db_data, cq->kern_cq.db);
 }
...
@@ -13,7 +13,7 @@ void notify_eq(struct erdma_eq *eq)
 	u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
 		      FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
 
-	*eq->db_record = db_data;
+	*eq->dbrec = db_data;
 	writeq(db_data, eq->db);
 
 	atomic64_inc(&eq->notify_num);
@@ -97,9 +97,8 @@ int erdma_aeq_init(struct erdma_dev *dev)
 	atomic64_set(&eq->notify_num, 0);
 
 	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
-	eq->db_record = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
-					&eq->db_record_dma_addr);
-	if (!eq->db_record)
+	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+	if (!eq->dbrec)
 		goto err_out;
 
 	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
@@ -107,8 +106,7 @@ int erdma_aeq_init(struct erdma_dev *dev)
 	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
 			  lower_32_bits(eq->qbuf_dma_addr));
 	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
-	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
-			  eq->db_record_dma_addr);
+	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
 
 	return 0;
@@ -126,7 +124,7 @@ void erdma_aeq_destroy(struct erdma_dev *dev)
 	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
 			  eq->qbuf_dma_addr);
 
-	dma_pool_free(dev->db_pool, eq->db_record, eq->db_record_dma_addr);
+	dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
 }
 
 void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
@@ -226,8 +224,8 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
 	req.qtype = ERDMA_EQ_TYPE_CEQ;
 	/* Vector index is the same as EQN. */
 	req.vector_idx = eqn;
-	req.db_dma_addr_l = lower_32_bits(eq->db_record_dma_addr);
-	req.db_dma_addr_h = upper_32_bits(eq->db_record_dma_addr);
+	req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
+	req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
 
 	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
 }
@@ -251,9 +249,8 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
 	eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
 		 (ceqn + 1) * ERDMA_DB_SIZE;
 
-	eq->db_record = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
-					&eq->db_record_dma_addr);
-	if (!eq->db_record) {
+	eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+	if (!eq->dbrec) {
 		dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
 				  eq->qbuf, eq->qbuf_dma_addr);
 		return -ENOMEM;
@@ -290,7 +287,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
 	dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
 			  eq->qbuf_dma_addr);
 
-	dma_pool_free(dev->db_pool, eq->db_record, eq->db_record_dma_addr);
+	dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
 }
 
 int erdma_ceqs_init(struct erdma_dev *dev)
...
@@ -240,7 +240,7 @@ struct erdma_cmdq_create_cq_req {
 	u32 qbuf_addr_l;
 	u32 qbuf_addr_h;
 	u32 cfg1;
-	u64 cq_db_info_addr;
+	u64 cq_dbrec_dma;
 	u32 first_page_offset;
 	u32 cfg2;
 };
@@ -335,8 +335,8 @@ struct erdma_cmdq_create_qp_req {
 	u64 rq_buf_addr;
 	u32 sq_mtt_cfg;
 	u32 rq_mtt_cfg;
-	u64 sq_db_info_dma_addr;
-	u64 rq_db_info_dma_addr;
+	u64 sq_dbrec_dma;
+	u64 rq_dbrec_dma;
 
 	u64 sq_mtt_entry[3];
 	u64 rq_mtt_entry[3];
...
@@ -492,7 +492,7 @@ static void kick_sq_db(struct erdma_qp *qp, u16 pi)
 	u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
 		      FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
 
-	*(u64 *)qp->kern_qp.sq_db_info = db_data;
+	*(u64 *)qp->kern_qp.sq_dbrec = db_data;
 	writeq(db_data, qp->kern_qp.hw_sq_db);
 }
@@ -557,7 +557,7 @@ static int erdma_post_recv_one(struct erdma_qp *qp,
 		return -EINVAL;
 	}
 
-	*(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
+	*(u64 *)qp->kern_qp.rq_dbrec = *(u64 *)rqe;
 	writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
 
 	qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
...
@@ -76,8 +76,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
 		req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
 		req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
 
-		req.sq_db_info_dma_addr = qp->kern_qp.sq_db_info_dma_addr;
-		req.rq_db_info_dma_addr = qp->kern_qp.rq_db_info_dma_addr;
+		req.sq_dbrec_dma = qp->kern_qp.sq_dbrec_dma;
+		req.rq_dbrec_dma = qp->kern_qp.rq_dbrec_dma;
 	} else {
 		user_qp = &qp->user_qp;
 		req.sq_cqn_mtt_cfg = FIELD_PREP(
@@ -105,8 +105,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
 		assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
 					  &req.rq_buf_addr, req.rq_mtt_entry);
 
-		req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
-		req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
+		req.sq_dbrec_dma = user_qp->sq_dbrec_dma;
+		req.rq_dbrec_dma = user_qp->rq_dbrec_dma;
 
 		if (uctx->ext_db.enable) {
 			req.sq_cqn_mtt_cfg |=
@@ -207,7 +207,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
 				       ERDMA_MR_MTT_0LEVEL);
 
 		req.first_page_offset = 0;
-		req.cq_db_info_addr = cq->kern_cq.db_record_dma_addr;
+		req.cq_dbrec_dma = cq->kern_cq.dbrec_dma;
 	} else {
 		mem = &cq->user_cq.qbuf_mem;
 		req.cfg0 |=
@@ -230,7 +230,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
 				       mem->mtt_nents);
 
 		req.first_page_offset = mem->page_offset;
-		req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
+		req.cq_dbrec_dma = cq->user_cq.dbrec_dma;
 
 		if (uctx->ext_db.enable) {
 			req.cfg1 |= FIELD_PREP(
@@ -484,9 +484,9 @@ static void free_kernel_qp(struct erdma_qp *qp)
 				  qp->kern_qp.sq_buf,
 				  qp->kern_qp.sq_buf_dma_addr);
 
-	if (qp->kern_qp.sq_db_info)
-		dma_pool_free(dev->db_pool, qp->kern_qp.sq_db_info,
-			      qp->kern_qp.sq_db_info_dma_addr);
+	if (qp->kern_qp.sq_dbrec)
+		dma_pool_free(dev->db_pool, qp->kern_qp.sq_dbrec,
+			      qp->kern_qp.sq_dbrec_dma);
 
 	if (qp->kern_qp.rq_buf)
 		dma_free_coherent(&dev->pdev->dev,
@@ -494,9 +494,9 @@ static void free_kernel_qp(struct erdma_qp *qp)
 				  qp->kern_qp.rq_buf,
 				  qp->kern_qp.rq_buf_dma_addr);
 
-	if (qp->kern_qp.rq_db_info)
-		dma_pool_free(dev->db_pool, qp->kern_qp.rq_db_info,
-			      qp->kern_qp.rq_db_info_dma_addr);
+	if (qp->kern_qp.rq_dbrec)
+		dma_pool_free(dev->db_pool, qp->kern_qp.rq_dbrec,
+			      qp->kern_qp.rq_dbrec_dma);
 }
 
 static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
@@ -527,9 +527,9 @@ static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
 	if (!kqp->sq_buf)
 		goto err_out;
 
-	kqp->sq_db_info = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
-					  &kqp->sq_db_info_dma_addr);
-	if (!kqp->sq_db_info)
+	kqp->sq_dbrec =
+		dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->sq_dbrec_dma);
+	if (!kqp->sq_dbrec)
 		goto err_out;
 
 	size = qp->attrs.rq_size << RQE_SHIFT;
@@ -538,9 +538,9 @@ static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
 	if (!kqp->rq_buf)
 		goto err_out;
 
-	kqp->rq_db_info = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
-					  &kqp->rq_db_info_dma_addr);
-	if (!kqp->rq_db_info)
+	kqp->rq_dbrec =
+		dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->rq_dbrec_dma);
+	if (!kqp->rq_dbrec)
 		goto err_out;
 
 	return 0;
@@ -876,9 +876,8 @@ erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
 }
 
 static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
-			u64 va, u32 len, u64 db_info_va)
+			u64 va, u32 len, u64 dbrec_va)
 {
-	dma_addr_t db_info_dma_addr;
+	dma_addr_t dbrec_dma;
 	u32 rq_offset;
 	int ret;
@@ -901,14 +901,14 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
 	if (ret)
 		goto put_sq_mtt;
 
-	ret = erdma_map_user_dbrecords(uctx, db_info_va,
+	ret = erdma_map_user_dbrecords(uctx, dbrec_va,
 				       &qp->user_qp.user_dbr_page,
-				       &db_info_dma_addr);
+				       &dbrec_dma);
 	if (ret)
 		goto put_rq_mtt;
 
-	qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr;
-	qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE;
+	qp->user_qp.sq_dbrec_dma = dbrec_dma;
+	qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE;
 
 	return 0;
@@ -1251,8 +1251,8 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	if (rdma_is_kernel_res(&cq->ibcq.res)) {
 		dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
 				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
-		dma_pool_free(dev->db_pool, cq->kern_cq.db_record,
-			      cq->kern_cq.db_record_dma_addr);
+		dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
+			      cq->kern_cq.dbrec_dma);
 	} else {
 		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
 		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
@@ -1592,7 +1592,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
 	ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
 				       &cq->user_cq.user_dbr_page,
-				       &cq->user_cq.db_info_dma_addr);
+				       &cq->user_cq.dbrec_dma);
 	if (ret)
 		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
@@ -1609,9 +1609,9 @@ static int erdma_init_kernel_cq(struct erdma_cq *cq)
 	if (!cq->kern_cq.qbuf)
 		return -ENOMEM;
 
-	cq->kern_cq.db_record = dma_pool_zalloc(
-		dev->db_pool, GFP_KERNEL, &cq->kern_cq.db_record_dma_addr);
-	if (!cq->kern_cq.db_record)
+	cq->kern_cq.dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
+					    &cq->kern_cq.dbrec_dma);
+	if (!cq->kern_cq.dbrec)
 		goto err_out;
 
 	spin_lock_init(&cq->kern_cq.lock);
@@ -1690,8 +1690,8 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	} else {
 		dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
 				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
-		dma_pool_free(dev->db_pool, cq->kern_cq.db_record,
-			      cq->kern_cq.db_record_dma_addr);
+		dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
+			      cq->kern_cq.dbrec_dma);
 	}
 
 err_out_xa:
...
@@ -140,8 +140,8 @@ struct erdma_uqp {
 	struct erdma_mem sq_mem;
 	struct erdma_mem rq_mem;
 
-	dma_addr_t sq_db_info_dma_addr;
-	dma_addr_t rq_db_info_dma_addr;
+	dma_addr_t sq_dbrec_dma;
+	dma_addr_t rq_dbrec_dma;
 
 	struct erdma_user_dbrecords_page *user_dbr_page;
@@ -167,11 +167,11 @@ struct erdma_kqp {
 	void *rq_buf;
 	dma_addr_t rq_buf_dma_addr;
 
-	void *sq_db_info;
-	void *rq_db_info;
+	void *sq_dbrec;
+	void *rq_dbrec;
 
-	dma_addr_t sq_db_info_dma_addr;
-	dma_addr_t rq_db_info_dma_addr;
+	dma_addr_t sq_dbrec_dma;
+	dma_addr_t rq_dbrec_dma;
 
 	u8 sig_all;
 };
@@ -249,14 +249,14 @@ struct erdma_kcq_info {
 	spinlock_t lock;
 	u8 __iomem *db;
-	u64 *db_record;
-	dma_addr_t db_record_dma_addr;
+	u64 *dbrec;
+	dma_addr_t dbrec_dma;
 };
 
 struct erdma_ucq_info {
 	struct erdma_mem qbuf_mem;
 	struct erdma_user_dbrecords_page *user_dbr_page;
-	dma_addr_t db_info_dma_addr;
+	dma_addr_t dbrec_dma;
 };
 
 struct erdma_cq {
...