Commit 0ca9c2e2 authored by Cheng Xu, committed by Leon Romanovsky

RDMA/erdma: Implement atomic operations support

Add atomic operations support in post_send and poll_cq implementation.
Also, rename 'laddr' and 'lkey' in struct erdma_sge to 'addr' and 'key',
because this structure is used for both local and remote SGEs.
Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
Link: https://lore.kernel.org/r/20221107021845.44598-4-chengyou@linux.alibaba.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 71c6925f
......@@ -64,6 +64,8 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
[ERDMA_OP_REG_MR] = IB_WC_REG_MR,
[ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
[ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
[ERDMA_OP_ATOMIC_CAS] = IB_WC_COMP_SWAP,
[ERDMA_OP_ATOMIC_FAD] = IB_WC_FETCH_ADD,
};
static const struct {
......
......@@ -344,9 +344,9 @@ struct erdma_cqe {
};
/*
 * Scatter/gather element shared by local and remote descriptors.
 *
 * NOTE(review): this span is diff residue — both the pre-patch field
 * names (laddr/lkey) and their post-patch renames (addr/key) appear
 * below. Per the commit message, the patched struct keeps only
 * addr/length/key, renamed because the structure describes both local
 * and remote SGEs.
 */
struct erdma_sge {
__aligned_le64 laddr;
__aligned_le64 addr;
__le32 length;
__le32 lkey;
__le32 key;
};
/* Receive Queue Element */
......@@ -413,6 +413,16 @@ struct erdma_readreq_sqe {
__le32 rsvd;
};
/*
 * Send queue element for atomic operations (compare-and-swap /
 * fetch-and-add), built by erdma_push_one_sqe() for
 * IB_WR_ATOMIC_CMP_AND_SWP and IB_WR_ATOMIC_FETCH_AND_ADD.
 * All fields are little-endian as seen by the device.
 */
struct erdma_atomic_sqe {
__le64 hdr;
__le64 rsvd;
/* CAS: the swap value; FAD: the value to add (compare_add) */
__le64 fetchadd_swap_data;
/* compare value — written only on the CAS path */
__le64 cmp_data;
/* remote target: atomic_wr remote_addr/rkey */
struct erdma_sge remote;
/* local SGE from sg_list[0]; presumably receives the returned
 * original value — confirm against the device spec */
struct erdma_sge sgl;
};
struct erdma_reg_mr_sqe {
__le64 hdr;
__le64 addr;
......@@ -472,7 +482,9 @@ enum erdma_opcode {
ERDMA_OP_REG_MR = 14,
ERDMA_OP_LOCAL_INV = 15,
ERDMA_OP_READ_WITH_INV = 16,
ERDMA_NUM_OPCODES = 17,
ERDMA_OP_ATOMIC_CAS = 17,
ERDMA_OP_ATOMIC_FAD = 18,
ERDMA_NUM_OPCODES = 19,
ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
};
......
......@@ -285,15 +285,16 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
u32 idx = *pi & (qp->attrs.sq_size - 1);
enum ib_wr_opcode op = send_wr->opcode;
struct erdma_atomic_sqe *atomic_sqe;
struct erdma_readreq_sqe *read_sqe;
struct erdma_reg_mr_sqe *regmr_sge;
struct erdma_write_sqe *write_sqe;
struct erdma_send_sqe *send_sqe;
struct ib_rdma_wr *rdma_wr;
struct erdma_mr *mr;
struct erdma_sge *sge;
__le32 *length_field;
struct erdma_mr *mr;
u64 wqe_hdr, *entry;
struct ib_sge *sge;
u32 attrs;
int ret;
......@@ -360,9 +361,9 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
qp->attrs.sq_size, SQEBB_SHIFT);
sge->addr = rdma_wr->remote_addr;
sge->lkey = rdma_wr->rkey;
sge->length = send_wr->sg_list[0].length;
sge->addr = cpu_to_le64(rdma_wr->remote_addr);
sge->key = cpu_to_le32(rdma_wr->rkey);
sge->length = cpu_to_le32(send_wr->sg_list[0].length);
wqe_size = sizeof(struct erdma_readreq_sqe) +
send_wr->num_sge * sizeof(struct ib_sge);
......@@ -423,6 +424,35 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
wqe_size = sizeof(struct erdma_reg_mr_sqe);
goto out;
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
atomic_sqe = (struct erdma_atomic_sqe *)entry;
if (op == IB_WR_ATOMIC_CMP_AND_SWP) {
wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
ERDMA_OP_ATOMIC_CAS);
atomic_sqe->fetchadd_swap_data =
cpu_to_le64(atomic_wr(send_wr)->swap);
atomic_sqe->cmp_data =
cpu_to_le64(atomic_wr(send_wr)->compare_add);
} else {
wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
ERDMA_OP_ATOMIC_FAD);
atomic_sqe->fetchadd_swap_data =
cpu_to_le64(atomic_wr(send_wr)->compare_add);
}
sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
qp->attrs.sq_size, SQEBB_SHIFT);
sge->addr = cpu_to_le64(atomic_wr(send_wr)->remote_addr);
sge->key = cpu_to_le32(atomic_wr(send_wr)->rkey);
sge++;
sge->addr = cpu_to_le64(send_wr->sg_list[0].addr);
sge->key = cpu_to_le32(send_wr->sg_list[0].lkey);
sge->length = cpu_to_le32(send_wr->sg_list[0].length);
wqe_size = sizeof(*atomic_sqe);
goto out;
default:
return -EOPNOTSUPP;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment