Commit e363f7de authored by Xi Wang's avatar Xi Wang Committed by Jason Gunthorpe

RDMA/hns: Optimize the wr opcode conversion from ib to hns

Simplify the wr opcode conversion from ib to hns by using a map table
instead of the switch-case statement.

Link: https://lore.kernel.org/r/1583839084-31579-4-git-send-email-liweihang@huawei.com
Signed-off-by: default avatarXi Wang <wangxi11@huawei.com>
Signed-off-by: default avatarWeihang Li <liweihang@huawei.com>
Reviewed-by: default avatarLeon Romanovsky <leonro@mellanox.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@mellanox.com>
parent 00a59d30
...@@ -56,6 +56,40 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, ...@@ -56,6 +56,40 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length); dseg->len = cpu_to_le32(sg->length);
} }
/*
 * mapped-value = 1 + real-value
 *
 * The hns wr opcode's real value starts from 0. In order to distinguish
 * between initialized and uninitialized map values, we add 1 to the actual
 * value when defining the mapping, so that validity can be identified by
 * checking whether the mapped value is greater than 0.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
	[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

/* Map from IB work request opcodes to hns hardware WQE opcodes (plus 1). */
static const u32 hns_roce_op_code[] = {
	HR_OPC_MAP(RDMA_WRITE,			RDMA_WRITE),
	HR_OPC_MAP(RDMA_WRITE_WITH_IMM,		RDMA_WRITE_WITH_IMM),
	HR_OPC_MAP(SEND,			SEND),
	HR_OPC_MAP(SEND_WITH_IMM,		SEND_WITH_IMM),
	HR_OPC_MAP(RDMA_READ,			RDMA_READ),
	HR_OPC_MAP(ATOMIC_CMP_AND_SWP,		ATOM_CMP_AND_SWAP),
	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD,	ATOM_FETCH_AND_ADD),
	HR_OPC_MAP(SEND_WITH_INV,		SEND_WITH_INV),
	HR_OPC_MAP(LOCAL_INV,			LOCAL_INV),
	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP,	ATOM_MSK_CMP_AND_SWAP),
	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD,	ATOM_MSK_FETCH_AND_ADD),
	HR_OPC_MAP(REG_MR,			FAST_REG_PMR),
};
/*
 * Translate an IB work request opcode into the corresponding hns hardware
 * WQE opcode via the hns_roce_op_code table.
 *
 * Returns HNS_ROCE_V2_WQE_OP_MASK when the IB opcode is out of the table's
 * range or has no mapping (a zero entry means uninitialized; valid entries
 * are stored as real value + 1).
 */
static u32 to_hr_opcode(u32 ib_opcode)
{
	u32 mapped;

	if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
		return HNS_ROCE_V2_WQE_OP_MASK;

	mapped = hns_roce_op_code[ib_opcode];
	if (!mapped)
		return HNS_ROCE_V2_WQE_OP_MASK;

	return mapped - 1;
}
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, const struct ib_reg_wr *wr) void *wqe, const struct ib_reg_wr *wr)
{ {
...@@ -303,7 +337,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -303,7 +337,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
void *wqe = NULL; void *wqe = NULL;
bool loopback; bool loopback;
u32 tmp_len; u32 tmp_len;
u32 hr_op;
u8 *smac; u8 *smac;
int nreq; int nreq;
int ret; int ret;
...@@ -517,76 +550,52 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, ...@@ -517,76 +550,52 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
wqe += sizeof(struct hns_roce_v2_rc_send_wqe); wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
switch (wr->opcode) { switch (wr->opcode) {
case IB_WR_RDMA_READ: case IB_WR_RDMA_READ:
hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
rc_sq_wqe->rkey = rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey); cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr); cpu_to_le64(rdma_wr(wr)->remote_addr);
break; break;
case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE:
hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
rc_sq_wqe->rkey = rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey); cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr); cpu_to_le64(rdma_wr(wr)->remote_addr);
break; break;
case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM:
hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
rc_sq_wqe->rkey = rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey); cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr); cpu_to_le64(rdma_wr(wr)->remote_addr);
break; break;
case IB_WR_SEND:
hr_op = HNS_ROCE_V2_WQE_OP_SEND;
break;
case IB_WR_SEND_WITH_INV:
hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
break;
case IB_WR_SEND_WITH_IMM:
hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
break;
case IB_WR_LOCAL_INV: case IB_WR_LOCAL_INV:
hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
roce_set_bit(rc_sq_wqe->byte_4, roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_SO_S, 1); V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
rc_sq_wqe->inv_key = rc_sq_wqe->inv_key =
cpu_to_le32(wr->ex.invalidate_rkey); cpu_to_le32(wr->ex.invalidate_rkey);
break; break;
case IB_WR_REG_MR: case IB_WR_REG_MR:
hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr)); set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr));
break; break;
case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_CMP_AND_SWP:
hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
rc_sq_wqe->rkey = rc_sq_wqe->rkey =
cpu_to_le32(atomic_wr(wr)->rkey); cpu_to_le32(atomic_wr(wr)->rkey);
rc_sq_wqe->va = rc_sq_wqe->va =
cpu_to_le64(atomic_wr(wr)->remote_addr); cpu_to_le64(atomic_wr(wr)->remote_addr);
break; break;
case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_ATOMIC_FETCH_AND_ADD:
hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
rc_sq_wqe->rkey = rc_sq_wqe->rkey =
cpu_to_le32(atomic_wr(wr)->rkey); cpu_to_le32(atomic_wr(wr)->rkey);
rc_sq_wqe->va = rc_sq_wqe->va =
cpu_to_le64(atomic_wr(wr)->remote_addr); cpu_to_le64(atomic_wr(wr)->remote_addr);
break; break;
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
hr_op =
HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
break;
case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
hr_op =
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
break;
default: default:
hr_op = HNS_ROCE_V2_WQE_OP_MASK;
break; break;
} }
roce_set_field(rc_sq_wqe->byte_4, roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op); V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
to_hr_opcode(wr->opcode));
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment