Commit 400ddc11 authored by Roland Dreier

IB/mthca: Factor out setting WQE remote address and atomic segment entries

    
Factor code to set remote address and atomic segment entries out of the
work request posting functions into inline functions set_raddr_seg()
and set_atomic_seg().  This doesn't change the generated code in any
significant way, and makes the source easier on the eyes.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 0fbfa6a9
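
For reference, the two segments that the new helpers fill in are small fixed-layout structures. The sketch below restates their layout as implied by the cpu_to_be64()/cpu_to_be32() conversions in the diff; the authoritative declarations live in mthca_wqe.h, so treat this as an illustration, not the canonical definition:

/* Sketch of the WQE segment layouts assumed by set_raddr_seg() and
 * set_atomic_seg(); see mthca_wqe.h for the real declarations. */
struct mthca_raddr_seg {
	__be64 raddr;		/* remote virtual address (big-endian) */
	__be32 rkey;		/* remote memory key */
	u32    reserved;	/* must be cleared to zero */
};

struct mthca_atomic_seg {
	__be64 swap_add;	/* swap value for CMP_AND_SWP, add value for FETCH_AND_ADD */
	__be64 compare;		/* compare value; zero for FETCH_AND_ADD */
};

Both segments are 16 bytes, which is why the posting code below accumulates descriptor size in sizeof (...) / 16 units: the hardware counts WQE size in 16-byte chunks.
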
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1578,6 +1578,27 @@ static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
 	return cur + nreq >= wq->max;
 }
 
+static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
+					  u64 remote_addr, u32 rkey)
+{
+	rseg->raddr    = cpu_to_be64(remote_addr);
+	rseg->rkey     = cpu_to_be32(rkey);
+	rseg->reserved = 0;
+}
+
+static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
+					   struct ib_send_wr *wr)
+{
+	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
+	} else {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->compare  = 0;
+	}
+}
+
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			  struct ib_send_wr **bad_wr)
 {
@@ -1642,25 +1663,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.atomic.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.atomic.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+					      wr->wr.atomic.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 
-				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.swap);
-					((struct mthca_atomic_seg *) wqe)->compare =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-				} else {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-					((struct mthca_atomic_seg *) wqe)->compare = 0;
-				}
-
+				set_atomic_seg(wqe, wr);
 				wqe += sizeof (struct mthca_atomic_seg);
 				size += (sizeof (struct mthca_raddr_seg) +
 					 sizeof (struct mthca_atomic_seg)) / 16;
@@ -1669,11 +1676,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
 			case IB_WR_RDMA_READ:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
@@ -1689,11 +1693,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
@@ -2019,25 +2020,11 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.atomic.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.atomic.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+					      wr->wr.atomic.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 
-				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.swap);
-					((struct mthca_atomic_seg *) wqe)->compare =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-				} else {
-					((struct mthca_atomic_seg *) wqe)->swap_add =
-						cpu_to_be64(wr->wr.atomic.compare_add);
-					((struct mthca_atomic_seg *) wqe)->compare = 0;
-				}
-
+				set_atomic_seg(wqe, wr);
 				wqe += sizeof (struct mthca_atomic_seg);
 				size += (sizeof (struct mthca_raddr_seg) +
 					 sizeof (struct mthca_atomic_seg)) / 16;
@@ -2046,11 +2033,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			case IB_WR_RDMA_READ:
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;
@@ -2066,11 +2050,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				((struct mthca_raddr_seg *) wqe)->raddr =
-					cpu_to_be64(wr->wr.rdma.remote_addr);
-				((struct mthca_raddr_seg *) wqe)->rkey =
-					cpu_to_be32(wr->wr.rdma.rkey);
-				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+					      wr->wr.rdma.rkey);
 				wqe += sizeof (struct mthca_raddr_seg);
 				size += sizeof (struct mthca_raddr_seg) / 16;
 				break;