Commit f8b3aaf2 authored by Andy Grover

RDS: Remove struct rds_rdma_op

A big changeset, but it's all pretty dumb.

struct rds_rdma_op was already embedded in struct rm_rdma_op.
Remove rds_rdma_op and put its members in rm_rdma_op. Rename
members with "op_" prefix instead of "r_", for consistency.

Of course this breaks a lot, so fix up the code accordingly.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
parent d0ab25a8
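Before the diff, a minimal standalone sketch of the access-pattern change this commit makes at every call site. The types below are simplified stand-ins (only a few fields) and the helper is hypothetical, not the kernel definitions; the real structures and call sites are in the diff that follows.

/*
 * Standalone sketch (not kernel code): simplified stand-ins for the RDS
 * structures, showing how call sites read once rm_rdma_op carries the
 * fields directly under the new "op_" names. Field subset and helper
 * are illustrative only.
 */
#include <stdio.h>

struct rm_rdma_op {
	unsigned int op_write:1;	/* was m_rdma_op.r_write */
	unsigned int op_active:1;	/* was m_rdma_op.r_active */
	unsigned int op_bytes;		/* was m_rdma_op.r_bytes */
};

struct rds_message {
	struct rm_rdma_op rdma;		/* previously wrapped a struct rds_rdma_op m_rdma_op */
};

/* hypothetical helper: the kind of call site the changeset touches */
static void account_rdma_bytes(const struct rds_message *rm)
{
	/* old form: rm->rdma.m_rdma_op.r_active, ...r_write, ...r_bytes */
	if (rm->rdma.op_active)
		printf("%s %u RDMA bytes\n",
		       rm->rdma.op_write ? "sent" : "received",
		       rm->rdma.op_bytes);
}

int main(void)
{
	struct rds_message rm = {
		.rdma = { .op_write = 1, .op_active = 1, .op_bytes = 4096 },
	};

	account_rdma_bytes(&rm);
	return 0;
}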
@@ -54,7 +54,7 @@ struct rds_ib_connect_private {
 struct rds_ib_send_work {
 	struct rds_message *s_rm;
-	struct rds_rdma_op *s_op;
+	struct rm_rdma_op *s_op;
 	struct ib_send_wr s_wr;
 	struct ib_sge s_sge[RDS_IB_MAX_SGE];
 	unsigned long s_queued;
@@ -331,7 +331,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_ib_send_init_ring(struct rds_ib_connection *ic);
 void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
 void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
 void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
 int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
...
@@ -79,14 +79,14 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 			rm->data.m_sg, rm->data.m_nents,
 			DMA_TO_DEVICE);
-	if (rm->rdma.m_rdma_op.r_active) {
-		struct rds_rdma_op *op = &rm->rdma.m_rdma_op;
-		if (op->r_mapped) {
+	if (rm->rdma.op_active) {
+		struct rm_rdma_op *op = &rm->rdma;
+		if (op->op_mapped) {
 			ib_dma_unmap_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents,
-					op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-			op->r_mapped = 0;
+					op->op_sg, op->op_nents,
+					op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			op->op_mapped = 0;
 		}
 		/* If the user asked for a completion notification on this
@@ -111,10 +111,10 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 		 */
 		rds_ib_send_complete(rm, wc_status, rds_rdma_send_complete);
-		if (rm->rdma.m_rdma_op.r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}
 	if (rm->atomic.op_active) {
@@ -540,10 +540,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->rdma.m_rdma_op.r_active) {
+	if (rm->rdma.op_active) {
 		struct rds_ext_header_rdma ext_hdr;
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -576,7 +576,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 	/* Each frag gets a header. Msgs may be 0 bytes */
@@ -746,7 +746,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rds_message *rm)
 	 * we must fill in s_rm ourselves, so we properly clean up
 	 * on completion.
 	 */
-	if (!rm->rdma.m_rdma_op.r_active && !rm->data.op_active)
+	if (!rm->rdma.op_active && !rm->data.op_active)
 		send->s_rm = rm;
 	/* map 8 byte retval buffer to the device */
@@ -788,7 +788,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rds_message *rm)
 	return ret;
 }
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_send_work *send = NULL;
@@ -798,7 +798,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_ib_device *rds_ibdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos;
 	u32 work_alloc;
 	u32 i;
@@ -810,25 +810,25 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents, (op->r_write) ?
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+					op->op_sg, op->op_nents, (op->op_write) ?
 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
 	/*
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_ibdev->max_sge);
+	i = ceil(op->op_count, rds_ibdev->max_sge);
 	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -841,19 +841,19 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	num_sge = op->op_count;
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
-		rds_ib_set_wr_signal_state(ic, send, op->r_notify);
-		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
+		rds_ib_set_wr_signal_state(ic, send, op->op_notify);
+		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;
 		if (num_sge > rds_ibdev->max_sge) {
@@ -868,7 +868,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 			send->s_sge[j].addr =
 				ib_sg_dma_address(ic->i_cm_id->device, scat);
...
@@ -70,7 +70,7 @@ struct rds_iw_send_work {
 	struct rds_message *s_rm;
 	/* We should really put these into a union: */
-	struct rds_rdma_op *s_op;
+	struct rm_rdma_op *s_op;
 	struct rds_iw_mapping *s_mapping;
 	struct ib_mr *s_mr;
 	struct ib_fast_reg_page_list *s_page_list;
@@ -357,7 +357,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
 void rds_iw_send_init_ring(struct rds_iw_connection *ic);
 void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
 void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
 void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
 int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
...
@@ -63,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
 }
 static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
-				   struct rds_rdma_op *op)
+				   struct rm_rdma_op *op)
 {
-	if (op->r_mapped) {
+	if (op->op_mapped) {
 		ib_dma_unmap_sg(ic->i_cm_id->device,
-			op->r_sg, op->r_nents,
-			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		op->r_mapped = 0;
+			op->op_sg, op->op_nents,
+			op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		op->op_mapped = 0;
 	}
 }
@@ -85,8 +85,8 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 			rm->data.m_sg, rm->data.m_nents,
 			DMA_TO_DEVICE);
-	if (rm->rdma.m_rdma_op.r_active) {
-		rds_iw_send_unmap_rdma(ic, &rm->rdma.m_rdma_op);
+	if (rm->rdma.op_active) {
+		rds_iw_send_unmap_rdma(ic, &rm->rdma);
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -110,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 		 */
 		rds_iw_send_rdma_complete(rm, wc_status);
-		if (rm->rdma.m_rdma_op.r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}
 	/* If anyone waited for this message to get flushed out, wake
@@ -591,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	/* If it has a RDMA op, tell the peer we did it. This is
 	 * used by the peer to release use-once RDMA MRs. */
-	if (rm->rdma.m_rdma_op.r_active) {
+	if (rm->rdma.op_active) {
 		struct rds_ext_header_rdma ext_hdr;
-		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
+		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 		rds_message_add_extension(&rm->m_inc.i_hdr,
 				RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 	}
@@ -632,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 	/*
@@ -785,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
 	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
 }
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_iw_connection *ic = conn->c_transport_data;
 	struct rds_iw_send_work *send = NULL;
@@ -795,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_iw_device *rds_iwdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos, fr_pos;
 	u32 work_alloc;
 	u32 i;
@@ -807,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents, (op->r_write) ?
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+					op->op_sg, op->op_nents, (op->op_write) ?
 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
-	if (!op->r_write) {
+	if (!op->op_write) {
 		/* Alloc space on the send queue for the fastreg */
 		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
 		if (work_alloc != 1) {
@@ -836,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_iwdev->max_sge);
+	i = ceil(op->op_count, rds_iwdev->max_sge);
 	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -847,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 	send = &ic->i_sends[pos];
-	if (!op->r_write) {
+	if (!op->op_write) {
 		first = prev = &ic->i_sends[fr_pos];
 	} else {
 		first = send;
 		prev = NULL;
 	}
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	num_sge = op->op_count;
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
@@ -874,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		 * for local access after RDS is finished with it, using
 		 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
 		 */
-		if (op->r_write)
+		if (op->op_write)
 			send->s_wr.opcode = IB_WR_RDMA_WRITE;
 		else
 			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;
 		if (num_sge > rds_iwdev->max_sge) {
@@ -894,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -928,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 	/* if we finished the message then send completion owns it */
-	if (scat == &op->r_sg[op->r_count])
+	if (scat == &op->op_sg[op->op_count])
 		first->s_wr.send_flags = IB_SEND_SIGNALED;
 	if (i < work_alloc) {
@@ -942,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * adapters do not allow using the lkey for this at all. To bypass this use a
 	 * fastreg_mr (or possibly a dma_mr)
 	 */
-	if (!op->r_write) {
+	if (!op->op_write) {
 		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
-				op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+				op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
 		work_alloc++;
 	}
...
@@ -69,10 +69,10 @@ static void rds_message_purge(struct rds_message *rm)
 	}
 	rm->data.m_nents = 0;
-	if (rm->rdma.m_rdma_op.r_active)
-		rds_rdma_free_op(&rm->rdma.m_rdma_op);
-	if (rm->rdma.m_rdma_mr)
-		rds_mr_put(rm->rdma.m_rdma_mr);
+	if (rm->rdma.op_active)
+		rds_rdma_free_op(&rm->rdma);
+	if (rm->rdma.op_rdma_mr)
+		rds_mr_put(rm->rdma.op_rdma_mr);
 	if (rm->atomic.op_active)
 		rds_atomic_free_op(&rm->atomic);
...
@@ -440,26 +440,26 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
 	rds_mr_put(mr);
 }
-void rds_rdma_free_op(struct rds_rdma_op *ro)
+void rds_rdma_free_op(struct rm_rdma_op *ro)
 {
 	unsigned int i;
-	for (i = 0; i < ro->r_nents; i++) {
-		struct page *page = sg_page(&ro->r_sg[i]);
+	for (i = 0; i < ro->op_nents; i++) {
+		struct page *page = sg_page(&ro->op_sg[i]);
 		/* Mark page dirty if it was possibly modified, which
 		 * is the case for a RDMA_READ which copies from remote
 		 * to local memory */
-		if (!ro->r_write) {
+		if (!ro->op_write) {
 			BUG_ON(irqs_disabled());
 			set_page_dirty(page);
 		}
 		put_page(page);
 	}
-	kfree(ro->r_notifier);
-	ro->r_notifier = NULL;
-	ro->r_active = 0;
+	kfree(ro->op_notifier);
+	ro->op_notifier = NULL;
+	ro->op_active = 0;
 }
 void rds_atomic_free_op(struct rm_atomic_op *ao)
@@ -521,7 +521,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 {
 	struct rds_rdma_args *args;
 	struct rds_iovec vec;
-	struct rds_rdma_op *op = &rm->rdma.m_rdma_op;
+	struct rm_rdma_op *op = &rm->rdma;
 	unsigned int nr_pages;
 	unsigned int nr_bytes;
 	struct page **pages = NULL;
@@ -531,7 +531,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	int ret = 0;
 	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
-	    || rm->rdma.m_rdma_op.r_active)
+	    || rm->rdma.op_active)
 		return -EINVAL;
 	args = CMSG_DATA(cmsg);
@@ -556,27 +556,27 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		goto out;
 	}
-	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
-	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
-	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
-	op->r_active = 1;
-	op->r_recverr = rs->rs_recverr;
+	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
+	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
+	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+	op->op_active = 1;
+	op->op_recverr = rs->rs_recverr;
 	WARN_ON(!nr_pages);
-	op->r_sg = rds_message_alloc_sgs(rm, nr_pages);
-	if (op->r_notify || op->r_recverr) {
+	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
+	if (op->op_notify || op->op_recverr) {
 		/* We allocate an uninitialized notifier here, because
 		 * we don't want to do that in the completion handler. We
 		 * would have to use GFP_ATOMIC there, and don't want to deal
 		 * with failed allocations.
 		 */
-		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
-		if (!op->r_notifier) {
+		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
+		if (!op->op_notifier) {
 			ret = -ENOMEM;
 			goto out;
 		}
-		op->r_notifier->n_user_token = args->user_token;
-		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
+		op->op_notifier->n_user_token = args->user_token;
+		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
 	}
 	/* The cookie contains the R_Key of the remote memory region, and
@@ -586,15 +586,15 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 	 * destination address (which is really an offset into the MR)
 	 * FIXME: We may want to move this into ib_rdma.c
 	 */
-	op->r_key = rds_rdma_cookie_key(args->cookie);
-	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
+	op->op_rkey = rds_rdma_cookie_key(args->cookie);
+	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
 	nr_bytes = 0;
 	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
 		 (unsigned long long)args->nr_local,
 		 (unsigned long long)args->remote_vec.addr,
-		 op->r_key);
+		 op->op_rkey);
 	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
@@ -617,7 +617,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		/* If it's a WRITE operation, we want to pin the pages for reading.
 		 * If it's a READ operation, we need to pin the pages for writing.
 		 */
-		ret = rds_pin_pages(vec.addr, nr, pages, !op->r_write);
+		ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
 		if (ret < 0)
 			goto out;
@@ -630,7 +630,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			unsigned int offset = vec.addr & ~PAGE_MASK;
 			struct scatterlist *sg;
-			sg = &op->r_sg[op->r_nents + j];
+			sg = &op->op_sg[op->op_nents + j];
 			sg_set_page(sg, pages[j],
 					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
 					offset);
@@ -642,7 +642,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			vec.bytes -= sg->length;
 		}
-		op->r_nents += nr;
+		op->op_nents += nr;
 	}
 	if (nr_bytes > args->remote_vec.bytes) {
@@ -652,7 +652,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		ret = -EINVAL;
 		goto out;
 	}
-	op->r_bytes = nr_bytes;
+	op->op_bytes = nr_bytes;
 	ret = 0;
 out:
@@ -700,7 +700,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
 	if (mr) {
 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
-		rm->rdma.m_rdma_mr = mr;
+		rm->rdma.op_rdma_mr = mr;
 	}
 	return err;
 }
@@ -718,7 +718,7 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 	    rm->m_rdma_cookie != 0)
 		return -EINVAL;
-	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.m_rdma_mr);
+	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
 }
 /*
...
@@ -230,22 +230,6 @@ struct rds_mr {
 /* Flags for mr->r_state */
 #define RDS_MR_DEAD		0
-struct rds_rdma_op {
-	u32 r_key;
-	u64 r_remote_addr;
-	unsigned int r_write:1;
-	unsigned int r_fence:1;
-	unsigned int r_notify:1;
-	unsigned int r_recverr:1;
-	unsigned int r_mapped:1;
-	unsigned int r_active:1;
-	struct rds_notifier *r_notifier;
-	unsigned int r_bytes;
-	unsigned int r_nents;
-	unsigned int r_count;
-	struct scatterlist *r_sg;
-};
 static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
 {
 	return r_key | (((u64) offset) << 32);
@@ -331,14 +315,27 @@ struct rds_message {
 			unsigned int op_recverr:1;
 			unsigned int op_mapped:1;
 			unsigned int op_active:1;
-			struct rds_notifier *op_notifier;
 			struct scatterlist *op_sg;
+			struct rds_notifier *op_notifier;
 			struct rds_mr *op_rdma_mr;
 		} atomic;
 		struct rm_rdma_op {
-			struct rds_rdma_op m_rdma_op;
-			struct rds_mr *m_rdma_mr;
+			u32 op_rkey;
+			u64 op_remote_addr;
+			unsigned int op_write:1;
+			unsigned int op_fence:1;
+			unsigned int op_notify:1;
+			unsigned int op_recverr:1;
+			unsigned int op_mapped:1;
+			unsigned int op_active:1;
+			unsigned int op_bytes;
+			unsigned int op_nents;
+			unsigned int op_count;
+			struct scatterlist *op_sg;
+			struct rds_notifier *op_notifier;
+			struct rds_mr *op_rdma_mr;
 		} rdma;
 		struct rm_data_op {
 			unsigned int op_active:1;
@@ -418,7 +415,7 @@ struct rds_transport {
 				unsigned int hdr_off, unsigned int sg, unsigned int off);
 	int (*xmit_cong_map)(struct rds_connection *conn,
 			     struct rds_cong_map *map, unsigned long offset);
-	int (*xmit_rdma)(struct rds_connection *conn, struct rds_rdma_op *op);
+	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
 	int (*xmit_atomic)(struct rds_connection *conn, struct rds_message *rm);
 	int (*recv)(struct rds_connection *conn);
 	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
@@ -727,7 +724,7 @@ int rds_send_acked_before(struct rds_connection *conn, u64 seq);
 void rds_send_remove_from_sock(struct list_head *messages, int status);
 int rds_send_pong(struct rds_connection *conn, __be16 dport);
 struct rds_message *rds_send_get_message(struct rds_connection *,
-					 struct rds_rdma_op *);
+					 struct rm_rdma_op *);
 /* rdma.c */
 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
@@ -744,7 +741,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 			  struct cmsghdr *cmsg);
 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 			  struct cmsghdr *cmsg);
-void rds_rdma_free_op(struct rds_rdma_op *ro);
+void rds_rdma_free_op(struct rm_rdma_op *ro);
 void rds_atomic_free_op(struct rm_atomic_op *ao);
 void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
 void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
...
@@ -237,7 +237,7 @@ int rds_send_xmit(struct rds_connection *conn)
 		 * connection.
 		 * Therefore, we never retransmit messages with RDMA ops.
 		 */
-		if (rm->rdma.m_rdma_op.r_active &&
+		if (rm->rdma.op_active &&
 		    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 			spin_lock_irqsave(&conn->c_lock, flags);
 			if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
@@ -280,8 +280,8 @@ int rds_send_xmit(struct rds_connection *conn)
 		 * keep this simple and require that the transport either
 		 * send the whole rdma or none of it.
 		 */
-		if (rm->rdma.m_rdma_op.r_active && !conn->c_xmit_rdma_sent) {
-			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma.m_rdma_op);
+		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
+			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 			if (ret)
 				break;
 			conn->c_xmit_rdma_sent = 1;
@@ -430,16 +430,16 @@ int rds_send_acked_before(struct rds_connection *conn, u64 seq)
 void rds_rdma_send_complete(struct rds_message *rm, int status)
 {
 	struct rds_sock *rs = NULL;
-	struct rds_rdma_op *ro;
+	struct rm_rdma_op *ro;
 	struct rds_notifier *notifier;
 	unsigned long flags;
 	spin_lock_irqsave(&rm->m_rs_lock, flags);
-	ro = &rm->rdma.m_rdma_op;
+	ro = &rm->rdma;
 	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
-	    ro->r_active && ro->r_notify && ro->r_notifier) {
-		notifier = ro->r_notifier;
+	    ro->op_active && ro->op_notify && ro->op_notifier) {
+		notifier = ro->op_notifier;
 		rs = rm->m_rs;
 		sock_hold(rds_rs_to_sk(rs));
@@ -448,7 +448,7 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
 		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 		spin_unlock(&rs->rs_lock);
-		ro->r_notifier = NULL;
+		ro->op_notifier = NULL;
 	}
 	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
@@ -503,13 +503,13 @@ EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 static inline void
 __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 {
-	struct rds_rdma_op *ro;
-	ro = &rm->rdma.m_rdma_op;
-	if (ro->r_active && ro->r_notify && ro->r_notifier) {
-		ro->r_notifier->n_status = status;
-		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
-		ro->r_notifier = NULL;
+	struct rm_rdma_op *ro;
+	ro = &rm->rdma;
+	if (ro->op_active && ro->op_notify && ro->op_notifier) {
+		ro->op_notifier->n_status = status;
+		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
+		ro->op_notifier = NULL;
 	}
 	/* No need to wake the app - caller does this */
@@ -521,7 +521,7 @@ __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status
  * So speed is not an issue here.
  */
 struct rds_message *rds_send_get_message(struct rds_connection *conn,
-					 struct rds_rdma_op *op)
+					 struct rm_rdma_op *op)
 {
 	struct rds_message *rm, *tmp, *found = NULL;
 	unsigned long flags;
@@ -529,7 +529,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
 	spin_lock_irqsave(&conn->c_lock, flags);
 	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
-		if (&rm->rdma.m_rdma_op == op) {
+		if (&rm->rdma == op) {
 			atomic_inc(&rm->m_refcount);
 			found = rm;
 			goto out;
@@ -537,7 +537,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
 	}
 	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
-		if (&rm->rdma.m_rdma_op == op) {
+		if (&rm->rdma == op) {
 			atomic_inc(&rm->m_refcount);
 			found = rm;
 			break;
@@ -597,20 +597,20 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
 		spin_lock(&rs->rs_lock);
 		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
-			struct rds_rdma_op *ro = &rm->rdma.m_rdma_op;
+			struct rm_rdma_op *ro = &rm->rdma;
 			struct rds_notifier *notifier;
 			list_del_init(&rm->m_sock_item);
 			rds_send_sndbuf_remove(rs, rm);
-			if (ro->r_active && ro->r_notifier &&
-			    (ro->r_notify || (ro->r_recverr && status))) {
-				notifier = ro->r_notifier;
+			if (ro->op_active && ro->op_notifier &&
+			    (ro->op_notify || (ro->op_recverr && status))) {
+				notifier = ro->op_notifier;
 				list_add_tail(&notifier->n_list,
 						&rs->rs_notify_queue);
 				if (!notifier->n_status)
 					notifier->n_status = status;
-				rm->rdma.m_rdma_op.r_notifier = NULL;
+				rm->rdma.op_notifier = NULL;
 			}
 			was_on_sock = 1;
 			rm->m_rs = NULL;
@@ -987,11 +987,11 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	if (ret)
 		goto out;
-	if ((rm->m_rdma_cookie || rm->rdma.m_rdma_op.r_active) &&
+	if ((rm->m_rdma_cookie || rm->rdma.op_active) &&
 	    !conn->c_trans->xmit_rdma) {
 		if (printk_ratelimit())
 			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
-			       &rm->rdma.m_rdma_op, conn->c_trans->xmit_rdma);
+			       &rm->rdma, conn->c_trans->xmit_rdma);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
...