Commit 6c7cc6e4 authored by Andy Grover

RDS: Rename data op members prefix from m_ to op_

For consistency.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
parent f8b3aaf2
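
In short, the patch renames the data-op members of struct rm_data_op from the m_ prefix to the op_ prefix (matching the rdma op) and updates every user. A minimal before/after sketch, reconstructed from the rds.h hunk below; the comments are added here for illustration and are not in the source:

	/* before */
	struct rm_data_op {
		unsigned int		op_active:1;
		unsigned int		m_nents;	/* scatterlist entries holding data */
		unsigned int		m_count;	/* count returned by ib_dma_map_sg() */
		struct scatterlist	*m_sg;
	};

	/* after: all members now share the op_ prefix */
	struct rm_data_op {
		unsigned int		op_active:1;
		unsigned int		op_nents;
		unsigned int		op_count;
		struct scatterlist	*op_sg;
	};
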
@@ -76,7 +76,7 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
 	ib_dma_unmap_sg(ic->i_cm_id->device,
-			rm->data.m_sg, rm->data.m_nents,
+			rm->data.op_sg, rm->data.op_nents,
 			DMA_TO_DEVICE);
 	if (rm->rdma.op_active) {
@@ -513,20 +513,20 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	/* map the message the first time we see it */
 	if (!ic->i_rm) {
-		if (rm->data.m_nents) {
-			rm->data.m_count = ib_dma_map_sg(dev,
-							 rm->data.m_sg,
-							 rm->data.m_nents,
+		if (rm->data.op_nents) {
+			rm->data.op_count = ib_dma_map_sg(dev,
+							  rm->data.op_sg,
+							  rm->data.op_nents,
 							  DMA_TO_DEVICE);
-			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
-			if (rm->data.m_count == 0) {
+			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+			if (rm->data.op_count == 0) {
 				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 				ret = -ENOMEM; /* XXX ? */
 				goto out;
 			}
 		} else {
-			rm->data.m_count = 0;
+			rm->data.op_count = 0;
 		}
 		rds_message_addref(rm);
@@ -583,7 +583,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &rm->data.m_sg[sg];
+	scat = &rm->data.op_sg[sg];
 	i = 0;
 	do {
 		unsigned int len = 0;
@@ -604,7 +604,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		/* Set up the data, if present */
 		if (i < work_alloc
-		    && scat != &rm->data.m_sg[rm->data.m_count]) {
+		    && scat != &rm->data.op_sg[rm->data.op_count]) {
 			len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
 			send->s_wr.num_sge = 2;
@@ -649,7 +649,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		i++;
 	} while (i < work_alloc
-		 && scat != &rm->data.m_sg[rm->data.m_count]);
+		 && scat != &rm->data.op_sg[rm->data.op_count]);
 	/* Account the RDS header in the number of bytes we sent, but just once.
 	 * The caller has no concept of fragmentation. */
@@ -657,7 +657,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		bytes_sent += sizeof(struct rds_header);
 	/* if we finished the message then send completion owns it */
-	if (scat == &rm->data.m_sg[rm->data.m_count]) {
+	if (scat == &rm->data.op_sg[rm->data.op_count]) {
 		prev->s_rm = ic->i_rm;
 		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
 		ic->i_rm = NULL;
......
@@ -82,7 +82,7 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
 	ib_dma_unmap_sg(ic->i_cm_id->device,
-			rm->data.m_sg, rm->data.m_nents,
+			rm->data.op_sg, rm->data.op_nents,
 			DMA_TO_DEVICE);
 	if (rm->rdma.op_active) {
@@ -562,20 +562,20 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	   rm->m_inc.i_hdr.h_flags,
 	   be32_to_cpu(rm->m_inc.i_hdr.h_len));
 	*/
-	if (rm->data.m_nents) {
-		rm->data.m_count = ib_dma_map_sg(dev,
-						 rm->data.m_sg,
-						 rm->data.m_nents,
+	if (rm->data.op_nents) {
+		rm->data.op_count = ib_dma_map_sg(dev,
+						  rm->data.op_sg,
+						  rm->data.op_nents,
 						  DMA_TO_DEVICE);
-		rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
-		if (rm->data.m_count == 0) {
+		rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+		if (rm->data.op_count == 0) {
 			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 	} else {
-		rm->data.m_count = 0;
+		rm->data.op_count = 0;
 	}
 	ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
@@ -622,7 +622,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &rm->data.m_sg[sg];
+	scat = &rm->data.op_sg[sg];
 	sent = 0;
 	i = 0;
@@ -651,7 +651,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}
 	/* if there's data reference it with a chain of work reqs */
-	for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
+	for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
 		unsigned int len;
 		send = &ic->i_sends[pos];
@@ -729,7 +729,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	sent += sizeof(struct rds_header);
 	/* if we finished the message then send completion owns it */
-	if (scat == &rm->data.m_sg[rm->data.m_count]) {
+	if (scat == &rm->data.op_sg[rm->data.op_count]) {
 		prev->s_rm = ic->i_rm;
 		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 		ic->i_rm = NULL;
......
@@ -62,12 +62,12 @@ static void rds_message_purge(struct rds_message *rm)
 	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
 		return;
-	for (i = 0; i < rm->data.m_nents; i++) {
-		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.m_sg[i]));
+	for (i = 0; i < rm->data.op_nents; i++) {
+		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
 		/* XXX will have to put_page for page refs */
-		__free_page(sg_page(&rm->data.m_sg[i]));
+		__free_page(sg_page(&rm->data.op_sg[i]));
 	}
-	rm->data.m_nents = 0;
+	rm->data.op_nents = 0;
 	if (rm->rdma.op_active)
 		rds_rdma_free_op(&rm->rdma);
@@ -261,11 +261,11 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
-	rm->data.m_nents = ceil(total_len, PAGE_SIZE);
-	rm->data.m_sg = rds_message_alloc_sgs(rm, num_sgs);
-	for (i = 0; i < rm->data.m_nents; ++i) {
-		sg_set_page(&rm->data.m_sg[i],
+	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
+	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+	for (i = 0; i < rm->data.op_nents; ++i) {
+		sg_set_page(&rm->data.op_sg[i],
 			    virt_to_page(page_addrs[i]),
 			    PAGE_SIZE, 0);
 	}
@@ -288,7 +288,7 @@ int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
 	/*
 	 * now allocate and copy in the data payload.
 	 */
-	sg = rm->data.m_sg;
+	sg = rm->data.op_sg;
 	iov = first_iov;
 	iov_off = 0;
 	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
@@ -299,7 +299,7 @@ int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
 						       GFP_HIGHUSER);
 			if (ret)
 				goto out;
-			rm->data.m_nents++;
+			rm->data.op_nents++;
 			sg_off = 0;
 		}
@@ -354,7 +354,7 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
 	iov = first_iov;
 	iov_off = 0;
-	sg = rm->data.m_sg;
+	sg = rm->data.op_sg;
 	vec_off = 0;
 	copied = 0;
......
@@ -339,9 +339,9 @@ struct rds_message {
 		} rdma;
 		struct rm_data_op {
 			unsigned int		op_active:1;
-			unsigned int		m_nents;
-			unsigned int		m_count;
-			struct scatterlist	*m_sg;
+			unsigned int		op_nents;
+			unsigned int		op_count;
+			struct scatterlist	*op_sg;
 		} data;
 	};
 	unsigned int		m_used_sgs;
......
@@ -166,7 +166,7 @@ int rds_send_xmit(struct rds_connection *conn)
 		rm = conn->c_xmit_rm;
 		if (rm &&
 		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
-		    conn->c_xmit_sg == rm->data.m_nents) {
+		    conn->c_xmit_sg == rm->data.op_nents) {
 			conn->c_xmit_rm = NULL;
 			conn->c_xmit_sg = 0;
 			conn->c_xmit_hdr_off = 0;
@@ -296,7 +296,7 @@ int rds_send_xmit(struct rds_connection *conn)
 		if (rm->data.op_active
 		    && (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
-			conn->c_xmit_sg < rm->data.m_nents)) {
+			conn->c_xmit_sg < rm->data.op_nents)) {
 			ret = conn->c_trans->xmit(conn, rm,
 						  conn->c_xmit_hdr_off,
 						  conn->c_xmit_sg,
@@ -312,7 +312,7 @@ int rds_send_xmit(struct rds_connection *conn)
 					ret -= tmp;
 				}
-				sg = &rm->data.m_sg[conn->c_xmit_sg];
+				sg = &rm->data.op_sg[conn->c_xmit_sg];
 				while (ret) {
 					tmp = min_t(int, ret, sg->length -
 							      conn->c_xmit_data_off);
@@ -323,7 +323,7 @@ int rds_send_xmit(struct rds_connection *conn)
 						sg++;
 						conn->c_xmit_sg++;
 						BUG_ON(ret != 0 &&
-						       conn->c_xmit_sg == rm->data.m_nents);
+						       conn->c_xmit_sg == rm->data.op_nents);
 					}
 				}
 			}
@@ -959,7 +959,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		goto out;
 	}
-	rm->data.m_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
+	rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
 	/* XXX fix this to not allocate memory */
 	ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
 	if (ret)
......
@@ -166,21 +166,21 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 			goto out;
 	}
-	while (sg < rm->data.m_nents) {
+	while (sg < rm->data.op_nents) {
 		ret = tc->t_sock->ops->sendpage(tc->t_sock,
-						sg_page(&rm->data.m_sg[sg]),
-						rm->data.m_sg[sg].offset + off,
-						rm->data.m_sg[sg].length - off,
+						sg_page(&rm->data.op_sg[sg]),
+						rm->data.op_sg[sg].offset + off,
+						rm->data.op_sg[sg].length - off,
 						MSG_DONTWAIT|MSG_NOSIGNAL);
-		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.m_sg[sg]),
-			 rm->data.m_sg[sg].offset + off, rm->data.m_sg[sg].length - off,
+		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
+			 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
 			 ret);
 		if (ret <= 0)
 			break;
 		off += ret;
 		done += ret;
-		if (off == rm->data.m_sg[sg].length) {
+		if (off == rm->data.op_sg[sg].length) {
 			off = 0;
 			sg++;
 		}
......