Commit 1117f26e authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Move ICRC generation to a subroutine

Isolate ICRC generation into a single subroutine named rxe_generate_icrc()
in rxe_icrc.c. Remove scattered crc generation code from elsewhere.

Link: https://lore.kernel.org/r/20210707040040.15434-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 13050a0b
...@@ -349,7 +349,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp, ...@@ -349,7 +349,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt), &wqe->dma, payload_addr(pkt),
payload_size(pkt), RXE_TO_MR_OBJ, NULL); payload_size(pkt), RXE_TO_MR_OBJ);
if (ret) { if (ret) {
wqe->status = IB_WC_LOC_PROT_ERR; wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR; return COMPST_ERROR;
...@@ -371,7 +371,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp, ...@@ -371,7 +371,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig, &wqe->dma, &atomic_orig,
sizeof(u64), RXE_TO_MR_OBJ, NULL); sizeof(u64), RXE_TO_MR_OBJ);
if (ret) { if (ret) {
wqe->status = IB_WC_LOC_PROT_ERR; wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR; return COMPST_ERROR;
......
...@@ -105,3 +105,16 @@ int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt) ...@@ -105,3 +105,16 @@ int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt)
return 0; return 0;
} }
/* rxe_icrc_generate - compute the ICRC for an outgoing packet and write it
 * into the packet's trailing ICRC field.
 * @skb: socket buffer holding the packet
 * @pkt: rxe packet info describing header/payload layout inside @skb
 *
 * Consolidates the CRC generation previously scattered across the send
 * paths: seeds the CRC over the header via rxe_icrc_hdr(), folds in the
 * payload plus pad bytes, then stores the bit-inverted result.
 */
void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
__be32 *icrcp;
u32 icrc;
/* ICRC occupies the last RXE_ICRC_SIZE bytes of the packet */
icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
/* seed with the header contribution, then fold in payload + pad */
icrc = rxe_icrc_hdr(pkt, skb);
icrc = rxe_crc32(pkt->rxe, icrc, (u8 *)payload_addr(pkt),
payload_size(pkt) + bth_pad(pkt));
/* stored value is the bitwise complement of the computed CRC */
*icrcp = (__force __be32)~icrc;
}
...@@ -77,10 +77,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, ...@@ -77,10 +77,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr); int access, struct rxe_mr *mr);
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr); int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir, u32 *crcp); enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
struct rxe_dma_info *dma, void *addr, int length, void *addr, int length, enum rxe_mr_copy_dir dir);
enum rxe_mr_copy_dir dir, u32 *crcp);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length); void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key, struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type); enum rxe_mr_lookup_type type);
...@@ -101,7 +100,7 @@ void rxe_mw_cleanup(struct rxe_pool_entry *arg); ...@@ -101,7 +100,7 @@ void rxe_mw_cleanup(struct rxe_pool_entry *arg);
/* rxe_net.c */ /* rxe_net.c */
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt); int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc); int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb);
int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
struct sk_buff *skb); struct sk_buff *skb);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num); const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
...@@ -196,6 +195,7 @@ int rxe_responder(void *arg); ...@@ -196,6 +195,7 @@ int rxe_responder(void *arg);
/* rxe_icrc.c */ /* rxe_icrc.c */
u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb); u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt); int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt);
void rxe_icrc_generate(struct sk_buff *skb, struct rxe_pkt_info *pkt);
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb); void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);
......
...@@ -278,11 +278,10 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length) ...@@ -278,11 +278,10 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
} }
/* copy data from a range (vaddr, vaddr+length-1) to or from /* copy data from a range (vaddr, vaddr+length-1) to or from
* a mr object starting at iova. Compute incremental value of * a mr object starting at iova.
* crc32 if crcp is not zero. caller must hold a reference to mr
*/ */
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir, u32 *crcp) enum rxe_mr_copy_dir dir)
{ {
int err; int err;
int bytes; int bytes;
...@@ -292,7 +291,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, ...@@ -292,7 +291,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
int m; int m;
int i; int i;
size_t offset; size_t offset;
u32 crc = crcp ? (*crcp) : 0;
if (length == 0) if (length == 0)
return 0; return 0;
...@@ -306,10 +304,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, ...@@ -306,10 +304,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
memcpy(dest, src, length); memcpy(dest, src, length);
if (crcp)
*crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
length);
return 0; return 0;
} }
...@@ -340,10 +334,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, ...@@ -340,10 +334,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
memcpy(dest, src, bytes); memcpy(dest, src, bytes);
if (crcp)
crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
bytes);
length -= bytes; length -= bytes;
addr += bytes; addr += bytes;
...@@ -358,9 +348,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, ...@@ -358,9 +348,6 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
} }
} }
if (crcp)
*crcp = crc;
return 0; return 0;
err1: err1:
...@@ -376,8 +363,7 @@ int copy_data( ...@@ -376,8 +363,7 @@ int copy_data(
struct rxe_dma_info *dma, struct rxe_dma_info *dma,
void *addr, void *addr,
int length, int length,
enum rxe_mr_copy_dir dir, enum rxe_mr_copy_dir dir)
u32 *crcp)
{ {
int bytes; int bytes;
struct rxe_sge *sge = &dma->sge[dma->cur_sge]; struct rxe_sge *sge = &dma->sge[dma->cur_sge];
...@@ -438,7 +424,7 @@ int copy_data( ...@@ -438,7 +424,7 @@ int copy_data(
if (bytes > 0) { if (bytes > 0) {
iova = sge->addr + offset; iova = sge->addr + offset;
err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp); err = rxe_mr_copy(mr, iova, addr, bytes, dir);
if (err) if (err)
goto err2; goto err2;
......
...@@ -343,7 +343,7 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb) ...@@ -343,7 +343,7 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0; return 0;
} }
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc) int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{ {
int err = 0; int err = 0;
...@@ -352,8 +352,6 @@ int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc) ...@@ -352,8 +352,6 @@ int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
else if (skb->protocol == htons(ETH_P_IPV6)) else if (skb->protocol == htons(ETH_P_IPV6))
err = prepare6(pkt, skb); err = prepare6(pkt, skb);
*crc = rxe_icrc_hdr(pkt, skb);
if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac)) if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
pkt->mask |= RXE_LOOPBACK_MASK; pkt->mask |= RXE_LOOPBACK_MASK;
...@@ -438,6 +436,8 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, ...@@ -438,6 +436,8 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
goto drop; goto drop;
} }
rxe_icrc_generate(skb, pkt);
if (pkt->mask & RXE_LOOPBACK_MASK) if (pkt->mask & RXE_LOOPBACK_MASK)
err = rxe_loopback(skb, pkt); err = rxe_loopback(skb, pkt);
else else
......
...@@ -466,12 +466,9 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ...@@ -466,12 +466,9 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_pkt_info *pkt, struct sk_buff *skb, struct rxe_pkt_info *pkt, struct sk_buff *skb,
int paylen) int paylen)
{ {
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
u32 crc = 0;
u32 *p;
int err; int err;
err = rxe_prepare(pkt, skb, &crc); err = rxe_prepare(pkt, skb);
if (err) if (err)
return err; return err;
...@@ -479,7 +476,6 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ...@@ -479,7 +476,6 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (wqe->wr.send_flags & IB_SEND_INLINE) { if (wqe->wr.send_flags & IB_SEND_INLINE) {
u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset]; u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
crc = rxe_crc32(rxe, crc, tmp, paylen);
memcpy(payload_addr(pkt), tmp, paylen); memcpy(payload_addr(pkt), tmp, paylen);
wqe->dma.resid -= paylen; wqe->dma.resid -= paylen;
...@@ -487,8 +483,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ...@@ -487,8 +483,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
} else { } else {
err = copy_data(qp->pd, 0, &wqe->dma, err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen, payload_addr(pkt), paylen,
RXE_FROM_MR_OBJ, RXE_FROM_MR_OBJ);
&crc);
if (err) if (err)
return err; return err;
} }
...@@ -496,12 +491,8 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ...@@ -496,12 +491,8 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u8 *pad = payload_addr(pkt) + paylen; u8 *pad = payload_addr(pkt) + paylen;
memset(pad, 0, bth_pad(pkt)); memset(pad, 0, bth_pad(pkt));
crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
} }
} }
p = payload_addr(pkt) + paylen + bth_pad(pkt);
*p = ~crc;
return 0; return 0;
} }
......
...@@ -536,7 +536,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr, ...@@ -536,7 +536,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int err; int err;
err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma, err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
data_addr, data_len, RXE_TO_MR_OBJ, NULL); data_addr, data_len, RXE_TO_MR_OBJ);
if (unlikely(err)) if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH return (err == -ENOSPC) ? RESPST_ERR_LENGTH
: RESPST_ERR_MALFORMED_WQE; : RESPST_ERR_MALFORMED_WQE;
...@@ -552,7 +552,7 @@ static enum resp_states write_data_in(struct rxe_qp *qp, ...@@ -552,7 +552,7 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
int data_len = payload_size(pkt); int data_len = payload_size(pkt);
err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset, err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
payload_addr(pkt), data_len, RXE_TO_MR_OBJ, NULL); payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
if (err) { if (err) {
rc = RESPST_ERR_RKEY_VIOLATION; rc = RESPST_ERR_RKEY_VIOLATION;
goto out; goto out;
...@@ -613,13 +613,10 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, ...@@ -613,13 +613,10 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
int opcode, int opcode,
int payload, int payload,
u32 psn, u32 psn,
u8 syndrome, u8 syndrome)
u32 *crcp)
{ {
struct rxe_dev *rxe = to_rdev(qp->ibqp.device); struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
struct sk_buff *skb; struct sk_buff *skb;
u32 crc = 0;
u32 *p;
int paylen; int paylen;
int pad; int pad;
int err; int err;
...@@ -651,20 +648,12 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, ...@@ -651,20 +648,12 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
if (ack->mask & RXE_ATMACK_MASK) if (ack->mask & RXE_ATMACK_MASK)
atmack_set_orig(ack, qp->resp.atomic_orig); atmack_set_orig(ack, qp->resp.atomic_orig);
err = rxe_prepare(ack, skb, &crc); err = rxe_prepare(ack, skb);
if (err) { if (err) {
kfree_skb(skb); kfree_skb(skb);
return NULL; return NULL;
} }
if (crcp) {
/* CRC computation will be continued by the caller */
*crcp = crc;
} else {
p = payload_addr(ack) + payload + bth_pad(ack);
*p = ~crc;
}
return skb; return skb;
} }
...@@ -682,8 +671,6 @@ static enum resp_states read_reply(struct rxe_qp *qp, ...@@ -682,8 +671,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
int opcode; int opcode;
int err; int err;
struct resp_res *res = qp->resp.res; struct resp_res *res = qp->resp.res;
u32 icrc;
u32 *p;
if (!res) { if (!res) {
/* This is the first time we process that request. Get a /* This is the first time we process that request. Get a
...@@ -742,24 +729,20 @@ static enum resp_states read_reply(struct rxe_qp *qp, ...@@ -742,24 +729,20 @@ static enum resp_states read_reply(struct rxe_qp *qp,
payload = min_t(int, res->read.resid, mtu); payload = min_t(int, res->read.resid, mtu);
skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload, skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
res->cur_psn, AETH_ACK_UNLIMITED, &icrc); res->cur_psn, AETH_ACK_UNLIMITED);
if (!skb) if (!skb)
return RESPST_ERR_RNR; return RESPST_ERR_RNR;
err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt), err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
payload, RXE_FROM_MR_OBJ, &icrc); payload, RXE_FROM_MR_OBJ);
if (err) if (err)
pr_err("Failed copying memory\n"); pr_err("Failed copying memory\n");
if (bth_pad(&ack_pkt)) { if (bth_pad(&ack_pkt)) {
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
u8 *pad = payload_addr(&ack_pkt) + payload; u8 *pad = payload_addr(&ack_pkt) + payload;
memset(pad, 0, bth_pad(&ack_pkt)); memset(pad, 0, bth_pad(&ack_pkt));
icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
} }
p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
*p = ~icrc;
err = rxe_xmit_packet(qp, &ack_pkt, skb); err = rxe_xmit_packet(qp, &ack_pkt, skb);
if (err) { if (err) {
...@@ -984,7 +967,7 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, ...@@ -984,7 +967,7 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
struct sk_buff *skb; struct sk_buff *skb;
skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE, skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
0, psn, syndrome, NULL); 0, psn, syndrome);
if (!skb) { if (!skb) {
err = -ENOMEM; err = -ENOMEM;
goto err1; goto err1;
...@@ -1008,7 +991,7 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, ...@@ -1008,7 +991,7 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
skb = prepare_ack_packet(qp, pkt, &ack_pkt, skb = prepare_ack_packet(qp, pkt, &ack_pkt,
IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
syndrome, NULL); syndrome);
if (!skb) { if (!skb) {
rc = -ENOMEM; rc = -ENOMEM;
goto out; goto out;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment