Commit 45e81834 authored by David S. Miller

Merge branch 'cxgb4-next'

Anish Bhatt says:

====================
All Chelsio drivers: Clean up CPL message macros

This patch series cleans up all register defines/macros defined in t4_msg.h,
and the files they affect, as part of the continuing cleanup effort.

The patch series is created against the 'net-next' tree and includes patches
to the cxgb4, cxgb4vf, iw_cxgb4, cxgb4i and csiostor drivers.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if there are any review comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f902e881 bdc590b9
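
For context, the series converges on the field-macro suffix convention already
used elsewhere in the cxgb4 headers. The sketch below uses a hypothetical
field FOO and flag BAR rather than any real t4_msg.h definition: _S is the bit
shift, _M the unshifted mask, _V() packs a value into the field, _G() extracts
it, and _F is the pre-shifted form of a single-bit flag. The older spellings
being retired are V_FOO(x)/G_FOO(x)/F_BAR and ad hoc names such as GET_FOO(x)
or bare FOO(x).

    /* Hypothetical field macros illustrating the convention; the real
     * definitions for each CPL message field live in t4_msg.h.
     */
    #define FOO_S    8                          /* lowest bit of the field       */
    #define FOO_M    0xfU                       /* field mask, before shifting   */
    #define FOO_V(x) ((x) << FOO_S)             /* place a value into the field  */
    #define FOO_G(x) (((x) >> FOO_S) & FOO_M)   /* extract the field from a word */

    #define BAR_S    31                         /* a single-bit flag ...         */
    #define BAR_V(x) ((x) << BAR_S)
    #define BAR_F    BAR_V(1U)                  /* ... gains a _F shorthand      */

With that scheme, call sites read uniformly: a request word is built as
FOO_V(val) | BAR_F and a reply is decoded with FOO_G(word), which is the shape
of every hunk below.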
@@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
 	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
 	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
-	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+	req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
 	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
 	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
 	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

 	sgl = (struct ulptx_sgl *)(req + 1);
 	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-				    ULPTX_NSGE(1));
+				    ULPTX_NSGE_V(1));
 	sgl->len0 = cpu_to_be32(len);
 	sgl->addr0 = cpu_to_be64(data);
@@ -672,7 +672,7 @@ static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
 		if (idx >= adap->tids.ftid_base && nidx <
 		   (adap->tids.nftids + adap->tids.nsftids)) {
 			idx = nidx;
-			ret = GET_TCB_COOKIE(rpl->cookie);
+			ret = TCB_COOKIE_G(rpl->cookie);
 			f = &adap->tids.ftid_tab[idx];

 			if (ret == FW_FILTER_WR_FLT_DELETED) {
@@ -724,7 +724,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
 		const struct cpl_sge_egr_update *p = (void *)rsp;
-		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
 		struct sge_txq *txq;

 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
@@ -3416,8 +3416,8 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
 	req->peer_ip = htonl(0);
 	chan = rxq_to_chan(&adap->sge, queue);
 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
-	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
 	ret = t4_mgmt_tx(adap, skb);
 	return net_xmit_eval(ret);
 }
@@ -3459,8 +3459,8 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
 	req->peer_ip_lo = cpu_to_be64(0);
 	chan = rxq_to_chan(&adap->sge, queue);
 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
-	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
 	ret = t4_mgmt_tx(adap, skb);
 	return net_xmit_eval(ret);
 }
@@ -3483,8 +3483,8 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
 	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
 	INIT_TP_WR(req, 0);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
-	req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
-				LISTSVR_IPV6(0)) | QUEUENO(queue));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
+				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
 	ret = t4_mgmt_tx(adap, skb);
 	return net_xmit_eval(ret);
 }
@@ -151,8 +151,8 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
 					e->idx | (sync ? F_SYNC_WR : 0) |
-					TID_QID(adap->sge.fw_evtq.abs_id)));
-	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+					TID_QID_V(adap->sge.fw_evtq.abs_id)));
+	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
 	req->l2t_idx = htons(e->idx);
 	req->vlan = htons(e->vlan);
 	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
@@ -821,7 +821,8 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
 		sgl->addr0 = cpu_to_be64(addr[1]);
 	}

-	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+			      ULPTX_NSGE_V(nfrags));
 	if (likely(--nfrags == 0))
 		return;
 	/*
@@ -1761,7 +1762,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	pkt = (const struct cpl_rx_pkt *)rsp;
 	csum_ok = pkt->csum_calc && !pkt->err_vec &&
 		  (q->netdev->features & NETIF_F_RXCSUM);
-	if ((pkt->l2info & htonl(RXF_TCP)) &&
+	if ((pkt->l2info & htonl(RXF_TCP_F)) &&
 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
 		do_gro(rxq, si, pkt);
 		return 0;
@@ -1783,11 +1784,11 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	rxq->stats.pkts++;

-	if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
 		if (!pkt->ip_frag) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			rxq->stats.rx_cso++;
-		} else if (pkt->l2info & htonl(RXF_IP)) {
+		} else if (pkt->l2info & htonl(RXF_IP_F)) {
 			__sum16 c = (__force __sum16)pkt->csum;
 			skb->csum = csum_unfold(c);
 			skb->ip_summed = CHECKSUM_COMPLETE;
@@ -450,7 +450,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
 		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
 		 */
 		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
-		opcode = G_CPL_OPCODE(ntohl(p->opcode_qid));
+		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
 		if (opcode != CPL_SGE_EGR_UPDATE) {
 			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
 				, opcode);
@@ -471,7 +471,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
 		 * free TX Queue Descriptors ...
 		 */
 		const struct cpl_sge_egr_update *p = cpl;
-		unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
 		struct sge *s = &adapter->sge;
 		struct sge_txq *tq;
 		struct sge_eth_txq *txq;
@@ -926,7 +926,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
 	}
 	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-			      ULPTX_NSGE(nfrags));
+			      ULPTX_NSGE_V(nfrags));
 	if (likely(--nfrags == 0))
 		return;
 	/*
@@ -1604,7 +1604,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	 * If this is a good TCP packet and we have Generic Receive Offload
 	 * enabled, handle the packet in the GRO path.
 	 */
-	if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
 	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
 	    !pkt->ip_frag) {
 		do_gro(rxq, gl, pkt);
@@ -1626,7 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	rxq->stats.pkts++;

 	if (csum_ok && !pkt->err_vec &&
-	    (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
 		if (!pkt->ip_frag)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else {
@@ -1758,7 +1758,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
 	else {
 		/* Program DSGL to dma payload */
 		dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-					ULPTX_MORE | ULPTX_NSGE(1));
+					ULPTX_MORE_F | ULPTX_NSGE_V(1));
 		dsgl.len0 = cpu_to_be32(pld_len);
 		dsgl.addr0 = cpu_to_be64(pld->paddr);
 		csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
@@ -298,8 +298,8 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
 	struct csio_dma_buf *dma_buf;
 	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

-	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
-				ULPTX_NSGE(req->nsge));
+	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
+				ULPTX_NSGE_V(req->nsge));
 	/* Now add the data SGLs */
 	if (likely(!req->dcopy)) {
 		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
@@ -704,7 +704,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
 	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
 	unsigned short tcp_opt = ntohs(req->tcp_opt);
 	unsigned int tid = GET_TID(req);
-	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct tid_info *t = lldi->tids;
 	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
@@ -752,15 +752,15 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
 	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
 		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

-	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
-	if (GET_TCPOPT_TSTAMP(tcp_opt))
+	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
+	if (TCPOPT_TSTAMP_G(tcp_opt))
 		csk->advmss -= 12;
 	if (csk->advmss < 128)
 		csk->advmss = 128;

 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		  "csk 0x%p, mss_idx %u, advmss %u.\n",
-		  csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);
+		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

 	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
@@ -856,8 +856,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
 	unsigned int tid = GET_TID(rpl);
 	unsigned int atid =
-		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
-	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
+	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct tid_info *t = lldi->tids;
@@ -1112,7 +1112,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
 	hlen = ntohs(cpl->len);
 	dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

-	plen = ISCSI_PDU_LEN(pdu_len_ddp);
+	plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
 	if (is_t4(lldi->adapter_type))
 		plen -= 40;
@@ -1619,7 +1619,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, csk->tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
-	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 8);
 	req->val = cpu_to_be64(pg_idx << 8);
@@ -1651,7 +1651,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 4);
 	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |