Commit 45e81834 authored by David S. Miller

Merge branch 'cxgb4-next'

Anish Bhatt says:

====================
All Chelsio drivers: Clean up CPL message macros

This patch series cleans up all the register defines/macros defined in t4_msg.h
and the affected files, as part of the continuing cleanup effort.

The patch series is created against the 'net-next' tree and includes patches
for the cxgb4, cxgb4vf, iw_cxgb4, cxgb4i and csiostor drivers.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if you have any review comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f902e881 bdc590b9
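As an orientation aid before the diff: the series converts the old ad-hoc CPL field macros (GET_FOO(x), FOO(x), and the S_/M_/V_/G_/F_ prefixed forms) to the FOO_S/FOO_M/FOO_V/FOO_G/FOO_F suffix convention already used by other cxgb4 field macros visible as context below (FW_WR_LEN16_V, RSS_QUEUE_VALID_F, and so on). The following is a minimal sketch of the pattern using a made-up EXAMPLE field; it is not taken from the patch itself, only an illustration of how the old style maps onto the new suffixes:

    /* EXAMPLE is hypothetical: a 4-bit field starting at bit 12 */
    #define EXAMPLE_S    12                                /* bit offset of the field */
    #define EXAMPLE_M    0xF                               /* field mask (field width) */
    #define EXAMPLE_V(x) ((x) << EXAMPLE_S)                /* place a value into the field */
    #define EXAMPLE_G(x) (((x) >> EXAMPLE_S) & EXAMPLE_M)  /* extract the field value */
    #define EXAMPLE_F    EXAMPLE_V(1U)                     /* "set" form, used for single-bit fields */

The old style that the series removes would have looked roughly like "#define EXAMPLE(x) ((x) << 12)" for the setter and "#define GET_EXAMPLE(x) (((x) >> 12) & 0xF)" for the getter.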
@@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 static void set_emss(struct c4iw_ep *ep, u16 opt)
 {
-	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
 	ep->mss = ep->emss;
-	if (GET_TCPOPT_TSTAMP(opt))
+	if (TCPOPT_TSTAMP_G(opt))
 		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
 	if (ep->emss < 128)
 		ep->emss = 128;
 	if (ep->emss & 7)
 		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
-		     GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
-	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
 	     ep->mss, ep->emss);
 }
@@ -652,24 +652,24 @@ static int send_connect(struct c4iw_ep *ep)
 	if (win > RCV_BUFSIZ_M)
 		win = RCV_BUFSIZ_M;
-	opt0 = (nocong ? NO_CONG(1) : 0) |
+	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
-	       DELACK(1) |
+	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
-	       DSCP(ep->tos) |
+	       DSCP_V(ep->tos) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
 	opt2 = RX_CHANNEL_V(0) |
-	       CCTRL_ECN(enable_ecn) |
+	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 	if (enable_tcp_timestamps)
-		opt2 |= TSTAMPS_EN(1);
+		opt2 |= TSTAMPS_EN_F;
 	if (enable_tcp_sack)
-		opt2 |= SACK_EN(1);
+		opt2 |= SACK_EN_F;
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN_F;
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_ep *ep;
 	struct cpl_act_establish *req = cplhdr(skb);
 	unsigned int tid = GET_TID(req);
-	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
 	struct tid_info *t = dev->rdev.lldi.tids;
 	ep = lookup_atid(t, atid);
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
-	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
@@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	if (win > RCV_BUFSIZ_M)
 		win = RCV_BUFSIZ_M;
-	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
-		(nocong ? NO_CONG(1) : 0) |
+	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
+		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
-		DELACK(1) |
+		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
-		DSCP(ep->tos) |
+		DSCP_V(ep->tos) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
-	req->tcb.opt2 = (__force __be32) (PACE(1) |
-		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
+		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
-		CCTRL_ECN(enable_ecn) |
+		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
 	if (enable_tcp_timestamps)
-		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
+		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
 	if (enable_tcp_sack)
-		req->tcb.opt2 |= (__force __be32)SACK_EN(1);
+		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
 	if (wscale && enable_tcp_window_scaling)
 		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
 	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
@@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep;
 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
-	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
+	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
					ntohl(rpl->atid_status)));
 	struct tid_info *t = dev->rdev.lldi.tids;
-	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
 	struct sockaddr_in *la;
 	struct sockaddr_in *ra;
 	struct sockaddr_in6 *la6;
@@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (ep->com.local_addr.ss_family == AF_INET &&
	    dev->rdev.lldi.enable_fw_ofld_conn) {
 		send_fw_act_open_req(ep,
-				     GET_TID_TID(GET_AOPEN_ATID(
+				     TID_TID_G(AOPEN_ATID_G(
				     ntohl(rpl->atid_status))));
 		return 0;
 	}
@@ -2181,24 +2181,24 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	win = ep->rcv_win >> 10;
 	if (win > RCV_BUFSIZ_M)
 		win = RCV_BUFSIZ_M;
-	opt0 = (nocong ? NO_CONG(1) : 0) |
+	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
-	       DELACK(1) |
+	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
-	       DSCP(ep->tos >> 2) |
+	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
 	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 	if (enable_tcp_timestamps && req->tcpopt.tstamp)
-		opt2 |= TSTAMPS_EN(1);
+		opt2 |= TSTAMPS_EN_F;
 	if (enable_tcp_sack && req->tcpopt.sack)
-		opt2 |= SACK_EN(1);
+		opt2 |= SACK_EN_F;
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN_F;
 	if (enable_ecn) {
@@ -2208,7 +2208,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
		       G_IP_HDR_LEN(hlen);
 		if (tcph->ece && tcph->cwr)
-			opt2 |= CCTRL_ECN(1);
+			opt2 |= CCTRL_ECN_V(1);
 	}
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
 		u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *child_ep = NULL, *parent_ep;
 	struct cpl_pass_accept_req *req = cplhdr(skb);
-	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int hwtid = GET_TID(req);
 	struct dst_entry *dst;
@@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		     ntohs(peer_port), peer_mss);
 		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
-				 GET_POPEN_TOS(ntohl(req->tos_stid)));
+				 PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
 	} else {
 		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
 		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
-				  PASS_OPEN_TOS(ntohl(req->tos_stid)),
+				  PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				  &parent_ep->com.local_addr)->sin6_scope_id);
 	}
@@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	c4iw_get_ep(&parent_ep->com);
 	child_ep->parent_ep = parent_ep;
-	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+	child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
 	child_ep->dst = dst;
 	child_ep->hwtid = hwtid;
@@ -3501,23 +3501,23 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
 	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
-				  V_SYN_MAC_IDX(G_RX_MACIDX(
+				  V_SYN_MAC_IDX(RX_MACIDX_G(
				  (__force int) htonl(l2info))) |
				  F_SYN_XACT_MATCH);
 	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-		      G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
-		      G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
-	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+		      RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
+		      RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
+	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(RX_CHAN_G(
				   (__force int) htonl(l2info))) |
-				   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+				   V_TCP_HDR_LEN(RX_TCPHDR_LEN_G(
				   (__force int) htons(hdr_len))) |
-				   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+				   V_IP_HDR_LEN(RX_IPHDR_LEN_G(
				   (__force int) htons(hdr_len))) |
-				   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
+				   V_ETH_HDR_LEN(RX_ETHHDR_LEN_G(eth_hdr_len)));
 	req->vlan = (__force __be16) vlantag;
 	req->len = (__force __be16) len;
-	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
-				    PASS_OPEN_TOS(tos));
+	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
+				    PASS_OPEN_TOS_V(tos));
 	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
 	if (tmp_opt.wscale_ok)
 		req->tcpopt.wsf = tmp_opt.snd_wscale;
@@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
-	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
 	req->le.filter = (__force __be32) filter;
@@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 		htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
		      FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
		      FW_OFLD_CONNECTION_WR_ASTID_V(
-		      GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+		      PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
 	/*
 	 * We store the qid in opt2 which will be used by the firmware
@@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct neighbour *neigh;
 	/* Drop all non-SYN packets */
-	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
 		goto reject;
 	/*
@@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
-		      G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
-		      G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+		      RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
+		      RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
 	if (eth_hdr_len == ETH_HLEN) {
 		eh = (struct ethhdr *)(req + 1);
 		iph = (struct iphdr *)(eh + 1);
...
@@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 	req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
 	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
 	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
-	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+	req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
 	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
 	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
 	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
 	sgl = (struct ulptx_sgl *)(req + 1);
 	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-				    ULPTX_NSGE(1));
+				    ULPTX_NSGE_V(1));
 	sgl->len0 = cpu_to_be32(len);
 	sgl->addr0 = cpu_to_be64(data);
...
@@ -672,7 +672,7 @@ static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
 		if (idx >= adap->tids.ftid_base && nidx <
		   (adap->tids.nftids + adap->tids.nsftids)) {
 			idx = nidx;
-			ret = GET_TCB_COOKIE(rpl->cookie);
+			ret = TCB_COOKIE_G(rpl->cookie);
 			f = &adap->tids.ftid_tab[idx];
 			if (ret == FW_FILTER_WR_FLT_DELETED) {
@@ -724,7 +724,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
 		const struct cpl_sge_egr_update *p = (void *)rsp;
-		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
 		struct sge_txq *txq;
 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
@@ -3416,8 +3416,8 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
 	req->peer_ip = htonl(0);
 	chan = rxq_to_chan(&adap->sge, queue);
 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
-	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
 	ret = t4_mgmt_tx(adap, skb);
 	return net_xmit_eval(ret);
 }
@@ -3459,8 +3459,8 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
 	req->peer_ip_lo = cpu_to_be64(0);
 	chan = rxq_to_chan(&adap->sge, queue);
 	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
-	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
+				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
 	ret = t4_mgmt_tx(adap, skb);
 	return net_xmit_eval(ret);
 }
@@ -3483,8 +3483,8 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
 	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
 	INIT_TP_WR(req, 0);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
-	req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
-				LISTSVR_IPV6(0)) | QUEUENO(queue));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
+				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
 	ret = t4_mgmt_tx(adap, skb);
 	return net_xmit_eval(ret);
 }
...
@@ -151,8 +151,8 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					e->idx | (sync ? F_SYNC_WR : 0) |
-					TID_QID(adap->sge.fw_evtq.abs_id)));
-	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+					TID_QID_V(adap->sge.fw_evtq.abs_id)));
+	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
 	req->l2t_idx = htons(e->idx);
 	req->vlan = htons(e->vlan);
 	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
...
@@ -821,7 +821,8 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
 		sgl->addr0 = cpu_to_be64(addr[1]);
 	}
-	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+			      ULPTX_NSGE_V(nfrags));
 	if (likely(--nfrags == 0))
 		return;
 	/*
@@ -1761,7 +1762,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	pkt = (const struct cpl_rx_pkt *)rsp;
 	csum_ok = pkt->csum_calc && !pkt->err_vec &&
		  (q->netdev->features & NETIF_F_RXCSUM);
-	if ((pkt->l2info & htonl(RXF_TCP)) &&
+	if ((pkt->l2info & htonl(RXF_TCP_F)) &&
	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
 		do_gro(rxq, si, pkt);
 		return 0;
@@ -1783,11 +1784,11 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	rxq->stats.pkts++;
-	if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
 		if (!pkt->ip_frag) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			rxq->stats.rx_cso++;
-		} else if (pkt->l2info & htonl(RXF_IP)) {
+		} else if (pkt->l2info & htonl(RXF_IP_F)) {
 			__sum16 c = (__force __sum16)pkt->csum;
 			skb->csum = csum_unfold(c);
 			skb->ip_summed = CHECKSUM_COMPLETE;
...
@@ -123,6 +123,13 @@ enum CPL_error {
 	CPL_ERR_IWARP_FLM = 50,
 };
+enum {
+	CPL_CONN_POLICY_AUTO = 0,
+	CPL_CONN_POLICY_ASK = 1,
+	CPL_CONN_POLICY_FILTER = 2,
+	CPL_CONN_POLICY_DENY = 3
+};
 enum {
 	ULP_MODE_NONE = 0,
 	ULP_MODE_ISCSI = 2,
@@ -160,16 +167,28 @@ union opcode_tid {
 	u8 opcode;
 };
-#define CPL_OPCODE(x) ((x) << 24)
-#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF)
-#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
+#define CPL_OPCODE_S 24
+#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
+#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
+#define TID_G(x) ((x) & 0xFFFFFF)
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))
 #define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
-#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd))))
 /* partitioning of TID fields that also carry a queue id */
-#define GET_TID_TID(x) ((x) & 0x3fff)
-#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
-#define TID_QID(x) ((x) << 14)
+#define TID_TID_S 0
+#define TID_TID_M 0x3fff
+#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
+#define TID_QID_S 14
+#define TID_QID_M 0x3ff
+#define TID_QID_V(x) ((x) << TID_QID_S)
+#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)
 struct rss_header {
 	u8 opcode;
@@ -199,8 +218,8 @@ struct work_request_hdr {
 };
 /* wr_hi fields */
-#define S_WR_OP 24
-#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+#define WR_OP_S 24
+#define WR_OP_V(x) ((__u64)(x) << WR_OP_S)
 #define WR_HDR struct work_request_hdr wr
@@ -270,17 +289,42 @@ struct cpl_pass_open_req {
 	__be32 local_ip;
 	__be32 peer_ip;
 	__be64 opt0;
-#define NO_CONG(x) ((x) << 4)
-#define DELACK(x) ((x) << 5)
-#define DSCP(x) ((x) << 22)
-#define TCAM_BYPASS(x) ((u64)(x) << 48)
-#define NAGLE(x) ((u64)(x) << 49)
 	__be64 opt1;
-#define SYN_RSS_ENABLE (1 << 0)
-#define SYN_RSS_QUEUE(x) ((x) << 2)
-#define CONN_POLICY_ASK (1 << 22)
 };
+/* option 0 fields */
+#define NO_CONG_S 4
+#define NO_CONG_V(x) ((x) << NO_CONG_S)
+#define NO_CONG_F NO_CONG_V(1U)
+#define DELACK_S 5
+#define DELACK_V(x) ((x) << DELACK_S)
+#define DELACK_F DELACK_V(1U)
+#define DSCP_S 22
+#define DSCP_M 0x3F
+#define DSCP_V(x) ((x) << DSCP_S)
+#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M)
+#define TCAM_BYPASS_S 48
+#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S)
+#define TCAM_BYPASS_F TCAM_BYPASS_V(1ULL)
+#define NAGLE_S 49
+#define NAGLE_V(x) ((__u64)(x) << NAGLE_S)
+#define NAGLE_F NAGLE_V(1ULL)
+/* option 1 fields */
+#define SYN_RSS_ENABLE_S 0
+#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S)
+#define SYN_RSS_ENABLE_F SYN_RSS_ENABLE_V(1U)
+#define SYN_RSS_QUEUE_S 2
+#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S)
+#define CONN_POLICY_S 22
+#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S)
 struct cpl_pass_open_req6 {
 	WR_HDR;
 	union opcode_tid ot;
@@ -304,16 +348,37 @@ struct cpl_pass_accept_rpl {
 	WR_HDR;
 	union opcode_tid ot;
 	__be32 opt2;
-#define RX_COALESCE_VALID(x) ((x) << 11)
-#define RX_COALESCE(x) ((x) << 12)
-#define PACE(x) ((x) << 16)
-#define TX_QUEUE(x) ((x) << 23)
-#define CCTRL_ECN(x) ((x) << 27)
-#define TSTAMPS_EN(x) ((x) << 29)
-#define SACK_EN(x) ((x) << 30)
 	__be64 opt0;
 };
+/* option 2 fields */
+#define RX_COALESCE_VALID_S 11
+#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S)
+#define RX_COALESCE_VALID_F RX_COALESCE_VALID_V(1U)
+#define RX_COALESCE_S 12
+#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S)
+#define PACE_S 16
+#define PACE_V(x) ((x) << PACE_S)
+#define TX_QUEUE_S 23
+#define TX_QUEUE_M 0x7
+#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S)
+#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M)
+#define CCTRL_ECN_S 27
+#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S)
+#define CCTRL_ECN_F CCTRL_ECN_V(1U)
+#define TSTAMPS_EN_S 29
+#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S)
+#define TSTAMPS_EN_F TSTAMPS_EN_V(1U)
+#define SACK_EN_S 30
+#define SACK_EN_V(x) ((x) << SACK_EN_S)
+#define SACK_EN_F SACK_EN_V(1U)
 struct cpl_t5_pass_accept_rpl {
 	WR_HDR;
 	union opcode_tid ot;
@@ -384,30 +449,61 @@ struct cpl_t5_act_open_req6 {
 struct cpl_act_open_rpl {
 	union opcode_tid ot;
 	__be32 atid_status;
-#define GET_AOPEN_STATUS(x) ((x) & 0xff)
-#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
 };
+/* cpl_act_open_rpl.atid_status fields */
+#define AOPEN_STATUS_S 0
+#define AOPEN_STATUS_M 0xFF
+#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M)
+#define AOPEN_ATID_S 8
+#define AOPEN_ATID_M 0xFFFFFF
+#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M)
 struct cpl_pass_establish {
 	union opcode_tid ot;
 	__be32 rsvd;
 	__be32 tos_stid;
-#define PASS_OPEN_TID(x) ((x) << 0)
-#define PASS_OPEN_TOS(x) ((x) << 24)
-#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
-#define GET_POPEN_TID(x) ((x) & 0xffffff)
-#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
 	__be16 mac_idx;
 	__be16 tcp_opt;
-#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
-#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
-#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
-#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
-#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
 	__be32 snd_isn;
 	__be32 rcv_isn;
 };
+/* cpl_pass_establish.tos_stid fields */
+#define PASS_OPEN_TID_S 0
+#define PASS_OPEN_TID_M 0xFFFFFF
+#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S)
+#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M)
+#define PASS_OPEN_TOS_S 24
+#define PASS_OPEN_TOS_M 0xFF
+#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S)
+#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M)
+/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
+#define TCPOPT_WSCALE_OK_S 5
+#define TCPOPT_WSCALE_OK_M 0x1
+#define TCPOPT_WSCALE_OK_G(x) \
+	(((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M)
+#define TCPOPT_SACK_S 6
+#define TCPOPT_SACK_M 0x1
+#define TCPOPT_SACK_G(x) (((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M)
+#define TCPOPT_TSTAMP_S 7
+#define TCPOPT_TSTAMP_M 0x1
+#define TCPOPT_TSTAMP_G(x) (((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M)
+#define TCPOPT_SND_WSCALE_S 8
+#define TCPOPT_SND_WSCALE_M 0xF
+#define TCPOPT_SND_WSCALE_G(x) \
+	(((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M)
+#define TCPOPT_MSS_S 12
+#define TCPOPT_MSS_M 0xF
+#define TCPOPT_MSS_G(x) (((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M)
 struct cpl_act_establish {
 	union opcode_tid ot;
 	__be32 rsvd;
@@ -422,24 +518,39 @@ struct cpl_get_tcb {
 	WR_HDR;
 	union opcode_tid ot;
 	__be16 reply_ctrl;
-#define QUEUENO(x) ((x) << 0)
-#define REPLY_CHAN(x) ((x) << 14)
-#define NO_REPLY(x) ((x) << 15)
 	__be16 cookie;
 };
+/* cpl_get_tcb.reply_ctrl fields */
+#define QUEUENO_S 0
+#define QUEUENO_V(x) ((x) << QUEUENO_S)
+#define REPLY_CHAN_S 14
+#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S)
+#define REPLY_CHAN_F REPLY_CHAN_V(1U)
+#define NO_REPLY_S 15
+#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
+#define NO_REPLY_F NO_REPLY_V(1U)
 struct cpl_set_tcb_field {
 	WR_HDR;
 	union opcode_tid ot;
 	__be16 reply_ctrl;
 	__be16 word_cookie;
-#define TCB_WORD(x) ((x) << 0)
-#define TCB_COOKIE(x) ((x) << 5)
-#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
 	__be64 mask;
 	__be64 val;
 };
+/* cpl_set_tcb_field.word_cookie fields */
+#define TCB_WORD_S 0
+#define TCB_WORD(x) ((x) << TCB_WORD_S)
+#define TCB_COOKIE_S 5
+#define TCB_COOKIE_M 0x7
+#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S)
+#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M)
 struct cpl_set_tcb_rpl {
 	union opcode_tid ot;
 	__be16 rsvd;
@@ -466,10 +577,14 @@ struct cpl_close_listsvr_req {
 	WR_HDR;
 	union opcode_tid ot;
 	__be16 reply_ctrl;
-#define LISTSVR_IPV6(x) ((x) << 14)
 	__be16 rsvd;
 };
+/* additional cpl_close_listsvr_req.reply_ctrl field */
+#define LISTSVR_IPV6_S 14
+#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S)
+#define LISTSVR_IPV6_F LISTSVR_IPV6_V(1U)
 struct cpl_close_listsvr_rpl {
 	union opcode_tid ot;
 	u8 rsvd[3];
@@ -565,6 +680,34 @@ struct cpl_tx_pkt_lso_core {
 	/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
 };
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define LSO_TCPHDR_LEN_S 0
+#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S)
+#define LSO_IPHDR_LEN_S 4
+#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S)
+#define LSO_ETHHDR_LEN_S 16
+#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S)
+#define LSO_IPV6_S 20
+#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S)
+#define LSO_IPV6_F LSO_IPV6_V(1U)
+#define LSO_LAST_SLICE_S 22
+#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S)
+#define LSO_LAST_SLICE_F LSO_LAST_SLICE_V(1U)
+#define LSO_FIRST_SLICE_S 23
+#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
+#define LSO_FIRST_SLICE_F LSO_FIRST_SLICE_V(1U)
+#define LSO_OPCODE_S 24
+#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S)
+#define LSO_T5_XFER_SIZE_S 0
+#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S)
 struct cpl_tx_pkt_lso {
 	WR_HDR;
 	struct cpl_tx_pkt_lso_core c;
@@ -574,8 +717,6 @@ struct cpl_tx_pkt_lso {
 struct cpl_iscsi_hdr {
 	union opcode_tid ot;
 	__be16 pdu_len_ddp;
-#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
-#define ISCSI_DDP (1 << 15)
 	__be16 len;
 	__be32 seq;
 	__be16 urg;
@@ -583,6 +724,16 @@ struct cpl_iscsi_hdr {
 	u8 status;
 };
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define ISCSI_PDU_LEN_S 0
+#define ISCSI_PDU_LEN_M 0x7FFF
+#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S)
+#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M)
+#define ISCSI_DDP_S 15
+#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
+#define ISCSI_DDP_F ISCSI_DDP_V(1U)
 struct cpl_rx_data {
 	union opcode_tid ot;
 	__be16 rsvd;
@@ -639,49 +790,61 @@ struct cpl_rx_pkt {
 	__be16 vlan;
 	__be16 len;
 	__be32 l2info;
-#define RXF_UDP (1 << 22)
-#define RXF_TCP (1 << 23)
-#define RXF_IP (1 << 24)
-#define RXF_IP6 (1 << 25)
 	__be16 hdr_len;
 	__be16 err_vec;
 };
+#define RXF_UDP_S 22
+#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
+#define RXF_UDP_F RXF_UDP_V(1U)
+#define RXF_TCP_S 23
+#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
+#define RXF_TCP_F RXF_TCP_V(1U)
+#define RXF_IP_S 24
+#define RXF_IP_V(x) ((x) << RXF_IP_S)
+#define RXF_IP_F RXF_IP_V(1U)
+#define RXF_IP6_S 25
+#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
+#define RXF_IP6_F RXF_IP6_V(1U)
 /* rx_pkt.l2info fields */
-#define S_RX_ETHHDR_LEN 0
-#define M_RX_ETHHDR_LEN 0x1F
-#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
-#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
-#define S_RX_T5_ETHHDR_LEN 0
-#define M_RX_T5_ETHHDR_LEN 0x3F
-#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
-#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
-#define S_RX_MACIDX 8
-#define M_RX_MACIDX 0x1FF
-#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
-#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
-#define S_RXF_SYN 21
-#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
-#define F_RXF_SYN V_RXF_SYN(1U)
-#define S_RX_CHAN 28
-#define M_RX_CHAN 0xF
-#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
-#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+#define RX_ETHHDR_LEN_S 0
+#define RX_ETHHDR_LEN_M 0x1F
+#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S)
+#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)
+#define RX_T5_ETHHDR_LEN_S 0
+#define RX_T5_ETHHDR_LEN_M 0x3F
+#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S)
+#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M)
+#define RX_MACIDX_S 8
+#define RX_MACIDX_M 0x1FF
+#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
+#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)
+#define RXF_SYN_S 21
+#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
+#define RXF_SYN_F RXF_SYN_V(1U)
+#define RX_CHAN_S 28
+#define RX_CHAN_M 0xF
+#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
+#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)
 /* rx_pkt.hdr_len fields */
-#define S_RX_TCPHDR_LEN 0
-#define M_RX_TCPHDR_LEN 0x3F
-#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
-#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
-#define S_RX_IPHDR_LEN 6
-#define M_RX_IPHDR_LEN 0x3FF
-#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
-#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+#define RX_TCPHDR_LEN_S 0
+#define RX_TCPHDR_LEN_M 0x3F
+#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S)
+#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M)
+#define RX_IPHDR_LEN_S 6
+#define RX_IPHDR_LEN_M 0x3FF
+#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
+#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
 struct cpl_trace_pkt {
 	u8 opcode;
@@ -730,14 +893,22 @@ struct cpl_l2t_write_req {
 	WR_HDR;
 	union opcode_tid ot;
 	__be16 params;
-#define L2T_W_INFO(x) ((x) << 2)
-#define L2T_W_PORT(x) ((x) << 8)
-#define L2T_W_NOREPLY(x) ((x) << 15)
 	__be16 l2t_idx;
 	__be16 vlan;
 	u8 dst_mac[6];
 };
+/* cpl_l2t_write_req.params fields */
+#define L2T_W_INFO_S 2
+#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S)
+#define L2T_W_PORT_S 8
+#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S)
+#define L2T_W_NOREPLY_S 15
+#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
+#define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U)
 struct cpl_l2t_write_rpl {
 	union opcode_tid ot;
 	u8 status;
@@ -752,11 +923,15 @@ struct cpl_rdma_terminate {
 struct cpl_sge_egr_update {
 	__be32 opcode_qid;
-#define EGR_QID(x) ((x) & 0x1FFFF)
 	__be16 cidx;
 	__be16 pidx;
 };
+/* cpl_sge_egr_update.ot fields */
+#define EGR_QID_S 0
+#define EGR_QID_M 0x1FFFF
+#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M)
 /* cpl_fw*.type values */
 enum {
 	FW_TYPE_CMD_RPL = 0,
@@ -849,22 +1024,30 @@ struct ulptx_sge_pair {
 struct ulptx_sgl {
 	__be32 cmd_nsge;
-#define ULPTX_NSGE(x) ((x) << 0)
-#define ULPTX_MORE (1U << 23)
 	__be32 len0;
 	__be64 addr0;
 	struct ulptx_sge_pair sge[0];
 };
+#define ULPTX_NSGE_S 0
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+#define ULPTX_MORE_S 23
+#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
+#define ULPTX_MORE_F ULPTX_MORE_V(1U)
 struct ulp_mem_io {
 	WR_HDR;
 	__be32 cmd;
 	__be32 len16; /* command length */
 	__be32 dlen; /* data length in 32-byte units */
 	__be32 lock_addr;
-#define ULP_MEMIO_LOCK(x) ((x) << 31)
 };
+#define ULP_MEMIO_LOCK_S 31
+#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S)
+#define ULP_MEMIO_LOCK_F ULP_MEMIO_LOCK_V(1U)
 /* additional ulp_mem_io.cmd fields */
 #define ULP_MEMIO_ORDER_S 23
 #define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
@@ -874,13 +1057,9 @@ struct ulp_mem_io {
 #define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
 #define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U)
-#define S_T5_ULP_MEMIO_IMM 23
-#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
-#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
-#define S_T5_ULP_MEMIO_ORDER 22
-#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
-#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
+#define T5_ULP_MEMIO_ORDER_S 22
+#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
+#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
 /* ulp_mem_io.lock_addr fields */
 #define ULP_MEMIO_ADDR_S 0
...
@@ -450,7 +450,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
 		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
		 */
 		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
-		opcode = G_CPL_OPCODE(ntohl(p->opcode_qid));
+		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
 		if (opcode != CPL_SGE_EGR_UPDATE) {
 			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
@@ -471,7 +471,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
		 * free TX Queue Descriptors ...
		 */
 		const struct cpl_sge_egr_update *p = cpl;
-		unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
 		struct sge *s = &adapter->sge;
 		struct sge_txq *tq;
 		struct sge_eth_txq *txq;
...
@@ -926,7 +926,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
 	}
 	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-			      ULPTX_NSGE(nfrags));
+			      ULPTX_NSGE_V(nfrags));
 	if (likely(--nfrags == 0))
 		return;
 	/*
@@ -1604,7 +1604,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
	 * If this is a good TCP packet and we have Generic Receive Offload
	 * enabled, handle the packet in the GRO path.
	 */
-	if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
	    !pkt->ip_frag) {
 		do_gro(rxq, gl, pkt);
@@ -1626,7 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	rxq->stats.pkts++;
 	if (csum_ok && !pkt->err_vec &&
-	    (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
 		if (!pkt->ip_frag)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else {
...
@@ -1758,7 +1758,7 @@ csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
 	else {
 		/* Program DSGL to dma payload */
 		dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
-				      ULPTX_MORE | ULPTX_NSGE(1));
+				      ULPTX_MORE_F | ULPTX_NSGE_V(1));
 		dsgl.len0 = cpu_to_be32(pld_len);
 		dsgl.addr0 = cpu_to_be64(pld->paddr);
 		csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
...
@@ -298,8 +298,8 @@ csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
 	struct csio_dma_buf *dma_buf;
 	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
-	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE |
-			      ULPTX_NSGE(req->nsge));
+	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
+			      ULPTX_NSGE_V(req->nsge));
 	/* Now add the data SGLs */
 	if (likely(!req->dcopy)) {
 		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
...
@@ -704,7 +704,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
 	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
 	unsigned short tcp_opt = ntohs(req->tcp_opt);
 	unsigned int tid = GET_TID(req);
-	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct tid_info *t = lldi->tids;
 	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
@@ -752,15 +752,15 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
 	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
 		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
-	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
-	if (GET_TCPOPT_TSTAMP(tcp_opt))
+	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
+	if (TCPOPT_TSTAMP_G(tcp_opt))
 		csk->advmss -= 12;
 	if (csk->advmss < 128)
 		csk->advmss = 128;
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
-		  csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);
+		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
 	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
@@ -856,8 +856,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
 	unsigned int tid = GET_TID(rpl);
 	unsigned int atid =
-		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
-	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
+	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct tid_info *t = lldi->tids;
@@ -1112,7 +1112,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
 	hlen = ntohs(cpl->len);
 	dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
-	plen = ISCSI_PDU_LEN(pdu_len_ddp);
+	plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
 	if (is_t4(lldi->adapter_type))
 		plen -= 40;
@@ -1619,7 +1619,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, csk->tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
-	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 8);
 	req->val = cpu_to_be64(pg_idx << 8);
@@ -1651,7 +1651,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 4);
 	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
...