Commit c4d4c255 authored by David S. Miller

Merge branch 'cxgb4'

Hariprasad Shenai says:

====================
Adds support for CIQ and other misc. fixes for rdma/cxgb4

This patch series adds support for allocating and using IQs dedicated to
indirect interrupts, fixes ISS alignment for iWARP connections, and fixes
the TCP send/receive window handling for Chelsio T4/T5 adapters in
iw_cxgb4. It also changes the Interrupt Holdoff Packet Count threshold of
the cxgb4 driver's response queues.

The patch series is created against the 'net-next' tree and includes
patches for the cxgb4 and iw_cxgb4 drivers.

Since this patch series contains both cxgb4 and iw_cxgb4 patches, we would
like to request that it be merged via David Miller's 'net-next' tree.

We have copied the maintainers of the respective drivers. Kindly review
the changes and let us know if you have any comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f8c1b7ce c887ad0e
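For reference, the receive-window split that recurs throughout the iw_cxgb4
changes below can be illustrated with a small standalone C sketch. The
RCV_BUFSIZ_MASK value and the 1KB units are taken from the patches; the
function and variable names are illustrative only, not driver code:

#include <stdint.h>
#include <stdio.h>

#define RCV_BUFSIZ_MASK 0x3FFU	/* 10-bit opt0 field, in 1KB units */

/*
 * Split a receive window between the opt0 RCV_BUFSIZ field and the
 * credits later returned via rx_data_ack, mirroring the driver logic.
 */
static void split_rcv_win(uint32_t rcv_win_bytes)
{
	uint32_t win = rcv_win_bytes >> 10;	/* opt0 takes 1KB units */
	uint32_t deferred = 0;

	if (win > RCV_BUFSIZ_MASK) {
		win = RCV_BUFSIZ_MASK;
		/* the overage is handed back as rx_data_ack credits */
		deferred = rcv_win_bytes - RCV_BUFSIZ_MASK * 1024;
	}
	printf("rcv_win %u -> opt0 win %u KB, deferred credits %u\n",
	       rcv_win_bytes, win, deferred);
}

int main(void)
{
	split_rcv_win(256 * 1024);	/* fits entirely in opt0 */
	split_rcv_win(2 * 1024 * 1024);	/* overflows the 10-bit field */
	return 0;
}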
......@@ -232,12 +232,16 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
- ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
sizeof(struct iphdr) - sizeof(struct tcphdr);
ep->mss = ep->emss;
if (GET_TCPOPT_TSTAMP(opt))
ep->emss -= 12;
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
ep->mss, ep->emss);
}
......@@ -468,7 +472,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
- flowc->mnemval[6].val = cpu_to_be32(snd_win);
flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(ep->emss);
/* Pad WR to 16 byte boundary */
......@@ -528,6 +532,17 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
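/*
 * Pick the MTU index whose resulting TCP payload stays 8-byte aligned
 * once the IP and TCP headers and, when TCP timestamps are negotiated,
 * the 12-byte timestamp option are accounted for.
 */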
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx, int use_ts)
{
unsigned short hdr_size = sizeof(struct iphdr) +
sizeof(struct tcphdr) +
(use_ts ? 12 : 0);
unsigned short data_size = mtu - hdr_size;
cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
......@@ -550,6 +565,7 @@ static int send_connect(struct c4iw_ep *ep)
struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr;
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
int win;
wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
roundup(sizev4, 16) :
......@@ -565,8 +581,18 @@ static int send_connect(struct c4iw_ep *ep)
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
- cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
enable_tcp_timestamps);
wscale = compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
if (win > RCV_BUFSIZ_MASK)
win = RCV_BUFSIZ_MASK;
opt0 = (nocong ? NO_CONG(1) : 0) |
KEEP_ALIVE(1) |
DELACK(1) |
......@@ -577,7 +603,7 @@ static int send_connect(struct c4iw_ep *ep)
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos) |
ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(rcv_win>>10);
RCV_BUFSIZ(win);
opt2 = RX_CHANNEL(0) |
CCTRL_ECN(enable_ecn) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
......@@ -633,6 +659,13 @@ static int send_connect(struct c4iw_ep *ep)
req6->opt2 = cpu_to_be32(opt2);
}
} else {
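/*
 * T5 lets the driver supply the initial send sequence number. An isn of
 * the form 8k - 1 makes the first payload byte (isn + 1) 8-byte aligned;
 * the extra 4 for peer2peer presumably covers the MPA start exchange.
 */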
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID;
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
if (peer2peer)
isn += 4;
if (ep->com.remote_addr.ss_family == AF_INET) {
t5_req = (struct cpl_t5_act_open_req *)
skb_put(skb, wrlen);
......@@ -649,6 +682,9 @@ static int send_connect(struct c4iw_ep *ep)
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
t5_req->rsvd = cpu_to_be32(isn);
PDBG("%s snd_isn %u\n", __func__,
be32_to_cpu(t5_req->rsvd));
t5_req->opt2 = cpu_to_be32(opt2);
} else {
t5_req6 = (struct cpl_t5_act_open_req6 *)
......@@ -672,6 +708,9 @@ static int send_connect(struct c4iw_ep *ep)
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
t5_req6->rsvd = cpu_to_be32(isn);
PDBG("%s snd_isn %u\n", __func__,
be32_to_cpu(t5_req6->rsvd));
t5_req6->opt2 = cpu_to_be32(opt2);
}
}
......@@ -1145,6 +1184,14 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
return 0;
}
/*
* If we couldn't specify the entire rcv window at connection setup
* due to the limit in the number of bits in the RCV_BUFSIZ field,
* then add the overage in to the credits returned.
*/
if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
INIT_TP_WR(req, ep->hwtid);
......@@ -1618,6 +1665,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
unsigned int mtu_idx;
int wscale;
struct sockaddr_in *sin;
int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
......@@ -1640,8 +1688,18 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
- cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
enable_tcp_timestamps);
wscale = compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
if (win > RCV_BUFSIZ_MASK)
win = RCV_BUFSIZ_MASK;
req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
(nocong ? NO_CONG(1) : 0) |
KEEP_ALIVE(1) |
......@@ -1653,7 +1711,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos) |
ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(rcv_win >> 10));
RCV_BUFSIZ(win));
req->tcb.opt2 = (__force __be32) (PACE(1) |
TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
RX_CHANNEL(0) |
......@@ -1690,6 +1748,13 @@ static int is_neg_adv(unsigned int status)
status == CPL_ERR_KEEPALV_NEG_ADVICE;
}
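/*
 * Seed the endpoint with the module-wide send/receive window defaults;
 * per-endpoint copies let each connection size its windows independently.
 */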
static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
ep->snd_win = snd_win;
ep->rcv_win = rcv_win;
PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}
#define ACT_OPEN_RETRY_COUNT 2
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
......@@ -1738,6 +1803,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
ep->ctrlq_idx = cxgb4_port_idx(pdev);
ep->rss_qid = cdev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
dev_put(pdev);
} else {
pdev = get_real_dev(n->dev);
......@@ -1756,6 +1822,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
cdev->rdev.lldi.nchan;
ep->rss_qid = cdev->rdev.lldi.rxq_ids[
cxgb4_port_idx(n->dev) * step];
set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
if (clear_mpa_v1) {
ep->retry_with_mpa_v1 = 0;
......@@ -1986,13 +2053,36 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
u64 opt0;
u32 opt2;
int wscale;
struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
int win;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(*rpl));
skb_get(skb);
- cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
rpl = cplhdr(skb);
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
skb_trim(skb, roundup(sizeof(*rpl5), 16));
rpl5 = (void *)rpl;
INIT_TP_WR(rpl5, ep->hwtid);
} else {
skb_trim(skb, sizeof(*rpl));
INIT_TP_WR(rpl, ep->hwtid);
}
OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
ep->hwtid));
best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
enable_tcp_timestamps && req->tcpopt.tstamp);
wscale = compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
if (win > RCV_BUFSIZ_MASK)
win = RCV_BUFSIZ_MASK;
opt0 = (nocong ? NO_CONG(1) : 0) |
KEEP_ALIVE(1) |
DELACK(1) |
......@@ -2003,7 +2093,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos >> 2) |
ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(rcv_win>>10);
RCV_BUFSIZ(win);
opt2 = RX_CHANNEL(0) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
......@@ -2023,14 +2113,18 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CCTRL_ECN(1);
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
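/* As in send_connect(): isn + 1 lands on an 8-byte boundary. */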
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
rpl5 = (void *)rpl;
memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
}
- rpl = cplhdr(skb);
- INIT_TP_WR(rpl, ep->hwtid);
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
rpl->opt0 = cpu_to_be64(opt0);
rpl->opt2 = cpu_to_be32(opt2);
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
......@@ -2095,6 +2189,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
int err;
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
unsigned short hdrs;
parent_ep = lookup_stid(t, stid);
if (!parent_ep) {
......@@ -2152,8 +2247,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
- if (peer_mss && child_ep->mtu > (peer_mss + 40))
- child_ep->mtu = peer_mss + 40;
hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
child_ep->mtu = peer_mss + hdrs;
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
......
......@@ -134,7 +134,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
V_FW_RI_RES_WR_IQANUS(0) |
V_FW_RI_RES_WR_IQANUD(1) |
F_FW_RI_RES_WR_IQANDST |
- V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
V_FW_RI_RES_WR_IQANDSTINDEX(
rdev->lldi.ciq_ids[cq->vector]));
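/*
 * Interrupt destination is now the concentrator IQ backing this CQ's
 * completion vector rather than the ULD's first Rx queue.
 */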
res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
F_FW_RI_RES_WR_IQDROPRSS |
V_FW_RI_RES_WR_IQPCIECH(2) |
......@@ -870,6 +871,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
rhp = to_c4iw_dev(ibdev);
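/* The completion vector doubles as the CIQ index, so bound-check it. */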
if (vector >= rhp->rdev.lldi.nciq)
return ERR_PTR(-EINVAL);
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
if (!chp)
return ERR_PTR(-ENOMEM);
......@@ -915,6 +919,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
}
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
chp->cq.vector = vector;
ret = create_cq(&rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
......
......@@ -805,6 +805,8 @@ struct c4iw_ep {
u8 retry_with_mpa_v1;
u8 tried_with_mpa_v1;
unsigned int retry_count;
int snd_win;
int rcv_win;
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
......
......@@ -499,7 +499,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.node_type = RDMA_NODE_RNIC;
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
- dev->ibdev.num_comp_vectors = 1;
dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
dev->ibdev.query_device = c4iw_query_device;
dev->ibdev.query_port = c4iw_query_port;
......
......@@ -542,6 +542,7 @@ struct t4_cq {
size_t memsize;
__be64 bits_type_ts;
u32 cqid;
int vector;
u16 size; /* including status page */
u16 cidx;
u16 sw_pidx;
......
......@@ -848,6 +848,7 @@ enum { /* TCP congestion control algorithms */
#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
#define CONG_CNTRL_VALID (1 << 18)
#define T5_OPT_2_VALID (1 << 31)
#endif /* _T4FW_RI_API_H_ */
......@@ -357,11 +357,17 @@ enum {
MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
};
enum {
- MAX_EGRQ = 128, /* max # of egress queues, including FLs */
- MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
+ MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+ MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};
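/*
 * For example, assuming MAX_ETH_QSETS is 32 and NCHAN is 4, these work
 * out to MAX_EGRQ = 64 + 32 + 4 + 4 + 4 = 108 and
 * MAX_INGQ = 32 + 16 + 4 + 4 + 4 + 2 = 62.
 */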
struct adapter;
......@@ -538,6 +544,7 @@ struct sge {
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
......@@ -548,8 +555,10 @@ struct sge {
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active offload queue sets */
u16 rdmaqs; /* # of available RDMA Rx queues */
u16 rdmaciqs; /* # of available RDMA concentrator IQs */
u16 ofld_rxq[MAX_OFLD_QSETS];
u16 rdma_rxq[NCHAN];
u16 rdma_ciq[NCHAN];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
......@@ -577,6 +586,7 @@ struct sge {
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
struct l2t_data;
......
......@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
const struct cxgb4_virt_res *vr; /* assorted HW resources */
const unsigned short *mtus; /* MTU table */
const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
const unsigned short *ciq_ids; /* the ULD's concentrator IQ ids */
unsigned short nrxq; /* # of Rx queues */
unsigned short ntxq; /* # of Tx queues */
unsigned short nciq; /* # of concentrator IQ */
unsigned char nchan:4; /* # of channels */
unsigned char nports:4; /* # of ports */
unsigned char wr_cred; /* WR 16-byte credits */
......@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
unsigned short header_size,
unsigned short data_size_max,
unsigned short data_size_align,
unsigned int *mtu_idxp);
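/*
 * Roughly: search the MTU table for the largest MTU whose data segment
 * (MTU minus header_size) does not exceed data_size_max, preferring
 * segments that are a multiple of data_size_align; the chosen index is
 * returned through mtu_idxp.
 */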
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
......
......@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->cntxt_id = ntohs(c.iqid);
iq->abs_id = ntohs(c.physiqid);
iq->size--; /* subtract status entry */
- iq->adap = adap;
iq->netdev = dev;
iq->handler = hnd;
......@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (oq->rspq.desc)
free_rspq_fl(adap, &oq->rspq, &oq->fl);
}
for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
if (oq->rspq.desc)
free_rspq_fl(adap, &oq->rspq, &oq->fl);
}
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
......
......@@ -68,6 +68,7 @@ enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_MAX_IQ_SIZE = 65520,
SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
......
......@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
#define RCV_BUFSIZ_MASK 0x3FFU
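/*
 * RCV_BUFSIZ is a 10-bit field in 1KB units, so opt0 can advertise at
 * most 1023KB of receive window; iw_cxgb4 returns any excess later as
 * rx_data_ack credits.
 */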
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
......@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
__be64 opt0;
};
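/*
 * T5 variant of the accept reply: the iss field lets the driver supply
 * the initial send sequence number (see accept_cr() in iw_cxgb4).
 */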
struct cpl_t5_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
__be32 opt2;
__be64 opt0;
__be32 iss;
__be32 rsvd;
};
struct cpl_act_open_req {
WR_HDR;
union opcode_tid ot;
......