Commit 410a619a authored by David S. Miller

Merge branch 'qed-Add-iWARP-support-for-unaligned-MPA-packets'

Michal Kalderon says:

====================
qed: Add iWARP support for unaligned MPA packets

This patch series adds support for handling unaligned MPA packets
(FPDUs split over more than one TCP packet).
When the FW detects that a packet is unaligned, it forwards the packet
to the driver via a dedicated light L2 (ll2) connection. The driver
then stores this packet until the remainder of the FPDU is received.
Once the driver reconstructs the full FPDU, it sends it down to the FW
via the ll2 connection. The driver also breaks down any packed PDUs
into separate packets for the FW.

Patches 1-6 are all slight modifications to ll2 to support additional
requirements for the unaligned MPA ll2 client.

Patch 7 opens the additional ll2 connection for iWARP.
Patches 8-12 contain the algorithm for aligning packets.
====================
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 90561843 1e28eaad
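The core idea of the series — buffer the fragments of an unaligned FPDU until a complete FPDU can be handed back to the FW, and split packed PDUs into one transmit per FPDU — can be illustrated with a small user-space sketch. This is not the qed driver code: the names fpdu_reasm, fpdu_feed, and the tx_fpdu callback are hypothetical, and it assumes standard MPA framing (RFC 5044) with a 2-byte big-endian ULPDU length, the FPDU padded to a 4-byte boundary, and a 4-byte CRC.

/* Simplified user-space sketch of the FPDU alignment flow described in the
 * cover letter -- not the qed driver code.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define MPA_HDR_SIZE 2
#define MPA_CRC_SIZE 4
#define ALIGN4(x) (((x) + 3u) & ~3u)
#define MAX_FPDU_SIZE (ALIGN4(MPA_HDR_SIZE + 65535) + MPA_CRC_SIZE)

struct fpdu_reasm {
	uint8_t buf[MAX_FPDU_SIZE]; /* accumulates one partial FPDU */
	size_t have;                /* bytes gathered so far */
	size_t need;                /* full FPDU size; 0 until header known */
};

/* Feed one TCP segment's payload; emit every complete FPDU it closes. */
static void fpdu_feed(struct fpdu_reasm *r, const uint8_t *data, size_t len,
		      void (*tx_fpdu)(const uint8_t *fpdu, size_t fpdu_len))
{
	while (len) {
		size_t take;

		if (r->have < MPA_HDR_SIZE) {
			/* Gather the 2-byte header first to learn the FPDU
			 * length; the driver marks the "only one header byte
			 * arrived" case with the sentinel
			 * QED_IWARP_INVALID_INCOMPLETE_BYTES (first diff
			 * below).
			 */
			take = MPA_HDR_SIZE - r->have;
			if (take > len)
				take = len;
			memcpy(r->buf + r->have, data, take);
			r->have += take;
			data += take;
			len -= take;
			if (r->have < MPA_HDR_SIZE)
				return; /* still only a partial header */
			r->need = ALIGN4(MPA_HDR_SIZE +
					 ((r->buf[0] << 8) | r->buf[1])) +
				  MPA_CRC_SIZE;
		}

		take = r->need - r->have;
		if (take > len)
			take = len; /* unaligned: FPDU continues next segment */
		memcpy(r->buf + r->have, data, take);
		r->have += take;
		data += take;
		len -= take;

		if (r->have == r->need) {
			tx_fpdu(r->buf, r->need); /* full FPDU back to the FW */
			r->have = 0;
			r->need = 0; /* any leftover len is a packed PDU */
		}
	}
}

A caller would run fpdu_feed() once per buffer arriving on the dedicated ll2 connection; in the driver, the corresponding per-connection state is struct qed_iwarp_fpdu in the first hunk below.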
@@ -55,15 +55,43 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
 #define QED_IWARP_HANDLE_INVAL		(0xff)
 
 struct qed_iwarp_ll2_buff {
+	struct qed_iwarp_ll2_buff *piggy_buf;
 	void *data;
 	dma_addr_t data_phys_addr;
 	u32 buff_size;
 };
 
+struct qed_iwarp_ll2_mpa_buf {
+	struct list_head list_entry;
+	struct qed_iwarp_ll2_buff *ll2_buf;
+	struct unaligned_opaque_data data;
+	u16 tcp_payload_len;
+	u8 placement_offset;
+};
+
+/* In some cases an FPDU will arrive with only one byte of the header; in
+ * that case fpdu_length will be partial (contain only the higher byte) and
+ * incomplete_bytes will contain the invalid value.
+ */
+#define QED_IWARP_INVALID_INCOMPLETE_BYTES 0xffff
+
+struct qed_iwarp_fpdu {
+	struct qed_iwarp_ll2_buff *mpa_buf;
+	void *mpa_frag_virt;
+	dma_addr_t mpa_frag;
+	dma_addr_t pkt_hdr;
+	u16 mpa_frag_len;
+	u16 fpdu_length;
+	u16 incomplete_bytes;
+	u8 pkt_hdr_size;
+};
+
 struct qed_iwarp_info {
 	struct list_head listen_list;	/* qed_iwarp_listener */
 	struct list_head ep_list;	/* qed_iwarp_ep */
 	struct list_head ep_free_list;	/* pre-allocated ep's */
+	struct list_head mpa_buf_list;	/* list of mpa_bufs */
+	struct list_head mpa_buf_pending_list;
 	spinlock_t iw_lock;	/* for iwarp resources */
 	spinlock_t qp_lock;	/* for teardown races */
 	u32 rcv_wnd_scale;
@@ -73,9 +101,14 @@ struct qed_iwarp_info {
 	u8 tcp_flags;
 	u8 ll2_syn_handle;
 	u8 ll2_ooo_handle;
+	u8 ll2_mpa_handle;
 	u8 peer2peer;
 	enum mpa_negotiation_mode mpa_rev;
 	enum mpa_rtr_type rtr_type;
+	struct qed_iwarp_fpdu *partial_fpdus;
+	struct qed_iwarp_ll2_mpa_buf *mpa_bufs;
+	u8 *mpa_intermediate_buf;
+	u16 max_num_partial_fpdus;
 };
 
 enum qed_iwarp_ep_state {
......
@@ -422,6 +422,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
 	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
 }
 
+static int
+qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_conn,
+			union core_rx_cqe_union *p_cqe,
+			unsigned long *p_lock_flags)
+{
+	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+	struct core_rx_slow_path_cqe *sp_cqe;
+
+	sp_cqe = &p_cqe->rx_cqe_sp;
+	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
+		DP_NOTICE(p_hwfn,
+			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
+			  sp_cqe->ramrod_cmd_id);
+		return -EINVAL;
+	}
+
+	if (!p_ll2_conn->cbs.slowpath_cb) {
+		DP_NOTICE(p_hwfn,
+			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
+		return -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+
+	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
+				    p_ll2_conn->my_id,
+				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
+				    le32_to_cpu(sp_cqe->opaque_data.data[1]));
+
+	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+	return 0;
+}
+
 static int
 qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
 			      struct qed_ll2_info *p_ll2_conn,
@@ -495,8 +530,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 		switch (cqe->rx_cqe_sp.type) {
 		case CORE_RX_CQE_TYPE_SLOW_PATH:
-			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
-			rc = -EINVAL;
+			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
+						     cqe, &flags);
 			break;
 		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
 		case CORE_RX_CQE_TYPE_REGULAR:
@@ -894,7 +929,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
 	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
-	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;
+	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
 
 	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
 	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
@@ -1105,6 +1140,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 				     struct qed_ll2_info *p_ll2_info)
 {
 	struct qed_ll2_tx_packet *p_descq;
+	u32 desc_size;
 	u32 capacity;
 	int rc = 0;
@@ -1122,13 +1158,17 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 		goto out;
 
 	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
-	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
-			  GFP_KERNEL);
+	/* First element is part of the packet, rest are flexibly added */
+	desc_size = (sizeof(*p_descq) +
+		     (p_ll2_info->input.tx_max_bds_per_packet - 1) *
+		     sizeof(p_descq->bds_set));
+
+	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
 	if (!p_descq) {
 		rc = -ENOMEM;
 		goto out;
 	}
-	p_ll2_info->tx_queue.descq_array = p_descq;
+	p_ll2_info->tx_queue.descq_mem = p_descq;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
 		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
@@ -1209,6 +1249,7 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
 	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
 	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
 	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
 	p_ll2_info->cbs.cookie = cbs->cookie;
 
 	return 0;
@@ -1260,6 +1301,11 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
 	p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
 			      CORE_TX_DEST_NW : CORE_TX_DEST_LB;
 
+	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
+	    data->input.secondary_queue)
+		p_ll2_info->main_func_queue = false;
+	else
+		p_ll2_info->main_func_queue = true;
+
 	/* Correct maximum number of Tx BDs */
 	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
@@ -1359,11 +1405,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
 {
 	struct qed_hwfn *p_hwfn = cxt;
 	struct qed_ll2_info *p_ll2_conn;
+	struct qed_ll2_tx_packet *p_pkt;
 	struct qed_ll2_rx_queue *p_rx;
 	struct qed_ll2_tx_queue *p_tx;
 	struct qed_ptt *p_ptt;
 	int rc = -EINVAL;
 	u32 i, capacity;
+	u32 desc_size;
 	u8 qid;
 
 	p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1397,9 +1445,15 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
 	INIT_LIST_HEAD(&p_tx->sending_descq);
 	spin_lock_init(&p_tx->lock);
 	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
-	for (i = 0; i < capacity; i++)
-		list_add_tail(&p_tx->descq_array[i].list_entry,
-			      &p_tx->free_descq);
+	/* First element is part of the packet, rest are flexibly added */
+	desc_size = (sizeof(*p_pkt) +
+		     (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
+		     sizeof(p_pkt->bds_set));
+
+	for (i = 0; i < capacity; i++) {
+		p_pkt = p_tx->descq_mem + desc_size * i;
+		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+	}
 	p_tx->cur_completing_bd_idx = 0;
 	p_tx->bds_idx = 0;
 	p_tx->b_completing_packet = false;
@@ -1579,11 +1633,28 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
 							     : CORE_RROCE;
 
-	tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
-						       : CORE_TX_DEST_LB;
+	switch (pkt->tx_dest) {
+	case QED_LL2_TX_DEST_NW:
+		tx_dest = CORE_TX_DEST_NW;
+		break;
+	case QED_LL2_TX_DEST_LB:
+		tx_dest = CORE_TX_DEST_LB;
+		break;
+	case QED_LL2_TX_DEST_DROP:
+		tx_dest = CORE_TX_DEST_DROP;
+		break;
+	default:
+		tx_dest = CORE_TX_DEST_LB;
+		break;
+	}
 
 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
-	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+	    p_ll2->input.conn_type == QED_LL2_TYPE_OOO)
+		start_bd->nw_vlan_or_lb_echo =
+		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
+	else
+		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
+
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
 		  cpu_to_le16(pkt->l4_hdr_offset_w));
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
@@ -1591,6 +1662,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
 	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
 	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
 	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
 	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
 	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
@@ -1698,7 +1772,7 @@ int qed_ll2_prepare_tx_packet(void *cxt,
 	p_tx = &p_ll2_conn->tx_queue;
 	p_tx_chain = &p_tx->txq_chain;
 
-	if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
 		return -EIO;
 
 	spin_lock_irqsave(&p_tx->lock, flags);
@@ -1858,7 +1932,7 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
 		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
 	}
 
-	kfree(p_ll2_conn->tx_queue.descq_array);
+	kfree(p_ll2_conn->tx_queue.descq_mem);
 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
 
 	kfree(p_ll2_conn->rx_queue.descq_array);
......
@@ -63,17 +63,14 @@ struct qed_ll2_rx_packet {
 struct qed_ll2_tx_packet {
 	struct list_head list_entry;
 	u16 bd_used;
-	u16 vlan;
-	u16 l4_hdr_offset_w;
-	u8 bd_flags;
 	bool notify_fw;
 	void *cookie;
-
+	/* Flexible Array of bds_set determined by max_bds_per_packet */
 	struct {
 		struct core_tx_bd *txq_bd;
 		dma_addr_t tx_frag;
 		u16 frag_len;
-	} bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
+	} bds_set[1];
 };
 
 struct qed_ll2_rx_queue {
@@ -101,7 +98,7 @@ struct qed_ll2_tx_queue {
 	struct list_head active_descq;
 	struct list_head free_descq;
 	struct list_head sending_descq;
-	struct qed_ll2_tx_packet *descq_array;
+	void *descq_mem; /* memory for variable sized qed_ll2_tx_packet */
 	struct qed_ll2_tx_packet *cur_send_packet;
 	struct qed_ll2_tx_packet cur_completing_packet;
 	u16 cur_completing_bd_idx;
@@ -124,6 +121,7 @@ struct qed_ll2_info {
 	bool b_active;
 	enum core_tx_dest tx_dest;
 	u8 tx_stats_en;
+	bool main_func_queue;
 	struct qed_ll2_rx_queue rx_queue;
 	struct qed_ll2_tx_queue tx_queue;
 	struct qed_ll2_cbs cbs;
......
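The descriptor changes above replace the fixed-size bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET] array with the classic one-element-array idiom: each descriptor is over-allocated to hold tx_max_bds_per_packet entries, and entry i is found at descq_mem + i * desc_size, exactly the arithmetic in the qed_ll2.c hunks. A standalone sketch of the pattern follows; field names are illustrative, and a C99 flexible array member (bds_set[]) would be the modern spelling of the same trick.

#include <stdio.h>
#include <stdlib.h>

struct bd_set_entry {
	void *txq_bd;
	unsigned long tx_frag;
	unsigned short frag_len;
};

struct tx_pkt {
	unsigned short bd_used;
	/* first element is part of the struct, rest are flexibly added */
	struct bd_set_entry bds_set[1];
};

int main(void)
{
	unsigned int capacity = 64, max_bds = 4, i;
	size_t desc_size = sizeof(struct tx_pkt) +
			   (max_bds - 1) * sizeof(struct bd_set_entry);
	/* one kcalloc-style slab, like descq_mem in the diff */
	char *mem = calloc(capacity, desc_size);

	if (!mem)
		return 1;
	for (i = 0; i < capacity; i++) {
		/* entry i lives at mem + i * desc_size, as in
		 * qed_ll2_establish_connection()
		 */
		struct tx_pkt *p =
			(struct tx_pkt *)(mem + (size_t)i * desc_size);

		p->bds_set[max_bds - 1].frag_len = 0; /* last BD addressable */
	}
	printf("descriptor size: %zu bytes\n", desc_size);
	free(mem);
	return 0;
}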
@@ -64,6 +64,7 @@ enum qed_ll2_roce_flavor_type {
 enum qed_ll2_tx_dest {
 	QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
 	QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
+	QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */
 	QED_LL2_TX_DEST_MAX
 };
@@ -150,11 +151,16 @@ void (*qed_ll2_release_tx_packet_cb)(void *cxt,
 				     dma_addr_t first_frag_addr,
 				     bool b_last_fragment, bool b_last_packet);
 
+typedef
+void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
+			    u32 opaque_data_0, u32 opaque_data_1);
+
 struct qed_ll2_cbs {
 	qed_ll2_complete_rx_packet_cb rx_comp_cb;
 	qed_ll2_release_rx_packet_cb rx_release_cb;
 	qed_ll2_complete_tx_packet_cb tx_comp_cb;
 	qed_ll2_release_tx_packet_cb tx_release_cb;
+	qed_ll2_slowpath_cb slowpath_cb;
 	void *cookie;
 };
@@ -171,6 +177,7 @@ struct qed_ll2_acquire_data_inputs {
 	enum qed_ll2_tx_dest tx_dest;
 	enum qed_ll2_error_handle ai_err_packet_too_big;
 	enum qed_ll2_error_handle ai_err_no_buf;
+	bool secondary_queue;
 	u8 gsi_enable;
 };
......
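The new slowpath_cb gives ll2 clients (here, iWARP) a notification path for Rx-queue-flush completions: qed_ll2_set_cbs() copies it alongside the existing callbacks, and qed_ll2_handle_slowpath() invokes it outside the Rx lock. Below is a self-contained mock of the hook's shape, using the typedef from this header; the handler body and the "register and fire" flow are illustrative, not driver code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;

/* Signature copied from the qed_ll2_if.h hunk above. */
typedef
void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
			    u32 opaque_data_0, u32 opaque_data_1);

struct qed_ll2_cbs {
	/* rx/tx completion and release callbacks elided for brevity */
	qed_ll2_slowpath_cb slowpath_cb;
	void *cookie;
};

/* Hypothetical client handler, e.g. an iWARP flush notification. */
static void my_slowpath_cb(void *cxt, u8 handle, u32 data0, u32 data1)
{
	printf("conn %u: rx queue flushed, opaque %08" PRIx32 ":%08" PRIx32 "\n",
	       (unsigned int)handle, data0, data1);
	(void)cxt;
}

int main(void)
{
	struct qed_ll2_cbs cbs = {
		.slowpath_cb = my_slowpath_cb,
		.cookie = NULL, /* client context handed back on invocation */
	};

	/* What qed_ll2_handle_slowpath() does after dropping the Rx lock: */
	if (cbs.slowpath_cb)
		cbs.slowpath_cb(cbs.cookie, 0, 0x1234, 0x5678);
	return 0;
}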