Commit 6f34a284 authored by Michal Kalderon, committed by David S. Miller

qed: Add LL2 slowpath handling

The iWARP unaligned MPA flow requires a slowpath event for flushing an
MPA connection that has entered the unaligned state. The flush ramrod
is received on the LL2 queue, and a pre-registered callback function is
called to handle the flush event.
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 89d65113
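
For context, here is a minimal consumer-side sketch of what the pre-registered callback mentioned above looks like: a handler matching the new qed_ll2_slowpath_cb typedef, wired into struct qed_ll2_cbs. This is not part of the patch; the handler name, its empty body, and the cookie value are assumptions for illustration only.

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <linux/qed/qed_ll2_if.h>

static void example_ll2_slowpath_cb(void *cxt, u8 connection_handle,
				    u32 opaque_data_0, u32 opaque_data_1)
{
	/*
	 * cxt is the cookie registered in struct qed_ll2_cbs below; the
	 * two opaque words carry ramrod-specific data (for the iWARP
	 * flush, presumably identifying the flushed connection).
	 */
}

static const struct qed_ll2_cbs example_cbs = {
	/* the existing rx/tx callbacks would be filled in as today */
	.slowpath_cb	= example_ll2_slowpath_cb,
	.cookie		= NULL,	/* typically the owning context, e.g. a hwfn */
};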
@@ -422,6 +422,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
 	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
 }
 
+static int
+qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
+			struct qed_ll2_info *p_ll2_conn,
+			union core_rx_cqe_union *p_cqe,
+			unsigned long *p_lock_flags)
+{
+	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+	struct core_rx_slow_path_cqe *sp_cqe;
+
+	sp_cqe = &p_cqe->rx_cqe_sp;
+	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
+		DP_NOTICE(p_hwfn,
+			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
+			  sp_cqe->ramrod_cmd_id);
+		return -EINVAL;
+	}
+
+	if (!p_ll2_conn->cbs.slowpath_cb) {
+		DP_NOTICE(p_hwfn,
+			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
+		return -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+
+	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
+				    p_ll2_conn->my_id,
+				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
+				    le32_to_cpu(sp_cqe->opaque_data.data[1]));
+
+	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+	return 0;
+}
+
 static int
 qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
 			      struct qed_ll2_info *p_ll2_conn,
@@ -495,8 +530,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 
 		switch (cqe->rx_cqe_sp.type) {
 		case CORE_RX_CQE_TYPE_SLOW_PATH:
-			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
-			rc = -EINVAL;
+			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
+						     cqe, &flags);
 			break;
 		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
 		case CORE_RX_CQE_TYPE_REGULAR:
@@ -1214,6 +1249,7 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
 	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
 	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
 	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
 	p_ll2_info->cbs.cookie = cbs->cookie;
 
 	return 0;
...
@@ -151,11 +151,16 @@ void (*qed_ll2_release_tx_packet_cb)(void *cxt,
 				     dma_addr_t first_frag_addr,
 				     bool b_last_fragment, bool b_last_packet);
 
+typedef
+void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
+			    u32 opaque_data_0, u32 opaque_data_1);
+
 struct qed_ll2_cbs {
 	qed_ll2_complete_rx_packet_cb rx_comp_cb;
 	qed_ll2_release_rx_packet_cb rx_release_cb;
 	qed_ll2_complete_tx_packet_cb tx_comp_cb;
 	qed_ll2_release_tx_packet_cb tx_release_cb;
+	qed_ll2_slowpath_cb slowpath_cb;
 	void *cookie;
 };
...
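
A design note on the handler above: qed_ll2_handle_slowpath() releases the Rx queue lock (through the saved irq flags passed down by pointer) before invoking the callback and re-acquires it afterwards, presumably so the callback can re-enter LL2 paths that take the same lock. A generic sketch of that pattern, with hypothetical names, follows.

#include <linux/spinlock.h>

/*
 * Generic sketch of the lock-drop pattern used by qed_ll2_handle_slowpath();
 * the names here are hypothetical, not from the driver.  The caller holds
 * 'lock' with interrupts saved in *flags; the callback runs unlocked, and
 * the lock is re-acquired (updating *flags) before returning.
 */
static void invoke_unlocked(spinlock_t *lock, unsigned long *flags,
			    void (*cb)(void *cookie), void *cookie)
{
	spin_unlock_irqrestore(lock, *flags);
	cb(cookie);
	spin_lock_irqsave(lock, *flags);
}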