Commit 81fe16e0 authored by David S. Miller

Merge branch 'qed-Slowpath-Queue-bug-fixes'

Denis Bolotin says:

====================
qed: Slowpath Queue bug fixes

This patch series fixes several bugs in the SPQ mechanism. It tightens
SPQ entry management, prevents resource leaks and memory corruption, and
handles error cases throughout the driver.
Please consider applying to net.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0d5b9311 fa5c448d
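
The common thread in the series: qed_sp_init_request() hands its caller an SPQ entry, and every error exit taken between that call and qed_spq_post() must release the entry again. The condensed caller skeleton below shows the convention the patches establish; it is a hypothetical sketch, not driver code (example_ramrod_handler, EXAMPLE_CMD, EXAMPLE_PROTOCOL and do_validation are illustrative names), and it will not compile outside the qed driver.

/* Hypothetical sketch of the shape every ramrod-sending function takes
 * after this series. Assumes qed driver context; illustrative names only.
 */
static int example_ramrod_handler(struct qed_hwfn *p_hwfn,
				  struct qed_sp_init_data *p_init_data)
{
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	rc = qed_sp_init_request(p_hwfn, &p_ent, EXAMPLE_CMD,
				 EXAMPLE_PROTOCOL, p_init_data);
	if (rc)
		return rc;		/* init now cleans up after itself */

	rc = do_validation(p_hwfn);	/* any failure past this point... */
	if (rc)
		goto err;		/* ...must hand the entry back */

	return qed_spq_post(p_hwfn, p_ent, NULL);	/* SPQ owns it now */

err:
	qed_sp_destroy_request(p_hwfn, p_ent);
	return rc;
}

The new helper qed_sp_destroy_request() (added in qed_sp_commands.c below) frees the entry if it was allocated for the unlimited_pending list and returns it to the free pool otherwise; calling qed_spq_return_entry() directly, as qed_sp_vport_update() used to, could put a kmalloc'ed entry into the pool.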
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 		       "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
 		       fcoe_pf_params->num_cqs,
 		       p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto err;
 	}

 	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
 	if (rc)
-		return rc;
+		goto err;

 	cxt_info.iid = dummy_cid;
 	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
 			  dummy_cid);
-		return rc;
+		goto err;
 	}

 	p_cxt = cxt_info.p_cxt;
 	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);

 	return rc;
+
+err:
+	qed_sp_destroy_request(p_hwfn, p_ent);
+	return rc;
 }

 static int
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 		       "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
 		       p_params->num_queues,
 		       p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return -EINVAL;
 	}
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
 	if (rc) {
-		/* Return spq entry which is taken in qed_sp_init_request()*/
-		qed_spq_return_entry(p_hwfn, p_ent);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn,
 			  "%d is not supported yet\n",
 			  p_filter_cmd->opcode);
+		qed_sp_destroy_request(p_hwfn, *pp_ent);
 		return -EINVAL;
 	}
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
 	} else {
 		rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 		if (rc)
-			return rc;
+			goto err;

 		if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
 			rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
 					     &abs_rx_q_id);
 			if (rc)
-				return rc;
+				goto err;

 			p_ramrod->rx_qid_valid = 1;
 			p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
 		   (u64)p_params->addr, p_params->length);

 	return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+	qed_sp_destroy_request(p_hwfn, p_ent);
+	return rc;
 }

 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt,
 	default:
 		rc = -EINVAL;
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
 	SET_FIELD(p_ramrod->flags1,
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn,
 			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
 			  rc);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return rc;
 	}
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -167,6 +167,9 @@ struct qed_spq_entry {
 	enum spq_mode			comp_mode;
 	struct qed_spq_comp_cb		comp_cb;
 	struct qed_spq_comp_done	comp_done; /* SPQ_MODE_EBLOCK */
+
+	/* Posted entry for unlimited list entry in EBLOCK mode */
+	struct qed_spq_entry		*post_ent;
 };

 struct qed_eq {
@@ -396,6 +399,17 @@ struct qed_sp_init_data {
 	struct qed_spq_comp_cb *p_comp_data;
 };

+/**
+ * @brief Returns an SPQ entry to the pool / frees the entry if allocated.
+ *        Should be called in error flows after initializing the SPQ entry
+ *        and before posting it.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+			    struct qed_spq_entry *p_ent);
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry **pp_ent,
 			u8 cmd,
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -47,6 +47,19 @@
 #include "qed_sp.h"
 #include "qed_sriov.h"

+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+			    struct qed_spq_entry *p_ent)
+{
+	/* qed_spq_get_entry() can either get an entry from the free_pool,
+	 * or, if no entries are left, allocate a new entry and add it to
+	 * the unlimited_pending list.
+	 */
+	if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+		kfree(p_ent);
+	else
+		qed_spq_return_entry(p_hwfn, p_ent);
+}
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry **pp_ent,
 			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 	case QED_SPQ_MODE_BLOCK:
 		if (!p_data->p_comp_data)
-			return -EINVAL;
+			goto err;

 		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
 		break;
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 	default:
 		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
 			  p_ent->comp_mode);
-		return -EINVAL;
+		goto err;
 	}

 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

 	return 0;
+
+err:
+	qed_sp_destroy_request(p_hwfn, p_ent);
+
+	return -EINVAL;
 }

 static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
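
To make the two ownership cases concrete, here is a standalone userspace toy, with hypothetical names and a shrunken pool, that mirrors the decision qed_sp_destroy_request() makes: pooled entries go back to the pool, while entries allocated because the pool ran dry (the unlimited_pending case) are freed.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct toy_entry {
	bool from_pool;	/* stands in for the p_ent->queue comparison */
};

#define POOL_SIZE 2	/* toy value, not a driver constant */
static struct toy_entry pool[POOL_SIZE] = { { true }, { true } };
static int pool_avail = POOL_SIZE;

/* Models qed_spq_get_entry(): pool first, heap allocation as fallback */
static struct toy_entry *toy_get_entry(void)
{
	if (pool_avail > 0)
		return &pool[--pool_avail];

	struct toy_entry *e = malloc(sizeof(*e));
	if (!e)
		exit(1);
	e->from_pool = false;
	return e;
}

/* Models qed_sp_destroy_request(): kfree() vs. qed_spq_return_entry() */
static void toy_destroy_request(struct toy_entry *e)
{
	if (!e->from_pool)
		free(e);
	else
		pool_avail++;
}

int main(void)
{
	struct toy_entry *a = toy_get_entry();	/* pooled */
	struct toy_entry *b = toy_get_entry();	/* pooled */
	struct toy_entry *c = toy_get_entry();	/* heap: pool exhausted */

	/* Error flow before posting: each entry is released correctly */
	toy_destroy_request(c);
	toy_destroy_request(b);
	toy_destroy_request(a);
	printf("pool entries available again: %d\n", pool_avail);
	return 0;
}

This prints "pool entries available again: 2"; releasing c through the pool-return path instead is the kind of misuse the new helper rules out.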
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
 	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
 	rc = qed_mcp_drain(p_hwfn, p_ptt);
+	qed_ptt_release(p_hwfn, p_ptt);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "MCP drain failed\n");
 		goto err;
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
 	/* Retry after drain */
 	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
 	if (!rc)
-		goto out;
+		return 0;

 	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
-	if (comp_done->done == 1)
+	if (comp_done->done == 1) {
 		if (p_fw_ret)
 			*p_fw_ret = comp_done->fw_return_code;
-out:
-	qed_ptt_release(p_hwfn, p_ptt);
-	return 0;
+
+		return 0;
+	}
+
 err:
-	qed_ptt_release(p_hwfn, p_ptt);
 	DP_NOTICE(p_hwfn,
 		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
 		  le32_to_cpu(p_ent->elem.hdr.cid),
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 			/* EBLOCK responsible to free the allocated p_ent */
 			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
 				kfree(p_ent);
+			else
+				p_ent->post_ent = p_en2;

 			p_ent = p_en2;
 		}
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 }

+/* Avoid overriding of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+
+	__set_bit(pos, p_spq->p_comp_bitmap);
+	while (test_bit(p_spq->comp_bitmap_idx,
+			p_spq->p_comp_bitmap)) {
+		__clear_bit(p_spq->comp_bitmap_idx,
+			    p_spq->p_comp_bitmap);
+		p_spq->comp_bitmap_idx++;
+		qed_chain_return_produced(&p_spq->chain);
+	}
+}
+
 int qed_spq_post(struct qed_hwfn *p_hwfn,
 		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 {
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 			   p_ent->queue == &p_spq->unlimited_pending);

 		if (p_ent->queue == &p_spq->unlimited_pending) {
-			/* This is an allocated p_ent which does not need to
-			 * return to pool.
-			 */
+			struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
 			kfree(p_ent);
-			return rc;
+
+			/* Return the entry which was actually posted */
+			p_ent = p_post_ent;
 		}

 		if (rc)
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 spq_post_fail2:
 	spin_lock_bh(&p_spq->lock);
 	list_del(&p_ent->list);
-	qed_chain_return_produced(&p_spq->chain);
+	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

 spq_post_fail:
 	/* return to the free pool */
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 	spin_lock_bh(&p_spq->lock);
 	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
 		if (p_ent->elem.hdr.echo == echo) {
-			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
 			list_del(&p_ent->list);
-
-			/* Avoid overriding of SPQ entries when getting
-			 * out-of-order completions, by marking the completions
-			 * in a bitmap and increasing the chain consumer only
-			 * for the first successive completed entries.
-			 */
-			__set_bit(pos, p_spq->p_comp_bitmap);
-
-			while (test_bit(p_spq->comp_bitmap_idx,
-					p_spq->p_comp_bitmap)) {
-				__clear_bit(p_spq->comp_bitmap_idx,
-					    p_spq->p_comp_bitmap);
-				p_spq->comp_bitmap_idx++;
-				qed_chain_return_produced(&p_spq->chain);
-			}
-
+			qed_spq_comp_bmap_update(p_hwfn, echo);
 			p_spq->comp_count++;
 			found = p_ent;
 			break;
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 			   QED_MSG_SPQ,
 			   "Got a completion without a callback function\n");

-	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
-	    (found->queue == &p_spq->unlimited_pending))
+	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
 		/* EBLOCK is responsible for returning its own entry into the
-		 * free list, unless it originally added the entry into the
-		 * unlimited pending list.
+		 * free list.
 		 */
 		qed_spq_return_entry(p_hwfn, found);
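
The bitmap logic that moves into qed_spq_comp_bmap_update() is easiest to see with out-of-order completions. Below is a standalone simulation (userspace C; the ring shrunk to 8 slots, plain arrays standing in for p_comp_bitmap, comp_bitmap_idx and the chain consumer): completing echoes 1 and 2 advances nothing, and echo 0 then releases all three slots at once.

#include <stdio.h>

#define RING_SIZE 8	/* toy stand-in for SPQ_RING_SIZE */

static unsigned char comp_bitmap[RING_SIZE];
static unsigned int comp_bitmap_idx;	/* next in-order slot to consume */
static unsigned int consumed;		/* stands in for the chain consumer */

/* Mirrors qed_spq_comp_bmap_update(): mark the completed slot, then
 * consume only the leading run of completed slots.
 */
static void comp_bmap_update(unsigned int echo)
{
	comp_bitmap[echo % RING_SIZE] = 1;

	while (comp_bitmap[comp_bitmap_idx % RING_SIZE]) {
		comp_bitmap[comp_bitmap_idx % RING_SIZE] = 0;
		comp_bitmap_idx++;
		consumed++;	/* qed_chain_return_produced() in the driver */
	}
}

int main(void)
{
	unsigned int order[] = { 1, 2, 0 };	/* out-of-order completions */

	for (int i = 0; i < 3; i++) {
		comp_bmap_update(order[i]);
		printf("echo %u completed -> consumer at %u\n",
		       order[i], consumed);
	}
	return 0;
}

The same helper is now also called from the qed_spq_post() failure path, so a failed post is accounted for through the bitmap rather than by returning a chain element directly, keeping the consumer in sync with completions that may still arrive out of order.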
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
 	default:
 		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
 			  p_hwfn->hw_info.personality);
+		qed_sp_destroy_request(p_hwfn, p_ent);
 		return -EINVAL;
 	}