Commit 2f44f752 authored by David S. Miller

Merge branch 'qed-fixes'

Yuval Mintz says:

====================
qed: Bug fixes

Patch #1 addresses a day-one race whose likelihood grows with the
number of VFs [i.e., more child VFs from a single PF make it more
probable]; a standalone sketch of the race is included alongside the
mailbox hunks below.
Patch #2 corrects a race introduced by the previous set of qed fixes,
one that would trigger each time a PF transitions to the UP state; a
sketch of the corrected pattern follows the WFQ hunks below.

I've built & tested these against current net-next.
Please consider applying the series there.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4ca257ee 6f437d43
@@ -688,7 +688,9 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 #define OOO_LB_TC 9

 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+					 struct qed_ptt *p_ptt,
+					 u32 min_pf_rate);

 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])

@@ -3198,7 +3198,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
 }

 /* API to configure WFQ from mcp link change */
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+					 struct qed_ptt *p_ptt, u32 min_pf_rate)
 {
 	int i;

@@ -3212,8 +3213,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

-		__qed_configure_vp_wfq_on_link_change(p_hwfn,
-						      p_hwfn->p_dpc_ptt,
+		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
						      min_pf_rate);
 	}
 }

@@ -679,7 +679,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 	/* Min bandwidth configuration */
 	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
-	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
+	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
+					    p_link->min_pf_rate);

 	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
 	p_link->an_complete = !!(status &

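The three hunks above make one coordinated change: qed_configure_vp_wfq_on_link_change() previously reached for each hwfn's p_dpc_ptt, a PTT window reserved for the DPC context, even when called from the MCP link-change flow, which already holds its own acquired window; two contexts driving the same PTT window is the race patch #2 fixes. The fix threads the caller's p_ptt down the call chain instead. The sketch below is a minimal standalone illustration of that pass-the-handle-you-own pattern; every name in it (ptt_window, configure_wfq, handle_link_change) is invented for the example and none of it is qed code.

/* Illustrative sketch only: shows why a helper that runs in its
 * caller's context should take the caller's already-acquired window
 * handle as a parameter, rather than borrowing a handle that a
 * different context owns and may be using concurrently.
 */
#include <stdio.h>

struct ptt_window {
	const char *owner;	/* the context that acquired this window */
};

/* A window reserved for the DPC context; only the DPC may drive it. */
static struct ptt_window dpc_ptt = { .owner = "dpc" };

/* Fixed shape: the window to use is an explicit parameter. */
static void configure_wfq(struct ptt_window *p_ptt, unsigned int min_rate)
{
	printf("WFQ reconfigured (min rate %u) via '%s' window\n",
	       min_rate, p_ptt->owner);
}

static void handle_link_change(struct ptt_window *p_ptt, unsigned int min_rate)
{
	/* Before the fix, the callee grabbed dpc_ptt here no matter
	 * which context was running, racing with the DPC itself; after
	 * the fix, the caller's own window is passed straight through.
	 */
	configure_wfq(p_ptt, min_rate);
}

int main(void)
{
	struct ptt_window link_ptt = { .owner = "link-change flow" };

	handle_link_change(&link_ptt, 100);
	(void)dpc_ptt;	/* present only to mirror the old, racy choice */
	return 0;
}
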
@@ -3014,8 +3014,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
 		ack_vfs[vfid / 32] |= BIT((vfid % 32));
 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
 		    ~(1ULL << (rel_vf_id % 64));
-		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
-		    ~(1ULL << (rel_vf_id % 64));
+		p_vf->vf_mbx.b_pending_msg = false;
 	}

 	return rc;

@@ -3128,11 +3127,20 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
 	mbx = &p_vf->vf_mbx;

-	/* qed_iov_process_mbx_request */
-	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+	if (!mbx->b_pending_msg) {
+		DP_NOTICE(p_hwfn,
+			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
+			  p_vf->abs_vf_id);
+		return;
+	}
+	mbx->b_pending_msg = false;

 	mbx->first_tlv = mbx->req_virt->first_tlv;

+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "VF[%02x]: Processing mailbox message [type %04x]\n",
+		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
 	/* check if tlv type is known */
 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
 	    !p_vf->b_malicious) {

@@ -3219,20 +3227,19 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
 	}
 }

-static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
 {
-	u64 add_bit = 1ULL << (vfid % 64);
+	int i;

-	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);

-static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
-						    u64 *events)
-{
-	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
-
-	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
-	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+	qed_for_each_vf(p_hwfn, i) {
+		struct qed_vf_info *p_vf;
+
+		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+		if (p_vf->vf_mbx.b_pending_msg)
+			events[i / 64] |= 1ULL << (i % 64);
+	}
 }

 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,

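The hunks above are the core of patch #1. The old scheme kept a shared pending_events bitmap: the event handler set a VF's bit, and the workqueue did a copy-then-clear of the whole array, so a message that landed between the memcpy() and the memset() was wiped without ever being served. The new scheme keeps one b_pending_msg flag per VF, raised when the VF posts a message and dropped only when that exact message is consumed. The standalone sketch below (made-up names; straight-line code standing in for the concurrent interleaving) shows the lost-update window and why the per-VF flag closes it.

/* Illustrative sketch only (not qed code). The "old scheme" section
 * replays, in straight-line code, the interleaving in which another
 * CPU posts a message between the worker's copy and its clear; the
 * "new scheme" section shows the per-VF-flag variant, which has no
 * such window because each flag is cleared only when it is consumed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_VFS 8

int main(void)
{
	uint64_t pending = 0, snapshot;
	bool b_pending_msg[NUM_VFS] = { false };
	uint64_t mask = 0;
	int i;

	/* --- old scheme: shared bitmap, copy-then-clear --- */
	pending |= 1ULL << 3;	/* VF 3 posts a message                */

	snapshot = pending;	/* worker copies the bitmap ...        */
	pending |= 1ULL << 5;	/* ... VF 5 posts inside the window ...*/
	pending = 0;		/* ... and the clear wipes VF 5's bit  */

	printf("old: served 0x%llx, silently lost 0x%llx\n",
	       (unsigned long long)snapshot,
	       (unsigned long long)(1ULL << 5));

	/* --- new scheme: one flag per VF --- */
	b_pending_msg[3] = true;	/* VF 3 posts a message   */
	b_pending_msg[5] = true;	/* VF 5 posts at any time */

	/* The worker rebuilds the event mask from the flags on every
	 * pass, and a flag is dropped only when that VF's message is
	 * actually handled, so a late arrival is picked up on the next
	 * pass instead of being cleared en masse.
	 */
	for (i = 0; i < NUM_VFS; i++) {
		if (b_pending_msg[i]) {
			mask |= 1ULL << i;
			b_pending_msg[i] = false;	/* consumed */
		}
	}

	printf("new: served 0x%llx, lost 0x0\n", (unsigned long long)mask);
	return 0;
}
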
@@ -3266,7 +3273,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

 	/* Mark the event and schedule the workqueue */
-	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+	p_vf->vf_mbx.b_pending_msg = true;
 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

 	return 0;

@@ -4030,7 +4037,7 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
 		return;
 	}

-	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+	qed_iov_pf_get_pending_events(hwfn, events);

 	DP_VERBOSE(hwfn, QED_MSG_IOV,
 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",

@@ -140,6 +140,9 @@ struct qed_iov_vf_mbx {
 	/* Address in VF where a pending message is located */
 	dma_addr_t pending_req;

+	/* Message from VF awaits handling */
+	bool b_pending_msg;
+
 	u8 *offset;

 	/* saved VF request header */

@@ -232,7 +235,6 @@ struct qed_vf_info {
  */
 struct qed_pf_iov {
 	struct qed_vf_info vfs_array[MAX_NUM_VFS];
-	u64 pending_events[QED_VF_ARRAY_LENGTH];
 	u64 pending_flr[QED_VF_ARRAY_LENGTH];

 	/* Allocate message address continuosuly and split to each VF */