Commit 9c7664cb authored by David S. Miller

Merge branch 'qed-next'

Manish Chopra says:

====================
qed*: driver updates

There are several new additions in this series;
most are connected to either Tx offloading or Rx classification
[either fastpath changes or supporting configuration].

In addition, there's a single IOV enhancement.

Please consider applying this series to `net-next'.

V2->V3:
Fixes the kbuild warning below:
call to '__compiletime_assert_60' declared with
attribute error: Need native word sized stores/loads for atomicity.

V1->V2:
Added a fix for the race in ramrod handling
pointed out by Eric Dumazet [patch 7].
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d0b3fbb2 d5df7688
@@ -1652,6 +1652,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
         if (IS_PF(cdev)) {
                 int max_vf_vlan_filters = 0;
+                int max_vf_mac_filters = 0;
 
                 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                         for_each_hwfn(cdev, i)
@@ -1665,11 +1666,18 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
                         info->num_queues = cdev->num_hwfns;
                 }
 
-                if (IS_QED_SRIOV(cdev))
+                if (IS_QED_SRIOV(cdev)) {
                         max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
                                               QED_ETH_VF_NUM_VLAN_FILTERS;
-                info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
+                        max_vf_mac_filters = cdev->p_iov_info->total_vfs *
+                                             QED_ETH_VF_NUM_MAC_FILTERS;
+                }
+                info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
+                                                  QED_VLAN) -
                                          max_vf_vlan_filters;
+                info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
+                                                 QED_MAC) -
+                                        max_vf_mac_filters;
 
                 ether_addr_copy(info->port_mac,
                                 cdev->hwfns[0].hw_info.hw_mac_addr);
...
@@ -111,8 +111,8 @@ union qed_spq_req_comp {
 };
 
 struct qed_spq_comp_done {
-        u64 done;
+        unsigned int done;
         u8 fw_return_code;
 };
 
 struct qed_spq_entry {
...
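Note on the V2->V3 change above: shrinking done from u64 to unsigned int keeps it a native word sized location, which is what smp_store_release()/READ_ONCE() (used in the reworked qed_spq_block() below) require and what the quoted __compiletime_assert warning complains about, presumably on 32-bit builds where a u64 is wider than a long. A minimal userspace sketch of the same publish/consume pairing, using C11 atomics purely as stand-ins for the kernel primitives; the names here are illustrative and not part of the driver:

#include <stdatomic.h>
#include <stdbool.h>

struct comp_done {
        atomic_uint done;               /* native word size, unlike the old u64 */
        unsigned char fw_return_code;
};

/* Completion side: publish fw_return_code before the waiter can see done == 1 */
static void complete_ramrod(struct comp_done *c, unsigned char fw_ret)
{
        c->fw_return_code = fw_ret;
        atomic_store_explicit(&c->done, 1, memory_order_release);
}

/* Waiting side: the acquire load orders the fw_return_code read after it */
static bool poll_done(struct comp_done *c, unsigned char *fw_ret)
{
        if (atomic_load_explicit(&c->done, memory_order_acquire) != 1)
                return false;
        *fw_ret = c->fw_return_code;
        return true;
}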
@@ -37,7 +37,11 @@
 ***************************************************************************/
 #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
-#define SPQ_BLOCK_SLEEP_LENGTH (1000)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER (10)
+#define SPQ_BLOCK_DELAY_US (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
+#define SPQ_BLOCK_SLEEP_MS (5)
 
 /***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
@@ -50,60 +54,88 @@ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
         comp_done = (struct qed_spq_comp_done *)cookie;
 
-        comp_done->done = 0x1;
         comp_done->fw_return_code = fw_return_code;
 
-        /* make update visible to waiting thread */
-        smp_wmb();
+        /* Make sure completion done is visible on waiting thread */
+        smp_store_release(&comp_done->done, 0x1);
 }
 
-static int qed_spq_block(struct qed_hwfn *p_hwfn,
-                         struct qed_spq_entry *p_ent,
-                         u8 *p_fw_ret)
+static int __qed_spq_block(struct qed_hwfn *p_hwfn,
+                           struct qed_spq_entry *p_ent,
+                           u8 *p_fw_ret, bool sleep_between_iter)
 {
-        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
         struct qed_spq_comp_done *comp_done;
-        int rc;
+        u32 iter_cnt;
 
         comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
-        while (sleep_count) {
-                /* validate we receive completion update */
-                smp_rmb();
-                if (comp_done->done == 1) {
+        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+                                      : SPQ_BLOCK_DELAY_MAX_ITER;
+
+        while (iter_cnt--) {
+                /* Validate we receive completion update */
+                if (READ_ONCE(comp_done->done) == 1) {
+                        /* Read updated FW return value */
+                        smp_read_barrier_depends();
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
-                usleep_range(5000, 10000);
-                sleep_count--;
+
+                if (sleep_between_iter)
+                        msleep(SPQ_BLOCK_SLEEP_MS);
+                else
+                        udelay(SPQ_BLOCK_DELAY_US);
+        }
+
+        return -EBUSY;
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+                         struct qed_spq_entry *p_ent,
+                         u8 *p_fw_ret, bool skip_quick_poll)
+{
+        struct qed_spq_comp_done *comp_done;
+        int rc;
+
+        /* A relatively short polling period w/o sleeping, to allow the FW to
+         * complete the ramrod and thus possibly to avoid the following sleeps.
+         */
+        if (!skip_quick_poll) {
+                rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+                if (!rc)
+                        return 0;
        }
 
+        /* Move to polling with a sleeping period between iterations */
+        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+        if (!rc)
+                return 0;
+
        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
-        if (rc != 0)
+        if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
+                goto err;
+        }
 
        /* Retry after drain */
-        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
-        while (sleep_count) {
-                /* validate we receive completion update */
-                smp_rmb();
-                if (comp_done->done == 1) {
-                        if (p_fw_ret)
-                                *p_fw_ret = comp_done->fw_return_code;
-                        return 0;
-                }
-                usleep_range(5000, 10000);
-                sleep_count--;
-        }
+        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+        if (!rc)
+                return 0;
 
+        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }
-
-        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+err:
+        DP_NOTICE(p_hwfn,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  le32_to_cpu(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id,
                  p_ent->elem.hdr.protocol_id,
                  le16_to_cpu(p_ent->elem.hdr.echo));
+
        return -EBUSY;
 }
@@ -729,7 +761,8 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
         * access p_ent here to see whether it's successful or not.
         * Thus, after gaining the answer perform the cleanup here.
         */
-        rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+        rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
+                           p_ent->queue == &p_spq->unlimited_pending);
 
        if (p_ent->queue == &p_spq->unlimited_pending) {
                /* This is an allocated p_ent which does not need to
...
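The reworked blocking scheme above first polls briefly without sleeping, then falls back to a sleeping poll, with an MCP drain plus one more sleeping poll as the last resort. A rough userspace sketch of that ordering, reusing struct comp_done and poll_done() from the earlier sketch; usleep() merely stands in for the kernel's udelay()/msleep(), and this helper is hypothetical, not the driver's API:

#include <unistd.h>
#include <errno.h>

static int wait_for_ramrod(struct comp_done *c, unsigned char *fw_ret)
{
        int i;

        /* Quick poll with short delays first, cf. SPQ_BLOCK_DELAY_MAX_ITER/_US */
        for (i = 0; i < 10; i++) {
                if (poll_done(c, fw_ret))
                        return 0;
                usleep(10);
        }

        /* Fall back to sleeping between iterations, cf. SPQ_BLOCK_SLEEP_* */
        for (i = 0; i < 1000; i++) {
                if (poll_done(c, fw_ret))
                        return 0;
                usleep(5 * 1000);
        }

        /* The driver would request an MCP drain here and retry once more */
        return -EBUSY;
}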
@@ -109,7 +109,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
 }
 
 static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
-                                  int rel_vf_id, bool b_enabled_only)
+                                  int rel_vf_id,
+                                  bool b_enabled_only, bool b_non_malicious)
 {
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -124,6 +125,10 @@ static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
            b_enabled_only)
                return false;
 
+        if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+            b_non_malicious)
+                return false;
+
        return true;
 }
@@ -138,7 +143,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                return NULL;
        }
 
-        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+                                  b_enabled_only, false))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
@@ -542,7 +548,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
        return 0;
 }
 
-static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
+                              int vfid, bool b_fail_malicious)
 {
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
@@ -550,12 +557,17 @@ static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
                return false;
 
        /* Check VF validity */
-        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
                return false;
 
        return true;
 }
+
+bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+        return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
 static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
                                       u16 rel_vf_id, u8 to_disable)
 {
@@ -652,6 +664,9 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
 
+        /* It's possible VF was previously considered malicious */
+        vf->b_malicious = false;
+
        rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
        if (rc)
                return rc;
@@ -2804,6 +2819,13 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
                return rc;
        }
 
+        /* Workaround to make VF-PF channel ready, as FW
+         * doesn't do that as a part of FLR.
+         */
+        REG_WR(p_hwfn,
+               GTT_BAR0_MAP_REG_USDM_RAM +
+               USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
        /* VF_STOPPED has to be set only after final cleanup
         * but prior to re-enabling the VF.
         */
@@ -2942,7 +2964,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
        mbx->first_tlv = mbx->req_virt->first_tlv;
 
        /* check if tlv type is known */
-        if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+        if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+            !p_vf->b_malicious) {
                switch (mbx->first_tlv.tl.type) {
                case CHANNEL_TLV_ACQUIRE:
                        qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
@@ -2984,6 +3007,15 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
                        qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
                        break;
                }
+        } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                           "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+                           p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
+                qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+                                     mbx->first_tlv.tl.type,
+                                     sizeof(struct pfvf_def_resp_tlv),
+                                     PFVF_STATUS_MALICIOUS);
        } else {
                /* unknown TLV - this may belong to a VF driver from the future
                 * - a version written after this PF driver was written, which
@@ -3033,20 +3065,30 @@ static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
        memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
 }
 
-static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
-                              u16 abs_vfid, struct regpair *vf_msg)
+static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
+                                                       u16 abs_vfid)
 {
-        u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
-        struct qed_vf_info *p_vf;
+        u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
 
-        if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+        if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
-                           "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+                           "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
                           abs_vfid);
-                return 0;
+                return NULL;
        }
-        p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+        return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+                              u16 abs_vfid, struct regpair *vf_msg)
+{
+        struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
+                                                               abs_vfid);
+
+        if (!p_vf)
+                return 0;
 
        /* List the physical address of the request so that handler
         * could later on copy the message from it.
@@ -3060,6 +3102,23 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+                                     struct malicious_vf_eqe_data *p_data)
+{
+        struct qed_vf_info *p_vf;
+
+        p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
+        if (!p_vf)
+                return;
+
+        DP_INFO(p_hwfn,
+                "VF [%d] - Malicious behavior [%02x]\n",
+                p_vf->abs_vf_id, p_data->err_id);
+
+        p_vf->b_malicious = true;
+}
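Taken together, the SR-IOV hunks give each VF a sticky b_malicious flag: it is set when firmware posts a COMMON_EVENT_MALICIOUS_VF event (handled just below), consulted before mailbox TLVs are serviced or forced MAC/VLAN settings are applied, and cleared again when the VF is re-enabled after FLR. A condensed, purely hypothetical sketch of that lifecycle; none of these helpers exist in the driver:

#include <stdbool.h>
#include <stdio.h>

struct vf_info {
        int abs_vf_id;
        bool b_malicious;
};

/* EQE path: firmware reported misbehavior, stop trusting this VF */
static void vf_mark_malicious(struct vf_info *vf, unsigned char err_id)
{
        printf("VF [%d] - malicious behavior [%02x]\n", vf->abs_vf_id, err_id);
        vf->b_malicious = true;
}

/* Mailbox / configuration paths: requests from a flagged VF are answered
 * with a MALICIOUS status (or skipped) instead of being served
 */
static bool vf_request_allowed(const struct vf_info *vf)
{
        return !vf->b_malicious;
}

/* VF re-enable (e.g. after FLR): start from a clean slate again */
static void vf_reenable(struct vf_info *vf)
{
        vf->b_malicious = false;
}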
 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                         u8 opcode, __le16 echo, union event_ring_data *data)
 {
@@ -3067,6 +3126,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
        case COMMON_EVENT_VF_PF_CHANNEL:
                return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
                                          &data->vf_pf_channel.msg_addr);
+        case COMMON_EVENT_MALICIOUS_VF:
+                qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+                return 0;
        default:
                DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
                        opcode);
@@ -3083,7 +3145,7 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
                goto out;
 
        for (i = rel_vf_id; i < p_iov->total_vfs; i++)
-                if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
+                if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
                        return i;
 
 out:
@@ -3130,6 +3192,12 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
                return;
        }
 
+        if (vf_info->b_malicious) {
+                DP_NOTICE(p_hwfn->cdev,
+                          "Can't set forced MAC to malicious VF [%d]\n", vfid);
+                return;
+        }
+
        feature = 1 << MAC_ADDR_FORCED;
        memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
@@ -3153,6 +3221,12 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
                return;
        }
 
+        if (vf_info->b_malicious) {
+                DP_NOTICE(p_hwfn->cdev,
+                          "Can't set forced vlan to malicious VF [%d]\n", vfid);
+                return;
+        }
+
        feature = 1 << VLAN_ADDR_FORCED;
        vf_info->bulletin.p_virt->pvid = pvid;
        if (pvid)
@@ -3367,7 +3441,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
                qed_for_each_vf(hwfn, j) {
                        int k;
 
-                        if (!qed_iov_is_valid_vfid(hwfn, j, true))
+                        if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
                                continue;
 
                        /* Wait until VF is disabled before releasing */
@@ -3425,7 +3499,7 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
 
                for (i = 0; i < num; i++) {
-                        if (!qed_iov_is_valid_vfid(hwfn, i, false))
+                        if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
                                continue;
 
                        rc = qed_iov_init_hw_for_vf(hwfn,
@@ -3477,7 +3551,7 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
                return -EINVAL;
        }
 
-        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
                DP_VERBOSE(cdev, QED_MSG_IOV,
                           "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
                return -EINVAL;
@@ -3509,7 +3583,7 @@ static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
                return -EINVAL;
        }
 
-        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
                DP_VERBOSE(cdev, QED_MSG_IOV,
                           "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
                return -EINVAL;
@@ -3543,7 +3617,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
        if (IS_VF(cdev))
                return -EINVAL;
 
-        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
                DP_VERBOSE(cdev, QED_MSG_IOV,
                           "VF index [%d] isn't active\n", vf_id);
                return -EINVAL;
@@ -3647,7 +3721,7 @@ static int qed_set_vf_link_state(struct qed_dev *cdev,
        if (IS_VF(cdev))
                return -EINVAL;
 
-        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
                DP_VERBOSE(cdev, QED_MSG_IOV,
                           "VF index [%d] isn't active\n", vf_id);
                return -EINVAL;
...
@@ -132,6 +132,7 @@ struct qed_vf_info {
        struct qed_iov_vf_mbx vf_mbx;
        enum vf_state state;
        bool b_init;
+        bool b_malicious;
        u8 to_disable;
 
        struct qed_bulletin bulletin;
...
@@ -1230,8 +1230,8 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
        is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
                                                      &is_mac_forced);
-        if (is_mac_exist && is_mac_forced && cookie)
-                ops->force_mac(cookie, mac);
+        if (is_mac_exist && cookie)
+                ops->force_mac(cookie, mac, !!is_mac_forced);
 
        /* Always update link configuration according to bulletin */
        qed_link_update(hwfn);
...
@@ -40,6 +40,7 @@ enum {
        PFVF_STATUS_NOT_SUPPORTED,
        PFVF_STATUS_NO_RESOURCE,
        PFVF_STATUS_FORCED,
+        PFVF_STATUS_MALICIOUS,
 };
 
 /* vf pf channel tlvs */
...
@@ -320,6 +320,7 @@ struct qede_fastpath {
 #define XMIT_L4_CSUM            BIT(0)
 #define XMIT_LSO                BIT(1)
 #define XMIT_ENC                BIT(2)
+#define XMIT_ENC_GSO_L4_CSUM    BIT(3)
 
 #define QEDE_CSUM_ERROR         BIT(0)
 #define QEDE_CSUM_UNNECESSARY   BIT(1)
...
@@ -171,10 +171,14 @@ static struct pci_driver qede_pci_driver = {
 #endif
 };
 
-static void qede_force_mac(void *dev, u8 *mac)
+static void qede_force_mac(void *dev, u8 *mac, bool forced)
 {
        struct qede_dev *edev = dev;
 
+        /* MAC hints take effect only if we haven't set one already */
+        if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+                return;
+
        ether_addr_copy(edev->ndev->dev_addr, mac);
        ether_addr_copy(edev->primary_mac, mac);
 }
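With the extra forced argument, the bulletin MAC is no longer unconditionally a command: a PF-forced MAC always overwrites the netdev address, while a plain hint only fills in a device that does not yet have a valid address. A hypothetical one-line condensation of that decision (not the driver's API):

#include <stdbool.h>

/* a forced MAC from the PF always wins; a hint only fills an empty slot */
static bool should_apply_bulletin_mac(bool forced, bool dev_has_valid_mac)
{
        return forced || !dev_has_valid_mac;
}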
@@ -396,8 +400,19 @@ static u32 qede_xmit_type(struct qede_dev *edev,
            (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
                *ipv6_ext = 1;
 
-        if (skb->encapsulation)
+        if (skb->encapsulation) {
                rc |= XMIT_ENC;
+                if (skb_is_gso(skb)) {
+                        unsigned short gso_type = skb_shinfo(skb)->gso_type;
+
+                        if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
+                            (gso_type & SKB_GSO_GRE_CSUM))
+                                rc |= XMIT_ENC_GSO_L4_CSUM;
+
+                        rc |= XMIT_LSO;
+                        return rc;
+                }
+        }
 
        if (skb_is_gso(skb))
                rc |= XMIT_LSO;
@@ -633,6 +648,12 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        if (unlikely(xmit_type & XMIT_ENC)) {
                first_bd->data.bd_flags.bitfields |=
                        1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+
+                if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
+                        u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+
+                        first_bd->data.bd_flags.bitfields |= 1 << tmp;
+                }
                hlen = qede_get_skb_hlen(skb, true);
        } else {
                first_bd->data.bd_flags.bitfields |=
@@ -2219,6 +2240,40 @@ static void qede_udp_tunnel_del(struct net_device *dev,
        schedule_delayed_work(&edev->sp_task, 0);
 }
 
+/* 8B udp header + 8B base tunnel header + 32B option length */
+#define QEDE_MAX_TUN_HDR_LEN 48
+
+static netdev_features_t qede_features_check(struct sk_buff *skb,
+                                             struct net_device *dev,
+                                             netdev_features_t features)
+{
+        if (skb->encapsulation) {
+                u8 l4_proto = 0;
+
+                switch (vlan_get_protocol(skb)) {
+                case htons(ETH_P_IP):
+                        l4_proto = ip_hdr(skb)->protocol;
+                        break;
+                case htons(ETH_P_IPV6):
+                        l4_proto = ipv6_hdr(skb)->nexthdr;
+                        break;
+                default:
+                        return features;
+                }
+
+                /* Disable offloads for geneve tunnels, as HW can't parse
+                 * the geneve header which has option length greater than 32B.
+                 */
+                if ((l4_proto == IPPROTO_UDP) &&
+                    ((skb_inner_mac_header(skb) -
+                      skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
+                        return features & ~(NETIF_F_CSUM_MASK |
+                                            NETIF_F_GSO_MASK);
+        }
+
+        return features;
+}
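The 48-byte cap checked above is the span from the outer L4 (UDP) header to the inner Ethernet header, i.e. the encapsulation overhead the hardware can still parse: 8B UDP + 8B base tunnel header + up to 32B of options. A tiny illustrative helper mirroring the skb_inner_mac_header() - skb_transport_header() computation; the names are hypothetical, not part of the driver:

#include <stdbool.h>
#include <stddef.h>

enum { MAX_PARSABLE_TUN_HDR = 8 /* UDP */ + 8 /* base tunnel */ + 32 /* options */ };

/* offsets are measured from the start of the packet, as with the skb helpers */
static bool hw_can_offload_tunnel(size_t inner_mac_off, size_t outer_l4_off)
{
        return (inner_mac_off - outer_l4_off) <= MAX_PARSABLE_TUN_HDR;
}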
 static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
@@ -2243,6 +2298,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .ndo_udp_tunnel_del = qede_udp_tunnel_del,
+        .ndo_features_check = qede_features_check,
 };
 /* -------------------------------------------------------------------------
@@ -2309,6 +2365,8 @@ static void qede_init_ndev(struct qede_dev *edev)
 
        qede_set_ethtool_ops(ndev);
 
+        ndev->priv_flags = IFF_UNICAST_FLT;
+
        /* user-changeble features */
        hw_features = NETIF_F_GRO | NETIF_F_SG |
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -2316,11 +2374,14 @@ static void qede_init_ndev(struct qede_dev *edev)
 
        /* Encap features*/
        hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
-                       NETIF_F_TSO_ECN;
+                       NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                       NETIF_F_GSO_GRE_CSUM;
        ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
                                NETIF_F_TSO6 | NETIF_F_GSO_GRE |
-                                NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+                                NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
+                                NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                NETIF_F_GSO_GRE_CSUM;
 
        ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                              NETIF_F_HIGHDMA;
@@ -3878,7 +3939,7 @@ static void qede_config_rx_mode(struct net_device *ndev)
 
        /* Check for promiscuous */
        if ((ndev->flags & IFF_PROMISC) ||
-            (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+            (uc_count > edev->dev_info.num_mac_filters - 1)) {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        } else {
                /* Add MAC filters according to the unicast secondary macs */
...
@@ -23,6 +23,7 @@ struct qed_dev_eth_info {
 
        u8 port_mac[ETH_ALEN];
        u8 num_vlan_filters;
+        u16 num_mac_filters;
 
        /* Legacy VF - this affects the datapath, so qede has to know */
        bool is_legacy;
@@ -129,7 +130,7 @@ struct qed_tunn_params {
 struct qed_eth_cb_ops {
        struct qed_common_cb_ops common;
 
-        void (*force_mac) (void *dev, u8 *mac);
+        void (*force_mac) (void *dev, u8 *mac, bool forced);
 };
 
 #ifdef CONFIG_DCB
...