Commit f7257f65 authored by David S. Miller

Merge branch 'qed-sriov-legacy'

Yuval Mintz says:

====================
qed*: IOV patch series

Recent FW [8.10.10.0] enables supporting SR-IOV interaction
with legacy VFs/PFs. This patch series adds the driver changes necessary
to utilize this additional compatibility.
In addition, it utilizes the new FW ability to prevent pause floods by VFs,
and fixes a bug that is [mostly] exposed by the added legacy support.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4d55d014 b0bccb69
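
Before reading the diff, the compatibility scheme in one paragraph: a legacy VF advertises the VFPF_ACQUIRE_CAP_PRE_FP_HSI capability in its ACQUIRE request, and the PF, instead of rejecting the fastpath-HSI major-version mismatch, clamps the VF to the last pre-fastpath HSI minor (ETH_HSI_VER_NO_PKT_LEN_TUNN) and flags it as legacy for the rest of the flow. Below is a minimal standalone sketch of just that decision; the numeric values of the HSI constants are illustrative placeholders, not the real ones from the qed headers.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only - the real constants live in the qed headers. */
#define ETH_HSI_VER_MAJOR           3
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1ULL << 0)

struct vfdev_info {
    unsigned long long capabilities;
    unsigned char eth_fp_hsi_major;
    unsigned char eth_fp_hsi_minor;
};

/* Mirrors the acquire-path decision: accept matching VFs as-is, downgrade
 * capability-flagged legacy VFs to the pre-fastpath HSI, reject the rest.
 */
static bool pf_accept_vf(struct vfdev_info *req, bool *b_legacy)
{
    if (req->eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
        if (!(req->capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI))
            return false; /* genuinely incompatible */
        /* Treat as legacy: clamp to the pre-fastpath HSI */
        req->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
        req->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
    }

    *b_legacy = (req->eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN);
    return true;
}

int main(void)
{
    struct vfdev_info legacy = { VFPF_ACQUIRE_CAP_PRE_FP_HSI, 1, 0 };
    bool b_legacy = false;

    if (pf_accept_vf(&legacy, &b_legacy))
        printf("accepted, legacy=%d\n", b_legacy);
    return 0;
}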
@@ -101,6 +101,9 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->tx_switching_en = p_params->tx_switching;
 
+	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
 	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
 						  p_params->concrete_fid);
@@ -514,7 +517,8 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 			       u8 stats_id,
 			       u16 bd_max_bytes,
 			       dma_addr_t bd_chain_phys_addr,
-			       dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+			       dma_addr_t cqe_pbl_addr,
+			       u16 cqe_pbl_size, bool b_use_zone_a_prod)
 {
 	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -571,11 +575,14 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
-	p_ramrod->vf_rx_prod_index = p_params->vf_qid;
-	if (p_params->vf_qid)
-		DP_VERBOSE(p_hwfn, QED_MSG_SP,
-			   "Queue is meant for VF rxq[%04x]\n",
-			   p_params->vf_qid);
+	if (p_params->vf_qid || b_use_zone_a_prod) {
+		p_ramrod->vf_rx_prod_index = p_params->vf_qid;
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Queue%s is meant for VF rxq[%02x]\n",
+			   b_use_zone_a_prod ? " [legacy]" : "",
+			   p_params->vf_qid);
+		p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+	}
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
@@ -637,8 +644,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
 					 abs_stats_id,
 					 bd_max_bytes,
 					 bd_chain_phys_addr,
-					 cqe_pbl_addr,
-					 cqe_pbl_size);
+					 cqe_pbl_addr, cqe_pbl_size, false);
 
 	if (rc)
 		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
@@ -1679,6 +1685,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
 		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
 					    &info->num_vlan_filters);
 		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+
+		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
 	}
 
 	qed_fill_dev_info(cdev, &info->common);
......
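
The b_use_zone_a_prod plumbing above exists because legacy VFs compute their Rx producer address in a different BAR zone than current VFs, and the ramrod has to tell FW which zone to use. A rough sketch of the address selection follows; the zone bases are made-up placeholders (the real layout comes from the device's BAR map and MSTORM_ETH_VF_PRODS_OFFSET()):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical zone bases - placeholders for the real BAR layout. */
#define ZONE_A_PROD_BASE 0x0000u	/* legacy VFs */
#define ZONE_B_PROD_BASE 0x1000u	/* current VFs */

static uint32_t rx_prod_offset(uint8_t rx_qid, bool b_use_zone_a_prod)
{
    uint32_t base = b_use_zone_a_prod ? ZONE_A_PROD_BASE
                                      : ZONE_B_PROD_BASE;

    /* one 32-bit producer slot per Rx queue in this sketch */
    return base + rx_qid * (uint32_t)sizeof(uint32_t);
}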
@@ -102,6 +102,8 @@ struct qed_sp_vport_start_params {
 	u16 opaque_fid;
 	u8 vport_id;
 	u16 mtu;
+	bool check_mac;
+	bool check_ethtype;
 };
 
 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
@@ -225,7 +227,8 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 				u8 stats_id,
 				u16 bd_max_bytes,
 				dma_addr_t bd_chain_phys_addr,
-				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+				dma_addr_t cqe_pbl_addr,
+				u16 cqe_pbl_size, bool b_use_zone_a_prod);
 
 int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
 				u16 opaque_fid,
......
@@ -60,7 +60,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
 	}
 
 	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
-	if (fp_minor > ETH_HSI_VER_MINOR) {
+	if (fp_minor > ETH_HSI_VER_MINOR &&
+	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
 		DP_VERBOSE(p_hwfn,
 			   QED_MSG_IOV,
 			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
@@ -1241,6 +1242,16 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
 			   p_req->num_vlan_filters,
 			   p_resp->num_vlan_filters,
 			   p_req->num_mc_filters, p_resp->num_mc_filters);
+
+		/* Some legacy OSes are incapable of correctly handling this
+		 * failure.
+		 */
+		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+		    (p_vf->acquire.vfdev_info.os_type ==
+		     VFPF_ACQUIRE_OS_WINDOWS))
+			return PFVF_STATUS_SUCCESS;
+
 		return PFVF_STATUS_NO_RESOURCE;
 	}
@@ -1287,8 +1298,26 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
 	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
 
+	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+			   vf->abs_vf_id, vf->state);
+		goto out;
+	}
+
 	/* Validate FW compatibility */
 	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
+		if (req->vfdev_info.capabilities &
+		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
+
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF[%d] is pre-fastpath HSI\n",
+				   vf->abs_vf_id);
+			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+		} else {
 		DP_INFO(p_hwfn,
 			"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
 			vf->abs_vf_id,
@@ -1298,6 +1327,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 			goto out;
 		}
+	}
 
 	/* On 100g PFs, prevent old VFs from loading */
 	if ((p_hwfn->cdev->num_hwfns > 1) &&
@@ -1335,6 +1365,10 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 	pfdev_info->fw_minor = FW_MINOR_VERSION;
 	pfdev_info->fw_rev = FW_REVISION_VERSION;
 	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+
+	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
+	 * this field.
+	 */
 	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
 					 req->vfdev_info.eth_fp_hsi_minor);
 
 	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
@@ -1646,6 +1680,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 	params.vport_id = vf->vport_id;
 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
 	params.mtu = vf->mtu;
+	params.check_mac = true;
 
 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
 	if (rc) {
@@ -1691,21 +1726,32 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
 
 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
 					  struct qed_ptt *p_ptt,
-					  struct qed_vf_info *vf, u8 status)
+					  struct qed_vf_info *vf,
+					  u8 status, bool b_legacy)
 {
 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
 	struct pfvf_start_queue_resp_tlv *p_tlv;
 	struct vfpf_start_rxq_tlv *req;
+	u16 length;
 
 	mbx->offset = (u8 *)mbx->reply_virt;
 
+	/* Taking a bigger struct instead of adding a TLV to list was a
+	 * mistake, but one which we're now stuck with, as some older
+	 * clients assume the size of the previous response.
+	 */
+	if (!b_legacy)
+		length = sizeof(*p_tlv);
+	else
+		length = sizeof(struct pfvf_def_resp_tlv);
+
 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
-			    sizeof(*p_tlv));
+			    length);
 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 		    sizeof(struct channel_list_end_tlv));
 
 	/* Update the TLV with the response */
-	if (status == PFVF_STATUS_SUCCESS) {
+	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
 		req = &mbx->req_virt->start_rxq;
 		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
 				offsetof(struct mstorm_vf_zone,
@@ -1713,7 +1759,7 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
 				sizeof(struct eth_rx_prod_data) * req->rx_qid;
 	}
 
-	qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 }
 
 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
@@ -1724,6 +1770,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
 	struct vfpf_start_rxq_tlv *req;
+	bool b_legacy_vf = false;
 	int rc;
 
 	memset(&params, 0, sizeof(params));
@@ -1739,13 +1786,27 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 	params.sb = req->hw_sb;
 	params.sb_idx = req->sb_index;
 
+	/* Legacy VFs have their Producers in a different location, which they
+	 * calculate on their own and clean the producer prior to this.
+	 */
+	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+	    ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+		b_legacy_vf = true;
+	} else {
+		REG_WR(p_hwfn,
+		       GTT_BAR0_MAP_REG_MSDM_RAM +
+		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+		       0);
+	}
+
 	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
 					 vf->vf_queues[req->rx_qid].fw_cid,
 					 &params,
 					 vf->abs_vf_id + 0x10,
 					 req->bd_max_bytes,
 					 req->rxq_addr,
-					 req->cqe_pbl_addr, req->cqe_pbl_size);
+					 req->cqe_pbl_addr, req->cqe_pbl_size,
+					 b_legacy_vf);
 
 	if (rc) {
 		status = PFVF_STATUS_FAILURE;
@@ -1756,7 +1817,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 	}
 
 out:
-	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
 }
 
 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
@@ -1765,23 +1826,38 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
 {
 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 	struct pfvf_start_queue_resp_tlv *p_tlv;
+	bool b_legacy = false;
+	u16 length;
 
 	mbx->offset = (u8 *)mbx->reply_virt;
 
+	/* Taking a bigger struct instead of adding a TLV to list was a
+	 * mistake, but one which we're now stuck with, as some older
+	 * clients assume the size of the previous response.
+	 */
+	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
+		b_legacy = true;
+
+	if (!b_legacy)
+		length = sizeof(*p_tlv);
+	else
+		length = sizeof(struct pfvf_def_resp_tlv);
+
 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
-			    sizeof(*p_tlv));
+			    length);
 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 		    sizeof(struct channel_list_end_tlv));
 
 	/* Update the TLV with the response */
-	if (status == PFVF_STATUS_SUCCESS) {
+	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
 		u16 qid = mbx->req_virt->start_txq.tx_qid;
 
 		p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
 					    DQ_DEMS_LEGACY);
 	}
 
-	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
 }
 
 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
......
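The length juggling in qed_iov_vf_mbx_start_rxq_resp() and qed_iov_vf_mbx_start_txq_resp() above boils down to one rule: legacy clients validate the reply against the old, smaller TLV, so the PF must not grow the response for them. A reduced sketch of just that rule (the struct contents are stand-ins; only the sizing logic matches the patch):

#include <stdbool.h>
#include <stddef.h>

/* Stand-in layouts - only their relative sizes matter for the rule. */
struct pfvf_def_resp_tlv {
    unsigned char hdr[16];
};

struct pfvf_start_queue_resp_tlv {
    struct pfvf_def_resp_tlv def;
    unsigned int offset;	/* producer/doorbell offset for new VFs */
};

static size_t queue_resp_length(bool b_legacy)
{
    /* Legacy VFs size-check the reply against the pre-extension TLV */
    return b_legacy ? sizeof(struct pfvf_def_resp_tlv)
                    : sizeof(struct pfvf_start_queue_resp_tlv);
}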
@@ -86,7 +86,7 @@ struct vfpf_acquire_tlv {
 	struct vfpf_first_tlv first_tlv;
 
 	struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_OBSOLETE	(1 << 0)
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI	(1 << 0) /* VF pre-FP hsi version */
 #define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
 		u64 capabilities;
 		u8 fw_major;
@@ -551,6 +551,11 @@ struct qed_vf_iov {
 	/* we set aside a copy of the acquire response */
 	struct pfvf_acquire_resp_tlv acquire_resp;
+
+	/* In case PF originates prior to the fp-hsi version comparison,
+	 * this has to be propagated as it affects the fastpath.
+	 */
+	bool b_pre_fp_hsi;
 };
 
 #ifdef CONFIG_QED_SRIOV
......
@@ -268,6 +268,8 @@ struct qede_tx_queue {
 	u16 num_tx_buffers;
 	u64 xmit_pkts;
 	u64 stopped_cnt;
+
+	bool is_legacy;
 };
 
 #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
......
@@ -598,6 +598,14 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 			1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
 	}
 
+	/* Legacy FW had flipped behavior in regard to this bit -
+	 * i.e., needed to set to prevent FW from touching encapsulated
+	 * packets when it didn't need to.
+	 */
+	if (unlikely(txq->is_legacy))
+		first_bd->data.bitfields ^=
+			1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
 	/* If the packet is IPv6 with extension header, indicate that
 	 * to FW and pass few params, since the device cracker doesn't
 	 * support parsing IPv6 with extension header/s.
@@ -2991,6 +2999,8 @@ static void qede_init_fp(struct qede_dev *edev)
 		for (tc = 0; tc < edev->num_tc; tc++) {
 			txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
 			fp->txqs[tc].index = txq_index;
+			if (edev->dev_info.is_legacy)
+				fp->txqs[tc].is_legacy = true;
 		}
 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
......
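The XOR in qede_start_xmit() above is worth spelling out: legacy FW interprets the tunnelling bit with inverted polarity, so flipping whatever value the common path already computed yields the correct setting for legacy queues without a second code path. A self-contained sketch (the bit position is a placeholder for ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT):

#include <stdbool.h>
#include <stdint.h>

#define TUNN_FLAG_SHIFT 4	/* placeholder bit position */

static uint16_t fixup_tunn_flag(uint16_t bitfields, bool is_legacy)
{
    /* The common path set the bit per current-FW semantics; legacy FW
     * wants the opposite, so just flip it.
     */
    if (is_legacy)
        bitfields ^= 1 << TUNN_FLAG_SHIFT;
    return bitfields;
}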
@@ -23,6 +23,9 @@ struct qed_dev_eth_info {
 	u8 port_mac[ETH_ALEN];
 	u8 num_vlan_filters;
+
+	/* Legacy VF - this affects the datapath, so qede has to know */
+	bool is_legacy;
 };
 
 struct qed_update_vport_rss_params {
......