Commit 8ef5cc4f authored by David S. Miller

Merge branch 'bnxt_en-Driver-updates'

Michael Chan says:

====================
bnxt_en: Driver updates.

This patch series adds extended statistics available with the newer
firmware interface (including PCIe counters), reports the firmware
package version, adds aRFS support on 57500 chips and new PCI IDs, and
includes some miscellaneous fixes and improvements.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9073989a 51fec80d
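
Most of what follows is gated on firmware capability bits that the driver discovers once at probe time (HWRM_VER_GET, HWRM_FUNC_QCAPS and the new HWRM_CFA_ADV_FLOW_MGNT_QCAPS) and caches in bp->fw_cap. A minimal sketch of that pattern, using the flag names added in this series (illustrative only, not a literal excerpt from the driver):

	/* capability bits latched at probe time, tested before each optional feature */
	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
		;	/* allocate the PCIe stats buffer and issue HWRM_PCIE_QSTATS */
	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
		;	/* build ntuple filters with rfs_ring_tbl_idx on 57500 chips */
	if (bp->fw_cap & BNXT_FW_CAP_PKG_VER)
		;	/* fw_ver_str already carries the "/pkg" suffix, skip the NVM scan */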
...@@ -114,6 +114,7 @@ enum board_idx { ...@@ -114,6 +114,7 @@ enum board_idx {
BCM5745x_NPAR, BCM5745x_NPAR,
BCM57508, BCM57508,
BCM57504, BCM57504,
BCM57502,
BCM58802, BCM58802,
BCM58804, BCM58804,
BCM58808, BCM58808,
...@@ -158,6 +159,7 @@ static const struct { ...@@ -158,6 +159,7 @@ static const struct {
[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
...@@ -205,6 +207,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { ...@@ -205,6 +207,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV #ifdef CONFIG_BNXT_SRIOV
...@@ -216,6 +219,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { ...@@ -216,6 +219,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif #endif
...@@ -3396,6 +3400,12 @@ static void bnxt_free_port_stats(struct bnxt *bp) ...@@ -3396,6 +3400,12 @@ static void bnxt_free_port_stats(struct bnxt *bp)
bp->hw_rx_port_stats_ext_map); bp->hw_rx_port_stats_ext_map);
bp->hw_rx_port_stats_ext = NULL; bp->hw_rx_port_stats_ext = NULL;
} }
if (bp->hw_pcie_stats) {
dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
bp->hw_pcie_stats, bp->hw_pcie_stats_map);
bp->hw_pcie_stats = NULL;
}
} }
static void bnxt_free_ring_stats(struct bnxt *bp) static void bnxt_free_ring_stats(struct bnxt *bp)
@@ -3440,7 +3450,9 @@ static int bnxt_alloc_stats(struct bnxt *bp)
		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}
-	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
+	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
+		return 0;
	if (bp->hw_rx_port_stats)
		goto alloc_ext_stats;
@@ -3454,34 +3466,32 @@ static int bnxt_alloc_stats(struct bnxt *bp)
	if (!bp->hw_rx_port_stats)
		return -ENOMEM;
-	bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
-			       512;
+	bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
	bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
				   sizeof(struct rx_port_stats) + 512;
	bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats:
	/* Display extended statistics only if FW supports it */
-	if (bp->hwrm_spec_code < 0x10804 ||
-	    bp->hwrm_spec_code == 0x10900)
+	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
+		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
			return 0;
	if (bp->hw_rx_port_stats_ext)
		goto alloc_tx_ext_stats;
	bp->hw_rx_port_stats_ext =
-		dma_alloc_coherent(&pdev->dev,
-				   sizeof(struct rx_port_stats_ext),
-				   &bp->hw_rx_port_stats_ext_map,
-				   GFP_KERNEL);
+		dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
+				   &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
	if (!bp->hw_rx_port_stats_ext)
		return 0;
alloc_tx_ext_stats:
	if (bp->hw_tx_port_stats_ext)
-		return 0;
+		goto alloc_pcie_stats;
-	if (bp->hwrm_spec_code >= 0x10902) {
+	if (bp->hwrm_spec_code >= 0x10902 ||
+	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
		bp->hw_tx_port_stats_ext =
			dma_alloc_coherent(&pdev->dev,
					   sizeof(struct tx_port_stats_ext),
@@ -3489,7 +3499,19 @@ static int bnxt_alloc_stats(struct bnxt *bp)
					   GFP_KERNEL);
	}
	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
-	}
+
+alloc_pcie_stats:
+	if (bp->hw_pcie_stats ||
+	    !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+		return 0;
+
+	bp->hw_pcie_stats =
+		dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
+				   &bp->hw_pcie_stats_map, GFP_KERNEL);
+	if (!bp->hw_pcie_stats)
+		return 0;
+
+	bp->flags |= BNXT_FLAG_PCIE_STATS;
	return 0;
}
@@ -4208,16 +4230,25 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
-	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
	struct flow_keys *keys = &fltr->fkeys;
+	struct bnxt_vnic_info *vnic;
+	u32 dst_ena = 0;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
-	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
+		dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+		req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
+		vnic = &bp->vnic_info[0];
+	} else {
+		vnic = &bp->vnic_info[fltr->rxq + 1];
+	}
+	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
+	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
@@ -4255,7 +4286,6 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);
-	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
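
Before this series, each aRFS filter was tied to a per-RX-ring VNIC (vnic_info[rxq + 1]), which is why aRFS stayed disabled on the 57500 (P5) chips. When the firmware advertises the RFS ring-table-index capability, the filter is attached to the default VNIC instead and the firmware steers matches by RX ring index. A rough sketch of what the request carries in each mode (same field names as in the hunk above):

	/* new mode: default VNIC plus explicit RX ring index */
	req.dst_id           = cpu_to_le16(bp->vnic_info[0].fw_vnic_id);
	req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);	/* queue picked by the stack */

	/* legacy mode kept for older chips: one VNIC per RX ring */
	req.dst_id = cpu_to_le16(bp->vnic_info[fltr->rxq + 1].fw_vnic_id);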
@@ -5503,11 +5533,13 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
	stat = bnxt_get_func_stat_ctxs(bp);
	if (BNXT_NEW_RM(bp) &&
	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
-	     hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
-	     hw_resc->resv_stat_ctxs != stat ||
+	     hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
	     (hw_resc->resv_hw_ring_grps != grp &&
	      !(bp->flags & BNXT_FLAG_CHIP_P5))))
		return true;
+	if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
+	    hw_resc->resv_irqs != nq)
+		return true;
	return false;
}
...@@ -6056,6 +6088,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) ...@@ -6056,6 +6088,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_entries_multiple = 1; ctx->tqm_entries_multiple = 1;
ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
ctx->mrav_num_entries_units =
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
} else { } else {
...@@ -6102,6 +6136,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) ...@@ -6102,6 +6136,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_pg_info *ctx_pg;
__le32 *num_entries; __le32 *num_entries;
__le64 *pg_dir; __le64 *pg_dir;
u32 flags = 0;
u8 *pg_attr; u8 *pg_attr;
int i, rc; int i, rc;
u32 ena; u32 ena;
...@@ -6161,6 +6196,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) ...@@ -6161,6 +6196,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
ctx_pg = &ctx->mrav_mem; ctx_pg = &ctx->mrav_mem;
req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
if (ctx->mrav_num_entries_units)
flags |=
FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.mrav_pg_size_mrav_lvl, &req.mrav_pg_size_mrav_lvl,
...@@ -6187,6 +6225,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) ...@@ -6187,6 +6225,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
*num_entries = cpu_to_le32(ctx_pg->entries); *num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
} }
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) if (rc)
rc = -EIO; rc = -EIO;
...@@ -6325,6 +6364,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ...@@ -6325,6 +6364,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx; struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries; u32 mem_size, ena, entries;
u32 num_mr, num_ah;
u32 extra_srqs = 0; u32 extra_srqs = 0;
u32 extra_qps = 0; u32 extra_qps = 0;
u8 pg_lvl = 1; u8 pg_lvl = 1;
@@ -6388,12 +6428,21 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
		goto skip_rdma;
	ctx_pg = &ctx->mrav_mem;
-	ctx_pg->entries = extra_qps * 4;
+	/* 128K extra is needed to accommodate static AH context
+	 * allocation by f/w.
+	 */
+	num_mr = 1024 * 256;
+	num_ah = 1024 * 128;
+	ctx_pg->entries = num_mr + num_ah;
	mem_size = ctx->mrav_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
	if (rc)
		return rc;
	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+	if (ctx->mrav_num_entries_units)
+		ctx_pg->entries =
+			((num_mr / ctx->mrav_num_entries_units) << 16) |
+			(num_ah / ctx->mrav_num_entries_units);
	ctx_pg = &ctx->tim_mem;
	ctx_pg->entries = ctx->qp_mem.entries;
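
When the firmware reports mrav_num_entries_units, the MRAV entry count is re-encoded as a split value: the MR count (in units) in the upper 16 bits and the AH count (in units) in the lower 16 bits, paired with the MRAV_RESERVATION_SPLIT flag set in the next hunk. A worked example, assuming a units value of 128 purely for illustration (the real value comes from HWRM_FUNC_BACKING_STORE_QCAPS):

	u32 num_mr = 1024 * 256;	/* 256K MR contexts, as above */
	u32 num_ah = 1024 * 128;	/* 128K AH contexts, as above */
	u32 units  = 128;		/* assumed mrav_num_entries_units */
	u32 split  = ((num_mr / units) << 16) | (num_ah / units);
	/* split == (2048 << 16) | 1024 == 0x08000400 */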
...@@ -6508,6 +6557,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) ...@@ -6508,6 +6557,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP; bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP; bp->flags |= BNXT_FLAG_ROCEV2_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
bp->tx_push_thresh = 0; bp->tx_push_thresh = 0;
if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
...@@ -6580,6 +6633,34 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) ...@@ -6580,6 +6633,34 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
return 0; return 0;
} }
static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
{
struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
int rc = 0;
u32 flags;
if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
return 0;
resp = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
goto hwrm_cfa_adv_qcaps_exit;
flags = le32_to_cpu(resp->flags);
if (flags &
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
hwrm_cfa_adv_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static int bnxt_hwrm_func_reset(struct bnxt *bp) static int bnxt_hwrm_func_reset(struct bnxt *bp)
{ {
struct hwrm_func_reset_input req = {0}; struct hwrm_func_reset_input req = {0};
...@@ -6671,6 +6752,15 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) ...@@ -6671,6 +6752,15 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
if (strlen(resp->active_pkg_name)) {
int fw_ver_len = strlen(bp->fw_ver_str);
snprintf(bp->fw_ver_str + fw_ver_len,
FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
resp->active_pkg_name);
bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
}
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout) if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
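
With this, the package version is folded into the firmware version string instead of being scanned out of NVM (see the bnxt_ethtool_init() change further down). Purely to illustrate the resulting format, with made-up version numbers:

	/* hypothetical values, shown only to illustrate the format */
	char buf[FW_VER_STR_LEN] = "218.1.153.0";	/* existing fw_ver_str */
	int len = strlen(buf);

	snprintf(buf + len, FW_VER_STR_LEN - len - 1, "/pkg %s",
		 "218.1.166.0");			/* active_pkg_name from VER_GET */
	/* buf now reads "218.1.153.0/pkg 218.1.166.0" */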
...@@ -6703,6 +6793,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) ...@@ -6703,6 +6793,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
if (dev_caps_cfg &
VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
hwrm_ver_get_exit: hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock); mutex_unlock(&bp->hwrm_cmd_lock);
return rc; return rc;
...@@ -6808,6 +6902,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) ...@@ -6808,6 +6902,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
return rc; return rc;
} }
static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
{
struct hwrm_pcie_qstats_input req = {0};
if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{ {
if (bp->vxlan_port_cnt) { if (bp->vxlan_port_cnt) {
@@ -8655,7 +8762,7 @@ static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.phy_addr = phy_addr;
	req.reg_addr = cpu_to_le16(reg & 0x1f);
-	if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
+	if (mdio_phy_id_is_c45(phy_addr)) {
		req.cl45_mdio = 1;
		req.phy_addr = mdio_phy_id_prtad(phy_addr);
		req.dev_addr = mdio_phy_id_devad(phy_addr);
@@ -8682,7 +8789,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.phy_addr = phy_addr;
	req.reg_addr = cpu_to_le16(reg & 0x1f);
-	if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
+	if (mdio_phy_id_is_c45(phy_addr)) {
		req.cl45_mdio = 1;
		req.phy_addr = mdio_phy_id_prtad(phy_addr);
		req.dev_addr = mdio_phy_id_devad(phy_addr);
@@ -9000,8 +9107,11 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
+			return true;
		return false;
+	}
	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
		return true;
	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -9016,7 +9126,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
	int vnics, max_vnics, max_rss_ctxs;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
-		return false;
+		return bnxt_rfs_supported(bp);
	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
		return false;
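
Net effect of the two hunks above on 57500 parts: both the "supported" and "capable" checks reduce to the firmware capability bit, since ring-table based aRFS does not need the per-ring VNIC and RSS-context accounting done for older chips. Roughly equivalent to (sketch, not driver code):

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return !!(bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX);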
...@@ -9398,6 +9508,7 @@ static void bnxt_sp_task(struct work_struct *work) ...@@ -9398,6 +9508,7 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp); bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_port_qstats_ext(bp); bnxt_hwrm_port_qstats_ext(bp);
bnxt_hwrm_pcie_qstats(bp);
} }
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
...@@ -10601,6 +10712,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -10601,6 +10712,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1; rc = -1;
goto init_err_pci_clean; goto init_err_pci_clean;
} }
rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
if (rc)
netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
rc);
rc = bnxt_init_mac_addr(bp); rc = bnxt_init_mac_addr(bp);
if (rc) { if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n"); dev_err(&pdev->dev, "Unable to initialize mac address.\n");
......
...@@ -1227,6 +1227,7 @@ struct bnxt_ctx_mem_info { ...@@ -1227,6 +1227,7 @@ struct bnxt_ctx_mem_info {
u16 mrav_entry_size; u16 mrav_entry_size;
u16 tim_entry_size; u16 tim_entry_size;
u32 tim_max_entries; u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple; u8 tqm_entries_multiple;
u32 flags; u32 flags;
...@@ -1354,6 +1355,7 @@ struct bnxt { ...@@ -1354,6 +1355,7 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000 #define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000 #define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \ BNXT_FLAG_RFS | \
...@@ -1480,6 +1482,11 @@ struct bnxt { ...@@ -1480,6 +1482,11 @@ struct bnxt {
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080 #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400 #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800 #define BNXT_FW_CAP_TRUSTED_VF 0x00000800
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code; u32 hwrm_spec_code;
...@@ -1498,10 +1505,12 @@ struct bnxt { ...@@ -1498,10 +1505,12 @@ struct bnxt {
struct tx_port_stats *hw_tx_port_stats; struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext; struct rx_port_stats_ext *hw_rx_port_stats_ext;
struct tx_port_stats_ext *hw_tx_port_stats_ext; struct tx_port_stats_ext *hw_tx_port_stats_ext;
struct pcie_ctx_hw_stats *hw_pcie_stats;
dma_addr_t hw_rx_port_stats_map; dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map; dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map; dma_addr_t hw_rx_port_stats_ext_map;
dma_addr_t hw_tx_port_stats_ext_map; dma_addr_t hw_tx_port_stats_ext_map;
dma_addr_t hw_pcie_stats_map;
int hw_port_stats_size; int hw_port_stats_size;
u16 fw_rx_stats_ext_size; u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size; u16 fw_tx_stats_ext_size;
...@@ -1634,6 +1643,9 @@ struct bnxt { ...@@ -1634,6 +1643,9 @@ struct bnxt {
#define BNXT_TX_STATS_EXT_OFFSET(counter) \ #define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8) (offsetof(struct tx_port_stats_ext, counter) / 8)
#define BNXT_PCIE_STATS_OFFSET(counter) \
(offsetof(struct pcie_ctx_hw_stats, counter) / 8)
#define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2 #define I2C_DEV_ADDR_A2 0xa2
#define SFF_DIAG_SUPPORT_OFFSET 0x5c #define SFF_DIAG_SUPPORT_OFFSET 0x5c
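
BNXT_PCIE_STATS_OFFSET divides the field offset by 8 because pcie_ctx_hw_stats is laid out as consecutive 64-bit counters, so the macro yields an index into the DMA buffer viewed as an array of __le64. A minimal sketch of the lookup, mirroring what bnxt_get_ethtool_stats() does in the next file:

	__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
	u64 val = le64_to_cpu(pcie_stats[BNXT_PCIE_STATS_OFFSET(pcie_link_integrity)]);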
......
...@@ -235,6 +235,9 @@ static int bnxt_set_coalesce(struct net_device *dev, ...@@ -235,6 +235,9 @@ static int bnxt_set_coalesce(struct net_device *dev,
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7) BNXT_TX_STATS_PRI_ENTRY(counter, 7)
#define BNXT_PCIE_STATS_ENTRY(counter) \
{ BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
enum { enum {
RX_TOTAL_DISCARDS, RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS,
...@@ -345,6 +348,10 @@ static const struct { ...@@ -345,6 +348,10 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
BNXT_RX_STATS_EXT_COS_ENTRIES, BNXT_RX_STATS_EXT_COS_ENTRIES,
BNXT_RX_STATS_EXT_PFC_ENTRIES, BNXT_RX_STATS_EXT_PFC_ENTRIES,
BNXT_RX_STATS_EXT_ENTRY(rx_bits),
BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
}; };
static const struct { static const struct {
...@@ -383,6 +390,24 @@ static const struct { ...@@ -383,6 +390,24 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets), BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
}; };
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
} bnxt_pcie_stats_arr[] = {
BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
};
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \ #define BNXT_NUM_STATS_PRI \
...@@ -390,6 +415,7 @@ static const struct { ...@@ -390,6 +415,7 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_stats(struct bnxt *bp) static int bnxt_get_num_stats(struct bnxt *bp)
{ {
...@@ -407,6 +433,9 @@ static int bnxt_get_num_stats(struct bnxt *bp) ...@@ -407,6 +433,9 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI; num_stats += BNXT_NUM_STATS_PRI;
} }
if (bp->flags & BNXT_FLAG_PCIE_STATS)
num_stats += BNXT_NUM_PCIE_STATS;
return num_stats; return num_stats;
} }
...@@ -509,6 +538,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, ...@@ -509,6 +538,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
} }
} }
} }
if (bp->flags & BNXT_FLAG_PCIE_STATS) {
__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
buf[j] = le64_to_cpu(*(pcie_stats +
bnxt_pcie_stats_arr[i].offset));
}
}
} }
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
...@@ -609,6 +646,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) ...@@ -609,6 +646,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
} }
} }
} }
if (bp->flags & BNXT_FLAG_PCIE_STATS) {
for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
strcpy(buf, bnxt_pcie_stats_arr[i].string);
buf += ETH_GSTRING_LEN;
}
}
break; break;
case ETH_SS_TEST: case ETH_SS_TEST:
if (bp->num_tests) if (bp->num_tests)
...@@ -3262,6 +3305,7 @@ void bnxt_ethtool_init(struct bnxt *bp) ...@@ -3262,6 +3305,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev; struct net_device *dev = bp->dev;
int i, rc; int i, rc;
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev); bnxt_get_pkgver(dev);
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
......
...@@ -89,7 +89,10 @@ struct hwrm_short_input { ...@@ -89,7 +89,10 @@ struct hwrm_short_input {
__le16 signature; __le16 signature;
#define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
#define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
-	__le16 unused_0;
+	__le16 target_id;
+	#define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+	#define SHORT_REQ_TARGET_ID_TOOLS   0xfffdUL
+	#define SHORT_REQ_TARGET_ID_LAST    SHORT_REQ_TARGET_ID_TOOLS
__le16 size; __le16 size;
__le64 req_addr; __le64 req_addr;
}; };
...@@ -211,6 +214,7 @@ struct cmd_nums { ...@@ -211,6 +214,7 @@ struct cmd_nums {
#define HWRM_FWD_RESP 0xd2UL #define HWRM_FWD_RESP 0xd2UL
#define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
#define HWRM_OEM_CMD 0xd4UL #define HWRM_OEM_CMD 0xd4UL
#define HWRM_PORT_PRBS_TEST 0xd5UL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL #define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL #define HWRM_WOL_FILTER_FREE 0xf1UL
...@@ -262,6 +266,7 @@ struct cmd_nums { ...@@ -262,6 +266,7 @@ struct cmd_nums {
#define HWRM_CFA_EEM_QCFG 0x122UL #define HWRM_CFA_EEM_QCFG 0x122UL
#define HWRM_CFA_EEM_OP 0x123UL #define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_CFA_TFLIB 0x125UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL #define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
...@@ -272,6 +277,7 @@ struct cmd_nums { ...@@ -272,6 +277,7 @@ struct cmd_nums {
#define HWRM_ENGINE_CKV_RNG_GET 0x134UL #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
#define HWRM_ENGINE_CKV_KEY_GEN 0x135UL #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
#define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
#define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
#define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
#define HWRM_ENGINE_QG_QUERY 0x13dUL #define HWRM_ENGINE_QG_QUERY 0x13dUL
#define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
...@@ -312,6 +318,11 @@ struct cmd_nums { ...@@ -312,6 +318,11 @@ struct cmd_nums {
#define HWRM_SELFTEST_IRQ 0x202UL #define HWRM_SELFTEST_IRQ 0x202UL
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
#define HWRM_PCIE_QSTATS 0x204UL #define HWRM_PCIE_QSTATS 0x204UL
#define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
#define HWRM_MFG_TIMERS_QUERY 0x206UL
#define HWRM_MFG_OTP_CFG 0x207UL
#define HWRM_MFG_OTP_QCFG 0x208UL
#define HWRM_MFG_HDMA_TEST 0x209UL
#define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL #define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL #define HWRM_DBG_WRITE_DIRECT 0xff12UL
...@@ -325,6 +336,8 @@ struct cmd_nums { ...@@ -325,6 +336,8 @@ struct cmd_nums {
#define HWRM_DBG_FW_CLI 0xff1aUL #define HWRM_DBG_FW_CLI 0xff1aUL
#define HWRM_DBG_I2C_CMD 0xff1bUL #define HWRM_DBG_I2C_CMD 0xff1bUL
#define HWRM_DBG_RING_INFO_GET 0xff1cUL #define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
#define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL #define HWRM_NVM_FLUSH 0xfff0UL
...@@ -362,6 +375,9 @@ struct ret_codes { ...@@ -362,6 +375,9 @@ struct ret_codes {
#define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
#define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
#define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
#define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
#define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
...@@ -387,11 +403,15 @@ struct hwrm_err_output { ...@@ -387,11 +403,15 @@ struct hwrm_err_output {
#define HW_HASH_INDEX_SIZE 0x80 #define HW_HASH_INDEX_SIZE 0x80
#define HW_HASH_KEY_SIZE 40 #define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 #define HWRM_RESP_VALID_KEY 1
#define HWRM_TARGET_ID_BONO 0xFFF8
#define HWRM_TARGET_ID_KONG 0xFFF9
#define HWRM_TARGET_ID_APE 0xFFFA
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0 #define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.0.47"
+#define HWRM_VERSION_RSVD 69
+#define HWRM_VERSION_STR "1.10.0.69"
/* hwrm_ver_get_input (size:192b/24B) */ /* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input { struct hwrm_ver_get_input {
...@@ -442,6 +462,7 @@ struct hwrm_ver_get_output { ...@@ -442,6 +462,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
u8 roce_fw_maj_8b; u8 roce_fw_maj_8b;
u8 roce_fw_min_8b; u8 roce_fw_min_8b;
u8 roce_fw_bld_8b; u8 roce_fw_bld_8b;
...@@ -449,7 +470,7 @@ struct hwrm_ver_get_output { ...@@ -449,7 +470,7 @@ struct hwrm_ver_get_output {
char hwrm_fw_name[16]; char hwrm_fw_name[16];
char mgmt_fw_name[16]; char mgmt_fw_name[16];
char netctrl_fw_name[16]; char netctrl_fw_name[16];
-	u8 reserved2[16];
+	char active_pkg_name[16];
char roce_fw_name[16]; char roce_fw_name[16];
__le16 chip_num; __le16 chip_num;
u8 chip_rev; u8 chip_rev;
...@@ -1047,6 +1068,7 @@ struct hwrm_func_qcaps_output { ...@@ -1047,6 +1068,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
#define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
u8 mac_address[6]; u8 mac_address[6];
__le16 max_rsscos_ctx; __le16 max_rsscos_ctx;
__le16 max_cmpl_rings; __le16 max_cmpl_rings;
...@@ -1715,7 +1737,7 @@ struct hwrm_func_backing_store_qcaps_output { ...@@ -1715,7 +1737,7 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_entry_size; __le16 mrav_entry_size;
__le16 tim_entry_size; __le16 tim_entry_size;
__le32 tim_max_entries; __le32 tim_max_entries;
-	u8 unused_0[2];
+	__le16 mrav_num_entries_units;
u8 tqm_entries_multiple; u8 tqm_entries_multiple;
u8 valid; u8 valid;
}; };
...@@ -1729,6 +1751,7 @@ struct hwrm_func_backing_store_cfg_input { ...@@ -1729,6 +1751,7 @@ struct hwrm_func_backing_store_cfg_input {
__le64 resp_addr; __le64 resp_addr;
__le32 flags; __le32 flags;
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables; __le32 enables;
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
...@@ -2580,7 +2603,7 @@ struct hwrm_port_phy_qcfg_output { ...@@ -2580,7 +2603,7 @@ struct hwrm_port_phy_qcfg_output {
u8 valid; u8 valid;
}; };
-/* hwrm_port_mac_cfg_input (size:320b/40B) */
+/* hwrm_port_mac_cfg_input (size:384b/48B) */
struct hwrm_port_mac_cfg_input { struct hwrm_port_mac_cfg_input {
__le16 req_type; __le16 req_type;
__le16 cmpl_ring; __le16 cmpl_ring;
...@@ -2601,6 +2624,7 @@ struct hwrm_port_mac_cfg_input { ...@@ -2601,6 +2624,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
__le32 enables; __le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
...@@ -2610,6 +2634,7 @@ struct hwrm_port_mac_cfg_input { ...@@ -2610,6 +2634,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
__le16 port_id; __le16 port_id;
u8 ipg; u8 ipg;
u8 lpbk; u8 lpbk;
...@@ -2642,6 +2667,8 @@ struct hwrm_port_mac_cfg_input { ...@@ -2642,6 +2667,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5 #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3]; u8 unused_0[3];
__s32 ptp_freq_adj_ppb;
u8 unused_1[4];
}; };
/* hwrm_port_mac_cfg_output (size:128b/16B) */ /* hwrm_port_mac_cfg_output (size:128b/16B) */
...@@ -2682,6 +2709,7 @@ struct hwrm_port_mac_ptp_qcfg_output { ...@@ -2682,6 +2709,7 @@ struct hwrm_port_mac_ptp_qcfg_output {
u8 flags; u8 flags;
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
u8 unused_0[3]; u8 unused_0[3];
__le32 rx_ts_reg_off_lower; __le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper; __le32 rx_ts_reg_off_upper;
...@@ -2888,7 +2916,7 @@ struct tx_port_stats_ext { ...@@ -2888,7 +2916,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions; __le64 pfc_pri7_tx_transitions;
}; };
-/* rx_port_stats_ext (size:2368b/296B) */
+/* rx_port_stats_ext (size:2624b/328B) */
struct rx_port_stats_ext { struct rx_port_stats_ext {
__le64 link_down_events; __le64 link_down_events;
__le64 continuous_pause_events; __le64 continuous_pause_events;
...@@ -2927,6 +2955,10 @@ struct rx_port_stats_ext { ...@@ -2927,6 +2955,10 @@ struct rx_port_stats_ext {
__le64 pfc_pri6_rx_transitions; __le64 pfc_pri6_rx_transitions;
__le64 pfc_pri7_rx_duration_us; __le64 pfc_pri7_rx_duration_us;
__le64 pfc_pri7_rx_transitions; __le64 pfc_pri7_rx_transitions;
__le64 rx_bits;
__le64 rx_buffer_passed_threshold;
__le64 rx_pcs_symbol_err;
__le64 rx_corrected_bits;
}; };
/* hwrm_port_qstats_ext_input (size:320b/40B) */ /* hwrm_port_qstats_ext_input (size:320b/40B) */
...@@ -3029,6 +3061,35 @@ struct hwrm_port_lpbk_clr_stats_output { ...@@ -3029,6 +3061,35 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid; u8 valid;
}; };
/* hwrm_port_ts_query_input (size:192b/24B) */
struct hwrm_port_ts_query_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 flags;
#define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
#define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
#define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
#define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
#define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
__le16 port_id;
u8 unused_0[2];
};
/* hwrm_port_ts_query_output (size:192b/24B) */
struct hwrm_port_ts_query_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le64 ptp_msg_ts;
__le16 ptp_msg_seqid;
u8 unused_0[5];
u8 valid;
};
/* hwrm_port_phy_qcaps_input (size:192b/24B) */ /* hwrm_port_phy_qcaps_input (size:192b/24B) */
struct hwrm_port_phy_qcaps_input { struct hwrm_port_phy_qcaps_input {
__le16 req_type; __le16 req_type;
...@@ -4703,7 +4764,8 @@ struct hwrm_vnic_qcaps_output { ...@@ -4703,7 +4764,8 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
-	u8 unused_1[7];
+	__le16 max_aggs_supported;
+	u8 unused_1[5];
u8 valid; u8 valid;
}; };
...@@ -4723,6 +4785,7 @@ struct hwrm_vnic_tpa_cfg_input { ...@@ -4723,6 +4785,7 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
#define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
__le32 enables; __le32 enables;
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
...@@ -5254,6 +5317,8 @@ struct hwrm_cfa_l2_filter_alloc_input { ...@@ -5254,6 +5317,8 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4) #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4) #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
__le32 enables; __le32 enables;
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
...@@ -5272,8 +5337,11 @@ struct hwrm_cfa_l2_filter_alloc_input { ...@@ -5272,8 +5337,11 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
u8 l2_addr[6]; u8 l2_addr[6];
u8 unused_0[2]; u8 num_vlans;
u8 t_num_vlans;
u8 l2_addr_mask[6]; u8 l2_addr_mask[6];
__le16 l2_ovlan; __le16 l2_ovlan;
__le16 l2_ovlan_mask; __le16 l2_ovlan_mask;
...@@ -5338,6 +5406,16 @@ struct hwrm_cfa_l2_filter_alloc_output { ...@@ -5338,6 +5406,16 @@ struct hwrm_cfa_l2_filter_alloc_output {
__le16 resp_len; __le16 resp_len;
__le64 l2_filter_id; __le64 l2_filter_id;
__le32 flow_id; __le32 flow_id;
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
#define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3]; u8 unused_0[3];
u8 valid; u8 valid;
}; };
...@@ -5504,6 +5582,16 @@ struct hwrm_cfa_tunnel_filter_alloc_output { ...@@ -5504,6 +5582,16 @@ struct hwrm_cfa_tunnel_filter_alloc_output {
__le16 resp_len; __le16 resp_len;
__le64 tunnel_filter_id; __le64 tunnel_filter_id;
__le32 flow_id; __le32 flow_id;
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
#define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3]; u8 unused_0[3];
u8 valid; u8 valid;
}; };
...@@ -5646,7 +5734,7 @@ struct hwrm_cfa_encap_record_free_output { ...@@ -5646,7 +5734,7 @@ struct hwrm_cfa_encap_record_free_output {
u8 valid; u8 valid;
}; };
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */
struct hwrm_cfa_ntuple_filter_alloc_input { struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 req_type; __le16 req_type;
__le16 cmpl_ring; __le16 cmpl_ring;
...@@ -5678,6 +5766,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { ...@@ -5678,6 +5766,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
__le64 l2_filter_id; __le64 l2_filter_id;
u8 src_macaddr[6]; u8 src_macaddr[6];
__be16 ethertype; __be16 ethertype;
...@@ -5725,6 +5814,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input { ...@@ -5725,6 +5814,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__be16 dst_port; __be16 dst_port;
__be16 dst_port_mask; __be16 dst_port_mask;
__le64 ntuple_filter_id_hint; __le64 ntuple_filter_id_hint;
__le16 rfs_ring_tbl_idx;
u8 unused_0[6];
}; };
/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ /* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
...@@ -5735,6 +5826,16 @@ struct hwrm_cfa_ntuple_filter_alloc_output { ...@@ -5735,6 +5826,16 @@ struct hwrm_cfa_ntuple_filter_alloc_output {
__le16 resp_len; __le16 resp_len;
__le64 ntuple_filter_id; __le64 ntuple_filter_id;
__le32 flow_id; __le32 flow_id;
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
#define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3]; u8 unused_0[3];
u8 valid; u8 valid;
}; };
...@@ -5947,6 +6048,7 @@ struct hwrm_cfa_flow_alloc_input { ...@@ -5947,6 +6048,7 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
__le16 dst_fid; __le16 dst_fid;
__be16 l2_rewrite_vlan_tpid; __be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci; __be16 l2_rewrite_vlan_tci;
...@@ -5997,6 +6099,16 @@ struct hwrm_cfa_flow_alloc_output { ...@@ -5997,6 +6099,16 @@ struct hwrm_cfa_flow_alloc_output {
__le16 flow_handle; __le16 flow_handle;
u8 unused_0[2]; u8 unused_0[2];
__le32 flow_id; __le32 flow_id;
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
#define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
__le64 ext_flow_handle; __le64 ext_flow_handle;
__le32 flow_counter_id; __le32 flow_counter_id;
u8 unused_1[3]; u8 unused_1[3];
...@@ -6011,7 +6123,8 @@ struct hwrm_cfa_flow_free_input { ...@@ -6011,7 +6123,8 @@ struct hwrm_cfa_flow_free_input {
__le16 target_id; __le16 target_id;
__le64 resp_addr; __le64 resp_addr;
__le16 flow_handle; __le16 flow_handle;
-	u8 unused_0[6];
+	__le16 unused_0;
+	__le32 flow_counter_id;
__le64 ext_flow_handle; __le64 ext_flow_handle;
}; };
...@@ -6201,6 +6314,8 @@ struct hwrm_cfa_eem_qcaps_output { ...@@ -6201,6 +6314,8 @@ struct hwrm_cfa_eem_qcaps_output {
__le32 flags; __le32 flags;
#define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
#define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
__le32 unused_0; __le32 unused_0;
__le32 supported; __le32 supported;
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
...@@ -6226,7 +6341,9 @@ struct hwrm_cfa_eem_cfg_input { ...@@ -6226,7 +6341,9 @@ struct hwrm_cfa_eem_cfg_input {
#define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+	#define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
-	__le32 unused_0;
+	__le16 group_id;
+	__le16 unused_0;
__le32 num_entries; __le32 num_entries;
__le32 unused_1; __le32 unused_1;
__le16 key0_ctx_id; __le16 key0_ctx_id;
...@@ -6258,7 +6375,7 @@ struct hwrm_cfa_eem_qcfg_input { ...@@ -6258,7 +6375,7 @@ struct hwrm_cfa_eem_qcfg_input {
__le32 unused_0; __le32 unused_0;
}; };
-/* hwrm_cfa_eem_qcfg_output (size:128b/16B) */
+/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */
struct hwrm_cfa_eem_qcfg_output { struct hwrm_cfa_eem_qcfg_output {
__le16 error_code; __le16 error_code;
__le16 req_type; __le16 req_type;
...@@ -6269,6 +6386,8 @@ struct hwrm_cfa_eem_qcfg_output { ...@@ -6269,6 +6386,8 @@ struct hwrm_cfa_eem_qcfg_output {
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries; __le32 num_entries;
u8 unused_0[7];
u8 valid;
}; };
/* hwrm_cfa_eem_op_input (size:192b/24B) */ /* hwrm_cfa_eem_op_input (size:192b/24B) */
...@@ -6300,6 +6419,39 @@ struct hwrm_cfa_eem_op_output { ...@@ -6300,6 +6419,39 @@ struct hwrm_cfa_eem_op_output {
u8 valid; u8 valid;
}; };
/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 unused_0[4];
};
/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le32 flags;
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
u8 unused_0[3];
u8 valid;
};
/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ /* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input { struct hwrm_tunnel_dst_port_query_input {
__le16 req_type; __le16 req_type;
...@@ -6636,7 +6788,8 @@ struct hwrm_fw_qstatus_output { ...@@ -6636,7 +6788,8 @@ struct hwrm_fw_qstatus_output {
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
-	#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
u8 unused_0[6]; u8 unused_0[6];
u8 valid; u8 valid;
}; };
...@@ -6659,8 +6812,8 @@ struct hwrm_fw_set_time_input { ...@@ -6659,8 +6812,8 @@ struct hwrm_fw_set_time_input {
u8 unused_0; u8 unused_0;
__le16 millisecond; __le16 millisecond;
__le16 zone; __le16 zone;
-	#define FW_SET_TIME_REQ_ZONE_UTC     0x0UL
-	#define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+	#define FW_SET_TIME_REQ_ZONE_UTC     0
+	#define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
#define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
u8 unused_1[4]; u8 unused_1[4];
}; };
...@@ -7064,7 +7217,9 @@ struct hwrm_dbg_coredump_list_input { ...@@ -7064,7 +7217,9 @@ struct hwrm_dbg_coredump_list_input {
__le64 host_dest_addr; __le64 host_dest_addr;
__le32 host_buf_len; __le32 host_buf_len;
__le16 seq_no; __le16 seq_no;
-	u8 unused_0[2];
+	u8 flags;
+	#define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+	u8 unused_0[1];
}; };
/* hwrm_dbg_coredump_list_output (size:128b/16B) */ /* hwrm_dbg_coredump_list_output (size:128b/16B) */
...@@ -7392,7 +7547,9 @@ struct hwrm_nvm_get_dev_info_output { ...@@ -7392,7 +7547,9 @@ struct hwrm_nvm_get_dev_info_output {
__le32 nvram_size; __le32 nvram_size;
__le32 reserved_size; __le32 reserved_size;
__le32 available_size; __le32 available_size;
-	u8 unused_0[3];
+	u8 nvm_cfg_ver_maj;
+	u8 nvm_cfg_ver_min;
+	u8 nvm_cfg_ver_upd;
u8 valid; u8 valid;
}; };
......