Commit f8f20f29 authored by David S. Miller

Merge branch 'bnxt_en-devlink'

Michael Chan says:

====================
bnxt_en: devlink enhancements

This patch series implements some devlink enhancements for bnxt_en.
They include:

1. devlink reload to reinitialize the driver or to activate new firmware.
2. Support the enable_remote_dev_reset devlink parameter to allow or
disallow other functions to reset the device.
3. Consolidate and improve the health reporters.
4. Support live firmware patching.
5. Provide the devlink dev info "fw" version on older firmware.

v2:
In patch 3, don't use devlink_reload_disable() and devlink_reload_enable(),
which are no longer available in the latest kernel.  Instead, check that
the netdev is not in the unregistered state before proceeding with the reload.

In patch 14, use min_t() instead of min() to fix the mismatched type
warning.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a812a046 eff441f3
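
Before the diff, a condensed sketch of how the series hooks into devlink, assembled from the patches below (bnxt_devlink.c). Callback bodies and pre-existing fields are omitted, so treat it as an illustration rather than a standalone, buildable excerpt:

/* Condensed from bnxt_devlink.c as modified by this series; the callback
 * implementations appear in the diff below.
 */

/* Single consolidated "fw" health reporter, replacing the previous
 * fw, fw_reset and fw_fatal reporters.
 */
static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
        .name     = "fw",
        .diagnose = bnxt_fw_diagnose,   /* severity, remedy, counters */
        .dump     = bnxt_fw_dump,       /* live coredump via bnxt_coredump.c */
        .recover  = bnxt_fw_recover,    /* devlink health recover */
};

/* devlink reload: driver re-init and firmware activation; the no_reset
 * limit maps the fw_activate action to live firmware patching.
 */
static const struct devlink_ops bnxt_dl_ops = {
        .info_get       = bnxt_dl_info_get,
        .flash_update   = bnxt_dl_flash_update,
        .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
                          BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
        .reload_limits  = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
        .reload_down    = bnxt_dl_reload_down,
        .reload_up      = bnxt_dl_reload_up,
};

/* Runtime parameter controlling whether other functions may reset the
 * device; kept last in bnxt_dl_params[] because it is registered only
 * when firmware reports BNXT_FW_CAP_HOT_RESET_IF.
 */
static const struct devlink_param bnxt_dl_params[] = {
        /* ... existing NVM-backed parameters unchanged ... */
        DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
                              BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                              bnxt_remote_dev_reset_get,
                              bnxt_remote_dev_reset_set, NULL),
};
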
......@@ -22,6 +22,8 @@ Parameters
- Permanent
* - ``msix_vec_per_pf_min``
- Permanent
* - ``enable_remote_dev_reset``
- Runtime
The ``bnxt`` driver also implements the following driver-specific
parameters.
......
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
......@@ -49,8 +49,6 @@
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
......@@ -85,55 +83,7 @@ MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
#define BNXT_TX_PUSH_THRESH 164
enum board_idx {
BCM57301,
BCM57302,
BCM57304,
BCM57417_NPAR,
BCM58700,
BCM57311,
BCM57312,
BCM57402,
BCM57404,
BCM57406,
BCM57402_NPAR,
BCM57407,
BCM57412,
BCM57414,
BCM57416,
BCM57417,
BCM57412_NPAR,
BCM57314,
BCM57417_SFP,
BCM57416_SFP,
BCM57404_NPAR,
BCM57406_NPAR,
BCM57407_SFP,
BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
BCM57452,
BCM57454,
BCM5745x_NPAR,
BCM57508,
BCM57504,
BCM57502,
BCM57508_NPAR,
BCM57504_NPAR,
BCM57502_NPAR,
BCM58802,
BCM58804,
BCM58808,
NETXTREME_E_VF,
NETXTREME_C_VF,
NETXTREME_S_VF,
NETXTREME_C_VF_HV,
NETXTREME_E_VF_HV,
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
};
/* indexed by enum above */
/* indexed by enum board_idx */
static const struct {
char *name;
} board_info[] = {
......@@ -2172,7 +2122,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
char *fatal_str = "non-fatal";
char *type_str = "Solicited";
if (!bp->fw_health)
goto async_event_process_exit;
......@@ -2184,13 +2134,21 @@ static int bnxt_async_event_process(struct bnxt *bp,
bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
fatal_str = "fatal";
if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
} else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
type_str = "Fatal";
bp->fw_health->fatalities++;
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
} else if (data2 && BNXT_FW_STATUS_HEALTHY !=
EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
type_str = "Non-fatal";
bp->fw_health->survivals++;
set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
}
netif_warn(bp, hw, bp->dev,
"Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
fatal_str, data1, data2,
"%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
type_str, data1, data2,
bp->fw_reset_min_dsecs * 100,
bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
......@@ -2198,17 +2156,18 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
char *status_desc = "healthy";
u32 status;
if (!fw_health)
goto async_event_process_exit;
if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
fw_health->enabled = false;
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[0]\n");
netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
break;
}
fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
......@@ -2218,10 +2177,13 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (status != BNXT_FW_STATUS_HEALTHY)
status_desc = "unhealthy";
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
fw_health->master, fw_health->last_fw_reset_cnt,
bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
"Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
fw_health->primary ? "primary" : "backup", status,
status_desc, fw_health->last_fw_reset_cnt);
if (!fw_health->enabled) {
/* Make sure tmr_counter is set and visible to
* bnxt_health_check() before setting enabled to true.
......@@ -4651,7 +4613,7 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
return rc;
}
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
struct hwrm_func_drv_unrgtr_input *req;
int rc;
......@@ -7192,7 +7154,7 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
ctx_pg->nr_pages = 0;
}
static void bnxt_free_ctx_mem(struct bnxt *bp)
void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
int i;
......@@ -7518,12 +7480,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
flags_ext = le32_to_cpu(resp->flags_ext);
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
......@@ -7579,6 +7547,32 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
return rc;
}
static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
{
struct hwrm_dbg_qcaps_output *resp;
struct hwrm_dbg_qcaps_input *req;
int rc;
bp->fw_dbg_cap = 0;
if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
return;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
if (rc)
return;
req->fid = cpu_to_le16(0xffff);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc)
goto hwrm_dbg_qcaps_exit;
bp->fw_dbg_cap = le32_to_cpu(resp->flags);
hwrm_dbg_qcaps_exit:
hwrm_req_drop(bp, req);
}
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
......@@ -7588,6 +7582,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
rc = __bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
bnxt_hwrm_dbg_qcaps(bp);
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
......@@ -7642,6 +7639,7 @@ static int __bnxt_alloc_fw_health(struct bnxt *bp)
if (!bp->fw_health)
return -ENOMEM;
mutex_init(&bp->fw_health->lock);
return 0;
}
......@@ -7688,12 +7686,16 @@ static void bnxt_inv_fw_health_reg(struct bnxt *bp)
struct bnxt_fw_health *fw_health = bp->fw_health;
u32 reg_type;
if (!fw_health || !fw_health->status_reliable)
if (!fw_health)
return;
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
fw_health->status_reliable = false;
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
fw_health->resets_reliable = false;
}
static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
......@@ -7750,6 +7752,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
int i;
bp->fw_health->status_reliable = false;
bp->fw_health->resets_reliable = false;
/* Only pre-map the monitoring GRC registers using window 3 */
for (i = 0; i < 4; i++) {
u32 reg = fw_health->regs[i];
......@@ -7763,6 +7766,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
bp->fw_health->status_reliable = true;
bp->fw_health->resets_reliable = true;
if (reg_base == 0xffffffff)
return 0;
......@@ -8208,6 +8212,10 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
if (!rc) {
bp->fw_rx_stats_ext_size =
le16_to_cpu(resp_qs->rx_stat_size) / 8;
if (BNXT_FW_MAJ(bp) < 220 &&
bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
bp->fw_tx_stats_ext_size = tx_stat_size ?
le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
} else {
......@@ -9246,7 +9254,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
}
}
static void bnxt_report_link(struct bnxt *bp)
void bnxt_report_link(struct bnxt *bp)
{
if (bp->link_info.link_up) {
const char *signal = "";
......@@ -9691,8 +9699,6 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_req_send(bp, req);
}
static int bnxt_fw_init_one(struct bnxt *bp);
static int bnxt_fw_reset_via_optee(struct bnxt *bp)
{
#ifdef CONFIG_TEE_BNXT_FW
......@@ -9739,6 +9745,33 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int rc;
if (!BNXT_NEW_RM(bp))
return 0; /* no resource reservations required */
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (rc)
netdev_err(bp->dev, "resc_qcaps failed\n");
hw_resc->resv_cp_rings = 0;
hw_resc->resv_stat_ctxs = 0;
hw_resc->resv_irqs = 0;
hw_resc->resv_tx_rings = 0;
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
if (!fw_reset) {
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
}
return rc;
}
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
......@@ -9822,25 +9855,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
return rc;
}
}
if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (rc)
netdev_err(bp->dev, "resc_qcaps failed\n");
hw_resc->resv_cp_rings = 0;
hw_resc->resv_stat_ctxs = 0;
hw_resc->resv_irqs = 0;
hw_resc->resv_tx_rings = 0;
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
if (!fw_reset) {
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
}
}
rc = bnxt_cancel_reservations(bp, fw_reset);
}
return rc;
}
......@@ -10318,7 +10333,7 @@ void bnxt_half_close_nic(struct bnxt *bp)
bnxt_free_mem(bp, false);
}
static void bnxt_reenable_sriov(struct bnxt *bp)
void bnxt_reenable_sriov(struct bnxt *bp)
{
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
......@@ -11295,14 +11310,18 @@ static void bnxt_fw_health_check(struct bnxt *bp)
}
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
if (val == fw_health->last_fw_heartbeat)
if (val == fw_health->last_fw_heartbeat) {
fw_health->arrests++;
goto fw_reset;
}
fw_health->last_fw_heartbeat = val;
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
if (val != fw_health->last_fw_reset_cnt)
if (val != fw_health->last_fw_reset_cnt) {
fw_health->discoveries++;
goto fw_reset;
}
fw_health->tmr_counter = fw_health->tmr_multiplier;
return;
......@@ -11508,7 +11527,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
}
bnxt_fw_reset_close(bp);
wait_dsecs = fw_health->master_func_wait_dsecs;
if (fw_health->master) {
if (fw_health->primary) {
if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
wait_dsecs = 0;
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
......@@ -11772,13 +11791,17 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
bnxt_rx_ring_reset(bp);
if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
bnxt_devlink_health_fw_report(bp);
else
bnxt_fw_reset(bp);
}
if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
if (!is_bnxt_fw_ok(bp))
bnxt_devlink_health_report(bp,
BNXT_FW_EXCEPTION_SP_EVENT);
bnxt_devlink_health_fw_report(bp);
}
smp_mb__before_atomic();
......@@ -11989,7 +12012,7 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
static int bnxt_fw_init_one(struct bnxt *bp)
int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
......@@ -12051,6 +12074,27 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
}
}
bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
{
struct hwrm_func_qcfg_output *resp;
struct hwrm_func_qcfg_input *req;
bool result = true; /* firmware will enforce if unknown */
if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
return result;
if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
return result;
req->fid = cpu_to_le16(0xffff);
resp = hwrm_req_hold(bp, req);
if (!hwrm_req_send(bp, req))
result = !!(le16_to_cpu(resp->flags) &
FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
hwrm_req_drop(bp, req);
return result;
}
static void bnxt_reset_all(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
......@@ -12093,7 +12137,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
bnxt_ulp_start(bp, rc);
bnxt_dl_health_status_update(bp, false);
bnxt_dl_health_fw_status_update(bp, false);
}
bp->fw_reset_state = 0;
dev_close(bp->dev);
......@@ -12159,7 +12203,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
if (!bp->fw_health->master) {
if (!bp->fw_health->primary) {
u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
......@@ -12192,6 +12236,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
}
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
!test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
bnxt_dl_remote_reload(bp);
if (pci_enable_device(bp->pdev)) {
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
rc = -ENODEV;
......@@ -12241,8 +12289,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
bnxt_ptp_reapply_pps(bp);
bnxt_dl_health_recovery_done(bp);
bnxt_dl_health_status_update(bp, true);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
bnxt_dl_health_fw_recovery_done(bp);
bnxt_dl_health_fw_status_update(bp, true);
}
rtnl_unlock();
break;
}
......@@ -13186,6 +13237,15 @@ static int bnxt_map_db_bar(struct bnxt *bp)
return 0;
}
void bnxt_print_device_info(struct bnxt *bp)
{
netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
board_info[bp->board_idx].name,
(long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
pcie_print_link_status(bp->pdev);
}
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
......@@ -13209,10 +13269,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
bp->board_idx = ent->driver_data;
bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
if (bnxt_vf_pciid(ent->driver_data))
if (bnxt_vf_pciid(bp->board_idx))
bp->flags |= BNXT_FLAG_VF;
if (pdev->msix_cap)
......@@ -13382,10 +13443,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
pcie_print_link_status(pdev);
bnxt_print_device_info(bp);
pci_save_state(pdev);
return 0;
......
......@@ -489,6 +489,15 @@ struct rx_tpa_end_cmp_ext {
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \
(((data1) & \
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \
((data2) & \
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
......@@ -1514,6 +1523,21 @@ struct bnxt_ctx_mem_info {
struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
};
enum bnxt_health_severity {
SEVERITY_NORMAL = 0,
SEVERITY_WARNING,
SEVERITY_RECOVERABLE,
SEVERITY_FATAL,
};
enum bnxt_health_remedy {
REMEDY_DEVLINK_RECOVER,
REMEDY_POWER_CYCLE_DEVICE,
REMEDY_POWER_CYCLE_HOST,
REMEDY_FW_UPDATE,
REMEDY_HW_REPLACE,
};
struct bnxt_fw_health {
u32 flags;
u32 polling_dsecs;
......@@ -1531,9 +1555,9 @@ struct bnxt_fw_health {
u32 last_fw_heartbeat;
u32 last_fw_reset_cnt;
u8 enabled:1;
u8 master:1;
u8 fatal:1;
u8 primary:1;
u8 status_reliable:1;
u8 resets_reliable:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
......@@ -1543,12 +1567,15 @@ struct bnxt_fw_health {
u32 echo_req_data1;
u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_reset_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
};
struct bnxt_fw_reporter_ctx {
unsigned long sp_event;
/* Protects severity and remedy */
struct mutex lock;
enum bnxt_health_severity severity;
enum bnxt_health_remedy remedy;
u32 arrests;
u32 discoveries;
u32 survivals;
u32 fatalities;
u32 diagnoses;
};
#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
......@@ -1586,6 +1613,54 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
enum board_idx {
BCM57301,
BCM57302,
BCM57304,
BCM57417_NPAR,
BCM58700,
BCM57311,
BCM57312,
BCM57402,
BCM57404,
BCM57406,
BCM57402_NPAR,
BCM57407,
BCM57412,
BCM57414,
BCM57416,
BCM57417,
BCM57412_NPAR,
BCM57314,
BCM57417_SFP,
BCM57416_SFP,
BCM57404_NPAR,
BCM57406_NPAR,
BCM57407_SFP,
BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
BCM57452,
BCM57454,
BCM5745x_NPAR,
BCM57508,
BCM57504,
BCM57502,
BCM57508_NPAR,
BCM57504_NPAR,
BCM57502_NPAR,
BCM58802,
BCM58804,
BCM58808,
NETXTREME_E_VF,
NETXTREME_C_VF,
NETXTREME_S_VF,
NETXTREME_C_VF_HV,
NETXTREME_E_VF_HV,
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
};
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
......@@ -1840,6 +1915,10 @@ struct bnxt {
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9
#define BNXT_STATE_FW_ACTIVATE 11
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
#define BNXT_STATE_FW_ACTIVATE_RESET 14
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
......@@ -1879,8 +1958,13 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
#define BNXT_FW_CAP_LIVEPATCH 0x08000000
#define BNXT_FW_CAP_PTP_PPS 0x10000000
#define BNXT_FW_CAP_HOT_RESET_IF 0x20000000
#define BNXT_FW_CAP_RING_MONITOR 0x40000000
#define BNXT_FW_CAP_DBG_QCAPS 0x80000000
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
......@@ -2049,6 +2133,7 @@ struct bnxt {
struct list_head tc_indr_block_list;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
enum board_idx board_idx;
};
#define BNXT_NUM_RX_RING_STATS 8
......@@ -2090,6 +2175,9 @@ struct bnxt {
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
#define BNXT_RX_STATS_EXT_NUM_LEGACY \
BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
......@@ -2181,11 +2269,13 @@ void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
void bnxt_free_ctx_mem(struct bnxt *bp);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
......@@ -2194,9 +2284,11 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
......@@ -2205,6 +2297,7 @@ int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
void bnxt_reenable_sriov(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
......@@ -2212,6 +2305,8 @@ void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_fw_init_one(struct bnxt *bp);
bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
......@@ -2219,5 +2314,5 @@ int bnxt_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
void bnxt_dim_work(struct work_struct *work);
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
void bnxt_print_device_info(struct bnxt *bp);
#endif
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2021 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
struct bnxt_hwrm_dbg_dma_info *info)
{
struct hwrm_dbg_cmn_input *cmn_req = msg;
__le16 *seq_ptr = msg + info->seq_off;
struct hwrm_dbg_cmn_output *cmn_resp;
u16 seq = 0, len, segs_off;
dma_addr_t dma_handle;
void *dma_buf, *resp;
int rc, off = 0;
dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
if (!dma_buf) {
hwrm_req_drop(bp, msg);
return -ENOMEM;
}
hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
cmn_resp = hwrm_req_hold(bp, msg);
resp = cmn_resp;
segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
total_segments);
cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
while (1) {
*seq_ptr = cpu_to_le16(seq);
rc = hwrm_req_send(bp, msg);
if (rc)
break;
len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
if (!seq &&
cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
info->segs = le16_to_cpu(*((__le16 *)(resp +
segs_off)));
if (!info->segs) {
rc = -EIO;
break;
}
info->dest_buf_size = info->segs *
sizeof(struct coredump_segment_record);
info->dest_buf = kmalloc(info->dest_buf_size,
GFP_KERNEL);
if (!info->dest_buf) {
rc = -ENOMEM;
break;
}
}
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
memcpy(info->dest_buf + off, dma_buf, len);
} else {
rc = -ENOBUFS;
break;
}
}
if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
info->dest_buf_size += len;
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
seq++;
off += len;
}
hwrm_req_drop(bp, msg);
return rc;
}
static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
struct bnxt_coredump *coredump)
{
struct bnxt_hwrm_dbg_dma_info info = {NULL};
struct hwrm_dbg_coredump_list_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
if (rc)
return rc;
info.dma_len = COREDUMP_LIST_BUF_LEN;
info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
data_len);
rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
if (!rc) {
coredump->data = info.dest_buf;
coredump->data_size = info.dest_buf_size;
coredump->total_segs = info.segs;
}
return rc;
}
static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
if (rc)
return rc;
hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
u16 segment_id, u32 *seg_len,
void *buf, u32 buf_len, u32 offset)
{
struct hwrm_dbg_coredump_retrieve_input *req;
struct bnxt_hwrm_dbg_dma_info info = {NULL};
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
if (rc)
return rc;
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
data_len);
if (buf) {
info.dest_buf = buf + offset;
info.buf_len = buf_len;
info.seg_start = offset;
}
rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
if (!rc)
*seg_len = info.dest_buf_size;
return rc;
}
static void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
struct bnxt_coredump_segment_hdr *seg_hdr,
struct coredump_segment_record *seg_rec, u32 seg_len,
int status, u32 duration, u32 instance)
{
memset(seg_hdr, 0, sizeof(*seg_hdr));
memcpy(seg_hdr->signature, "sEgM", 4);
if (seg_rec) {
seg_hdr->component_id = (__force __le32)seg_rec->component_id;
seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
seg_hdr->low_version = seg_rec->version_low;
seg_hdr->high_version = seg_rec->version_hi;
seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
} else {
/* For hwrm_ver_get response Component id = 2
* and Segment id = 0
*/
seg_hdr->component_id = cpu_to_le32(2);
seg_hdr->segment_id = 0;
}
seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
seg_hdr->length = cpu_to_le32(seg_len);
seg_hdr->status = cpu_to_le32(status);
seg_hdr->duration = cpu_to_le32(duration);
seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
seg_hdr->instance = cpu_to_le32(instance);
}
static void bnxt_fill_cmdline(struct bnxt_coredump_record *record)
{
struct mm_struct *mm = current->mm;
int i, len, last = 0;
if (mm) {
len = min_t(int, mm->arg_end - mm->arg_start,
sizeof(record->commandline) - 1);
if (len && !copy_from_user(record->commandline,
(char __user *)mm->arg_start, len)) {
for (i = 0; i < len; i++) {
if (record->commandline[i])
last = i;
else
record->commandline[i] = ' ';
}
record->commandline[last + 1] = 0;
return;
}
}
strscpy(record->commandline, current->comm, TASK_COMM_LEN);
}
static void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
time64_t start, s16 start_utc, u16 total_segs,
int status)
{
time64_t end = ktime_get_real_seconds();
u32 os_ver_major = 0, os_ver_minor = 0;
struct tm tm;
time64_to_tm(start, 0, &tm);
memset(record, 0, sizeof(*record));
memcpy(record->signature, "cOrE", 4);
record->flags = 0;
record->low_version = 0;
record->high_version = 1;
record->asic_state = 0;
strscpy(record->system_name, utsname()->nodename,
sizeof(record->system_name));
record->year = cpu_to_le16(tm.tm_year + 1900);
record->month = cpu_to_le16(tm.tm_mon + 1);
record->day = cpu_to_le16(tm.tm_mday);
record->hour = cpu_to_le16(tm.tm_hour);
record->minute = cpu_to_le16(tm.tm_min);
record->second = cpu_to_le16(tm.tm_sec);
record->utc_bias = cpu_to_le16(start_utc);
bnxt_fill_cmdline(record);
record->total_segments = cpu_to_le32(total_segs);
if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2)
netdev_warn(bp->dev, "Unknown OS release in coredump\n");
record->os_ver_major = cpu_to_le32(os_ver_major);
record->os_ver_minor = cpu_to_le32(os_ver_minor);
strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name));
time64_to_tm(end, 0, &tm);
record->end_year = cpu_to_le16(tm.tm_year + 1900);
record->end_month = cpu_to_le16(tm.tm_mon + 1);
record->end_day = cpu_to_le16(tm.tm_mday);
record->end_hour = cpu_to_le16(tm.tm_hour);
record->end_minute = cpu_to_le16(tm.tm_min);
record->end_second = cpu_to_le16(tm.tm_sec);
record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
bp->ver_resp.chip_rev << 8 |
bp->ver_resp.chip_metal);
record->asic_id2 = 0;
record->coredump_status = cpu_to_le32(status);
record->ioctl_low_version = 0;
record->ioctl_high_version = 0;
}
static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
struct coredump_segment_record *seg_record = NULL;
struct bnxt_coredump_segment_hdr seg_hdr;
struct bnxt_coredump coredump = {NULL};
time64_t start_time;
u16 start_utc;
int rc = 0, i;
if (buf)
buf_len = *dump_len;
start_time = ktime_get_real_seconds();
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);
/* First segment should be hwrm_ver_get response */
*dump_len = seg_hdr_len + ver_get_resp_len;
if (buf) {
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
0, 0, 0);
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len;
memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
offset += ver_get_resp_len;
}
rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
if (rc) {
netdev_err(bp->dev, "Failed to get coredump segment list\n");
goto err;
}
*dump_len += seg_hdr_len * coredump.total_segs;
seg_record = (struct coredump_segment_record *)coredump.data;
seg_record_len = sizeof(*seg_record);
for (i = 0; i < coredump.total_segs; i++) {
u16 comp_id = le16_to_cpu(seg_record->component_id);
u16 seg_id = le16_to_cpu(seg_record->segment_id);
u32 duration = 0, seg_len = 0;
unsigned long start, end;
if (buf && ((offset + seg_hdr_len) >
BNXT_COREDUMP_BUF_LEN(buf_len))) {
rc = -ENOBUFS;
goto err;
}
start = jiffies;
rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
seg_record->segment_id);
goto next_seg;
}
/* Write segment data into the buffer */
rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
&seg_len, buf, buf_len,
offset + seg_hdr_len);
if (rc && rc == -ENOBUFS)
goto err;
else if (rc)
netdev_err(bp->dev,
"Failed to retrieve coredump for seg = %d\n",
seg_record->segment_id);
next_seg:
end = jiffies;
duration = jiffies_to_msecs(end - start);
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
rc, duration, 0);
if (buf) {
/* Write segment header into the buffer */
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len + seg_len;
}
*dump_len += seg_len;
seg_record =
(struct coredump_segment_record *)((u8 *)seg_record +
seg_record_len);
}
err:
if (buf)
bnxt_fill_coredump_record(bp, buf + offset, start_time,
start_utc, coredump.total_segs + 1,
rc);
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record);
if (rc == -ENOBUFS)
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
return rc;
}
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
if (dump_type == BNXT_DUMP_CRASH) {
#ifdef CONFIG_TEE_BNXT_FW
return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#else
return -EOPNOTSUPP;
#endif
} else {
return __bnxt_get_coredump(bp, buf, dump_len);
}
}
static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
struct hwrm_dbg_qcfg_output *resp;
struct hwrm_dbg_qcfg_input *req;
int rc, hdr_len = 0;
if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
return -EOPNOTSUPP;
if (dump_type == BNXT_DUMP_CRASH &&
!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
if (rc)
return rc;
req->fid = cpu_to_le16(0xffff);
if (dump_type == BNXT_DUMP_CRASH)
req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc)
goto get_dump_len_exit;
if (dump_type == BNXT_DUMP_CRASH) {
*dump_len = le32_to_cpu(resp->crashdump_size);
} else {
/* Driver adds coredump header and "HWRM_VER_GET response"
* segment additionally to coredump.
*/
hdr_len = sizeof(struct bnxt_coredump_segment_hdr) +
sizeof(struct hwrm_ver_get_output) +
sizeof(struct bnxt_coredump_record);
*dump_len = le32_to_cpu(resp->coredump_size) + hdr_len;
}
if (*dump_len <= hdr_len)
rc = -EINVAL;
get_dump_len_exit:
hwrm_req_drop(bp, req);
return rc;
}
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
u32 len = 0;
if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
if (dump_type == BNXT_DUMP_CRASH)
len = BNXT_CRASH_DUMP_LEN;
else
__bnxt_get_coredump(bp, NULL, &len);
}
return len;
}
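A note on the length calculation above: for a live dump, bnxt_hwrm_get_dump_len() adds a fixed driver-side overhead to the firmware-reported coredump_size, matching the pieces that __bnxt_get_coredump() itself writes (a segment header and the HWRM_VER_GET response at the front, and a bnxt_coredump_record trailer at the end). The helper below is purely illustrative and not part of this series; the name is hypothetical.

/* Illustrative only: the same three terms as hdr_len in
 * bnxt_hwrm_get_dump_len().
 */
static u32 bnxt_live_dump_overhead(void)
{
        return sizeof(struct bnxt_coredump_segment_hdr) +
               sizeof(struct hwrm_ver_get_output) +
               sizeof(struct bnxt_coredump_record);
}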
......@@ -10,6 +10,10 @@
#ifndef BNXT_COREDUMP_H
#define BNXT_COREDUMP_H
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/rtc.h>
struct bnxt_coredump_segment_hdr {
__u8 signature[4];
__le32 component_id;
......@@ -63,4 +67,51 @@ struct bnxt_coredump_record {
__u8 ioctl_high_version;
__le16 rsvd3[313];
};
#define BNXT_CRASH_DUMP_LEN (8 << 20)
#define COREDUMP_LIST_BUF_LEN 2048
#define COREDUMP_RETRIEVE_BUF_LEN 4096
struct bnxt_coredump {
void *data;
int data_size;
u16 total_segs;
};
#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
struct bnxt_hwrm_dbg_dma_info {
void *dest_buf;
int dest_buf_size;
u16 dma_len;
u16 seq_off;
u16 data_len_off;
u16 segs;
u32 seg_start;
u32 buf_len;
};
struct hwrm_dbg_cmn_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 host_dest_addr;
__le32 host_buf_len;
};
struct hwrm_dbg_cmn_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
#endif
......@@ -16,6 +16,18 @@
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_ethtool.h"
#include "bnxt_ulp.h"
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
bnxt_fw_reset(bp);
else
bnxt_fw_exception(bp);
}
static int
bnxt_dl_flash_update(struct devlink *dl,
......@@ -40,146 +52,208 @@ bnxt_dl_flash_update(struct devlink *dl,
return rc;
}
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
{
struct hwrm_func_cfg_input *req;
int rc;
if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (rc)
return rc;
req->fid = cpu_to_le16(0xffff);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT);
if (remote_reset)
req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS);
return hwrm_req_send(bp, req);
}
static char *bnxt_health_severity_str(enum bnxt_health_severity severity)
{
switch (severity) {
case SEVERITY_NORMAL: return "normal";
case SEVERITY_WARNING: return "warning";
case SEVERITY_RECOVERABLE: return "recoverable";
case SEVERITY_FATAL: return "fatal";
default: return "unknown";
}
}
static char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy)
{
switch (remedy) {
case REMEDY_DEVLINK_RECOVER: return "devlink recover";
case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle";
case REMEDY_POWER_CYCLE_HOST: return "host power cycle";
case REMEDY_FW_UPDATE: return "update firmware";
case REMEDY_HW_REPLACE: return "replace hardware";
default: return "unknown";
}
}
static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
u32 val;
struct bnxt_fw_health *h = bp->fw_health;
u32 fw_status, fw_resets;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return 0;
return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (!h->status_reliable)
return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
if (BNXT_FW_IS_BOOTING(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Not yet completed initialization");
mutex_lock(&h->lock);
fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (BNXT_FW_IS_BOOTING(fw_status)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
if (rc)
return rc;
} else if (BNXT_FW_IS_ERR(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Encountered fatal error and cannot recover");
goto unlock;
} else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
if (!h->severity) {
h->severity = SEVERITY_FATAL;
h->remedy = REMEDY_POWER_CYCLE_DEVICE;
h->diagnoses++;
devlink_health_report(h->fw_reporter,
"FW error diagnosed", h);
}
rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error");
if (rc)
return rc;
goto unlock;
rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
if (rc)
goto unlock;
} else {
rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
if (rc)
goto unlock;
}
if (val >> 16) {
rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
rc = devlink_fmsg_string_pair_put(fmsg, "Severity",
bnxt_health_severity_str(h->severity));
if (rc)
goto unlock;
if (h->severity) {
rc = devlink_fmsg_string_pair_put(fmsg, "Remedy",
bnxt_health_remedy_str(h->remedy));
if (rc)
return rc;
goto unlock;
if (h->remedy == REMEDY_DEVLINK_RECOVER) {
rc = devlink_fmsg_string_pair_put(fmsg, "Impact",
"traffic+ntuple_cfg");
if (rc)
goto unlock;
}
}
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
if (rc)
unlock:
mutex_unlock(&h->lock);
if (rc || !h->resets_reliable)
return rc;
return 0;
fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
if (rc)
return rc;
rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
if (rc)
return rc;
rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
if (rc)
return rc;
rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
if (rc)
return rc;
rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
if (rc)
return rc;
return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
}
static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
.name = "fw",
.diagnose = bnxt_fw_reporter_diagnose,
};
static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
struct netlink_ext_ack *extack)
static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg, void *priv_ctx,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
u32 dump_len;
void *data;
int rc;
if (!priv_ctx)
/* TODO: no firmware dump support in devlink_health_report() context */
if (priv_ctx)
return -EOPNOTSUPP;
bnxt_fw_reset(bp);
return -EINPROGRESS;
}
dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE);
if (!dump_len)
return -EIO;
static const
struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
.name = "fw_reset",
.recover = bnxt_fw_reset_recover,
};
data = vmalloc(dump_len);
if (!data)
return -ENOMEM;
static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
struct netlink_ext_ack *extack)
rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len);
if (!rc) {
rc = devlink_fmsg_pair_nest_start(fmsg, "core");
if (rc)
goto exit;
rc = devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
if (rc)
goto exit;
rc = devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
if (rc)
goto exit;
rc = devlink_fmsg_pair_nest_end(fmsg);
}
exit:
vfree(data);
return rc;
}
static int bnxt_fw_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
unsigned long event;
if (!priv_ctx)
return -EOPNOTSUPP;
if (bp->fw_health->severity == SEVERITY_FATAL)
return -ENODEV;
bp->fw_health->fatal = true;
event = fw_reporter_ctx->sp_event;
if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
bnxt_fw_reset(bp);
else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
bnxt_fw_exception(bp);
set_bit(BNXT_STATE_RECOVER, &bp->state);
__bnxt_fw_recover(bp);
return -EINPROGRESS;
}
static const
struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = bnxt_fw_fatal_recover,
static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
.name = "fw",
.diagnose = bnxt_fw_diagnose,
.dump = bnxt_fw_dump,
.recover = bnxt_fw_recover,
};
void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;
if (!health)
if (!health || health->fw_reporter)
return;
if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
goto err_recovery;
health->fw_reset_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_reset_reporter_ops,
health->fw_reporter =
devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
0, bp);
if (IS_ERR(health->fw_reset_reporter)) {
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_reset_reporter));
health->fw_reset_reporter = NULL;
bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
}
err_recovery:
if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
return;
if (!health->fw_reporter) {
health->fw_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_reporter_ops,
0, bp);
if (IS_ERR(health->fw_reporter)) {
netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_reporter = NULL;
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
return;
}
}
if (health->fw_fatal_reporter)
return;
health->fw_fatal_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_fatal_reporter_ops,
0, bp);
if (IS_ERR(health->fw_fatal_reporter)) {
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
health->fw_fatal_reporter = NULL;
if (IS_ERR(health->fw_reporter)) {
netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_reporter = NULL;
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}
}
......@@ -191,12 +265,6 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
if (!health)
return;
if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
health->fw_reset_reporter) {
devlink_health_reporter_destroy(health->fw_reset_reporter);
health->fw_reset_reporter = NULL;
}
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
return;
......@@ -204,82 +272,319 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
devlink_health_reporter_destroy(health->fw_reporter);
health->fw_reporter = NULL;
}
if (health->fw_fatal_reporter) {
devlink_health_reporter_destroy(health->fw_fatal_reporter);
health->fw_fatal_reporter = NULL;
}
}
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
void bnxt_devlink_health_fw_report(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
struct bnxt_fw_reporter_ctx fw_reporter_ctx;
fw_reporter_ctx.sp_event = event;
switch (event) {
case BNXT_FW_RESET_NOTIFY_SP_EVENT:
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
if (!fw_health->fw_fatal_reporter)
return;
devlink_health_report(fw_health->fw_fatal_reporter,
"FW fatal async event received",
&fw_reporter_ctx);
return;
}
if (!fw_health->fw_reset_reporter)
return;
int rc;
devlink_health_report(fw_health->fw_reset_reporter,
"FW non-fatal reset event received",
&fw_reporter_ctx);
if (!fw_health)
return;
case BNXT_FW_EXCEPTION_SP_EVENT:
if (!fw_health->fw_fatal_reporter)
return;
devlink_health_report(fw_health->fw_fatal_reporter,
"FW fatal error reported",
&fw_reporter_ctx);
if (!fw_health->fw_reporter) {
__bnxt_fw_recover(bp);
return;
}
mutex_lock(&fw_health->lock);
fw_health->severity = SEVERITY_RECOVERABLE;
fw_health->remedy = REMEDY_DEVLINK_RECOVER;
mutex_unlock(&fw_health->lock);
rc = devlink_health_report(fw_health->fw_reporter, "FW error reported",
fw_health);
if (rc == -ECANCELED)
__bnxt_fw_recover(bp);
}
void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy)
{
struct bnxt_fw_health *health = bp->fw_health;
struct bnxt_fw_health *fw_health = bp->fw_health;
u8 state;
if (healthy)
mutex_lock(&fw_health->lock);
if (healthy) {
fw_health->severity = SEVERITY_NORMAL;
state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
else
} else {
fw_health->severity = SEVERITY_FATAL;
fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE;
state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
if (health->fatal)
devlink_health_reporter_state_update(health->fw_fatal_reporter,
state);
else
devlink_health_reporter_state_update(health->fw_reset_reporter,
state);
health->fatal = false;
}
mutex_unlock(&fw_health->lock);
devlink_health_reporter_state_update(fw_health->fw_reporter, state);
}
void bnxt_dl_health_recovery_done(struct bnxt *bp)
void bnxt_dl_health_fw_recovery_done(struct bnxt *bp)
{
struct bnxt_fw_health *hlth = bp->fw_health;
struct bnxt_dl *dl = devlink_priv(bp->dl);
if (hlth->fatal)
devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
else
devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter);
bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset);
}
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack);
static void
bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
struct hwrm_fw_livepatch_output *resp)
{
int err = ((struct hwrm_err_output *)resp)->cmd_err;
switch (err) {
case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE:
netdev_err(bp->dev, "Illegal live patch opcode");
NL_SET_ERR_MSG_MOD(extack, "Invalid opcode");
break;
case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED:
NL_SET_ERR_MSG_MOD(extack, "Live patch operation not supported");
break;
case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED:
NL_SET_ERR_MSG_MOD(extack, "Live patch not found");
break;
case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED:
NL_SET_ERR_MSG_MOD(extack,
"Live patch deactivation failed. Firmware not patched.");
break;
case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL:
NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated");
break;
case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER:
NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch");
break;
case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE:
NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size");
break;
case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED:
NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
break;
default:
netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err);
NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
break;
}
}
static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
struct hwrm_fw_livepatch_query_output *query_resp;
struct hwrm_fw_livepatch_query_input *query_req;
struct hwrm_fw_livepatch_output *patch_resp;
struct hwrm_fw_livepatch_input *patch_req;
u32 installed = 0;
u16 flags;
u8 target;
int rc;
if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) {
NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch");
return -EOPNOTSUPP;
}
rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY);
if (rc)
return rc;
query_resp = hwrm_req_hold(bp, query_req);
rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH);
if (rc) {
hwrm_req_drop(bp, query_req);
return rc;
}
patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
patch_resp = hwrm_req_hold(bp, patch_req);
for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) {
query_req->fw_target = target;
rc = hwrm_req_send(bp, query_req);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to query packages");
break;
}
flags = le16_to_cpu(query_resp->status_flags);
if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
continue;
if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
!strncmp(query_resp->active_ver, query_resp->install_ver,
sizeof(query_resp->active_ver)))
continue;
patch_req->fw_target = target;
rc = hwrm_req_send(bp, patch_req);
if (rc) {
bnxt_dl_livepatch_report_err(bp, extack, patch_resp);
break;
}
installed++;
}
if (!rc && !installed) {
NL_SET_ERR_MSG_MOD(extack, "No live patches found");
rc = -ENOENT;
}
hwrm_req_drop(bp, query_req);
hwrm_req_drop(bp, patch_req);
return rc;
}
static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
enum devlink_reload_action action,
enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc = 0;
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
if (BNXT_PF(bp) && bp->pf.active_vfs) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported when VFs are allocated\n");
return -EOPNOTSUPP;
}
rtnl_lock();
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
rtnl_unlock();
return -ENODEV;
}
bnxt_ulp_stop(bp);
if (netif_running(bp->dev)) {
rc = bnxt_close_nic(bp, true, true);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to close");
dev_close(bp->dev);
rtnl_unlock();
break;
}
}
bnxt_vf_reps_free(bp);
rc = bnxt_hwrm_func_drv_unrgtr(bp);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
if (netif_running(bp->dev))
dev_close(bp->dev);
rtnl_unlock();
break;
}
bnxt_cancel_reservations(bp, false);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
return bnxt_dl_livepatch_activate(bp, extack);
if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) {
NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot");
return -EOPNOTSUPP;
}
if (!bnxt_hwrm_reset_permitted(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"Reset denied by firmware, it may be inhibited by remote driver");
return -EPERM;
}
rtnl_lock();
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
rtnl_unlock();
return -ENODEV;
}
if (netif_running(bp->dev))
set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
rc = bnxt_hwrm_firmware_reset(bp->dev,
FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
FW_RESET_REQ_FLAGS_RESET_GRACEFUL |
FW_RESET_REQ_FLAGS_FW_ACTIVATION);
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
rtnl_unlock();
}
break;
}
default:
rc = -EOPNOTSUPP;
}
return rc;
}
static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action,
enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc = 0;
*actions_performed = 0;
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
bnxt_fw_init_one(bp);
bnxt_vf_reps_alloc(bp);
if (netif_running(bp->dev))
rc = bnxt_open_nic(bp, true, true);
bnxt_ulp_start(bp, rc);
if (!rc) {
bnxt_reenable_sriov(bp);
bnxt_ptp_reapply_pps(bp);
}
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
unsigned long start = jiffies;
unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10;
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10;
if (!netif_running(bp->dev))
NL_SET_ERR_MSG_MOD(extack,
"Device is closed, not waiting for reset notice that will never come");
rtnl_unlock();
while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
if (time_after(jiffies, timeout)) {
NL_SET_ERR_MSG_MOD(extack, "Activation incomplete");
rc = -ETIMEDOUT;
break;
}
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
NL_SET_ERR_MSG_MOD(extack, "Activation aborted");
rc = -ENODEV;
break;
}
msleep(50);
}
rtnl_lock();
if (!rc)
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
break;
}
default:
return -EOPNOTSUPP;
}
if (!rc) {
bnxt_print_device_info(bp);
if (netif_running(bp->dev)) {
mutex_lock(&bp->link_lock);
bnxt_report_link(bp);
mutex_unlock(&bp->link_lock);
}
*actions_performed |= BIT(action);
} else if (netif_running(bp->dev)) {
dev_close(bp->dev);
}
rtnl_unlock();
return rc;
}
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
......@@ -287,6 +592,11 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
.info_get = bnxt_dl_info_get,
.flash_update = bnxt_dl_flash_update,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = bnxt_dl_reload_down,
.reload_up = bnxt_dl_reload_up,
};
static const struct devlink_ops bnxt_vf_dl_ops;
......@@ -430,6 +740,57 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
return 0;
}
#define BNXT_FW_SRT_PATCH "fw.srt.patch"
#define BNXT_FW_CRT_PATCH "fw.crt.patch"
static int bnxt_dl_livepatch_info_put(struct bnxt *bp,
struct devlink_info_req *req,
const char *key)
{
struct hwrm_fw_livepatch_query_input *query;
struct hwrm_fw_livepatch_query_output *resp;
u16 flags;
int rc;
if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH)
return 0;
rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY);
if (rc)
return rc;
if (!strcmp(key, BNXT_FW_SRT_PATCH))
query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW;
else if (!strcmp(key, BNXT_FW_CRT_PATCH))
query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW;
else
goto exit;
resp = hwrm_req_hold(bp, query);
rc = hwrm_req_send(bp, query);
if (rc)
goto exit;
flags = le16_to_cpu(resp->status_flags);
if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) {
resp->active_ver[sizeof(resp->active_ver) - 1] = '\0';
rc = devlink_info_version_running_put(req, key, resp->active_ver);
if (rc)
goto exit;
}
if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) {
resp->install_ver[sizeof(resp->install_ver) - 1] = '\0';
rc = devlink_info_version_stored_put(req, key, resp->install_ver);
if (rc)
goto exit;
}
exit:
hwrm_req_drop(bp, query);
return rc;
}
#define HWRM_FW_VER_STR_LEN 16
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
......@@ -554,8 +915,13 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
if (rc ||
!(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) {
if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf)))
return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
DEVLINK_INFO_VERSION_GENERIC_FW,
buf);
return 0;
}
buf[0] = 0;
strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
......@@ -583,8 +949,16 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
if (rc)
return rc;
rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
if (rc)
return rc;
return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
......@@ -712,6 +1086,32 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
return 0;
}
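/* Runtime handlers for the generic enable_remote_dev_reset parameter,
 * available only when firmware reports BNXT_FW_CAP_HOT_RESET_IF.
 */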
static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
return -EOPNOTSUPP;
ctx->val.vbool = bnxt_dl_get_remote_reset(dl);
return 0;
}
static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool);
if (rc)
return rc;
bnxt_dl_set_remote_reset(dl, ctx->val.vbool);
return rc;
}
static const struct devlink_param bnxt_dl_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
......@@ -734,17 +1134,25 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
NULL),
/* keep REMOTE_DEV_RESET last, it is excluded based on caps */
DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
bnxt_remote_dev_reset_get,
bnxt_remote_dev_reset_set, NULL),
};
static int bnxt_dl_params_register(struct bnxt *bp)
{
int num_params = ARRAY_SIZE(bnxt_dl_params);
int rc;
if (bp->hwrm_spec_code < 0x10600)
return 0;
if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
num_params--;
rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params);
if (rc)
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
......@@ -753,11 +1161,15 @@ static int bnxt_dl_params_register(struct bnxt *bp)
static void bnxt_dl_params_unregister(struct bnxt *bp)
{
int num_params = ARRAY_SIZE(bnxt_dl_params);
if (bp->hwrm_spec_code < 0x10600)
return;
if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
num_params--;
devlink_params_unregister(bp->dl, bnxt_dl_params, num_params);
}
int bnxt_dl_register(struct bnxt *bp)
......@@ -782,6 +1194,7 @@ int bnxt_dl_register(struct bnxt *bp)
bp->dl = dl;
bp_dl = devlink_priv(dl);
bp_dl->bp = bp;
bnxt_dl_set_remote_reset(dl, true);
/* Add switchdev eswitch mode setting, if SRIOV supported */
if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
......
......@@ -13,6 +13,7 @@
/* Struct to hold housekeeping info needed by devlink interface */
struct bnxt_dl {
struct bnxt *bp; /* back ptr to the controlling dev */
bool remote_reset;
};
static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
......@@ -20,6 +21,23 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
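/* Tell devlink that reload actions were performed by another (remote)
 * function that reset and reinitialized the device.
 */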
static inline void bnxt_dl_remote_reload(struct bnxt *bp)
{
devlink_remote_reload_actions_performed(bp->dl, 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
}
static inline bool bnxt_dl_get_remote_reset(struct devlink *dl)
{
return ((struct bnxt_dl *)devlink_priv(dl))->remote_reset;
}
static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
{
((struct bnxt_dl *)devlink_priv(dl))->remote_reset = value;
}
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
......@@ -53,9 +71,9 @@ enum bnxt_dl_version_type {
BNXT_VERSION_STORED,
};
void bnxt_devlink_health_fw_report(struct bnxt *bp);
void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
int bnxt_dl_register(struct bnxt *bp);
......
......@@ -427,6 +427,8 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
};
static const struct {
......@@ -2180,13 +2182,18 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
return rc;
}
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
u8 self_reset, u8 flags)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_fw_reset_input *req;
int rc;
if (!bnxt_hwrm_reset_permitted(bp)) {
netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
return -EPERM;
}
rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
if (rc)
return rc;
......@@ -2825,39 +2832,56 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
return retval;
}
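/* Copy the NVM package version string into ver; returns 0 on success. */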
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
struct bnxt *bp = netdev_priv(dev);
u16 index = 0;
char *pkgver;
u32 pkglen;
u8 *pkgbuf;
int rc;
rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
&index, NULL, &pkglen);
if (rc)
return rc;
pkgbuf = kzalloc(pkglen, GFP_KERNEL);
if (!pkgbuf) {
dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
pkglen);
return -ENOMEM;
}
rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
if (rc)
goto err;
pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
pkglen);
if (pkgver && *pkgver != 0 && isdigit(*pkgver))
strscpy(ver, pkgver, size);
else
rc = -ENOENT;
err:
kfree(pkgbuf);
return rc;
}
static void bnxt_get_pkgver(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
char buf[FW_VER_STR_LEN];
int len;
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
"/pkg %s", pkgver);
"/pkg %s", buf);
}
}
static int bnxt_get_eeprom(struct net_device *dev,
......@@ -3609,337 +3633,6 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return 0;
}
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
struct bnxt_hwrm_dbg_dma_info *info)
{
struct hwrm_dbg_cmn_input *cmn_req = msg;
__le16 *seq_ptr = msg + info->seq_off;
struct hwrm_dbg_cmn_output *cmn_resp;
u16 seq = 0, len, segs_off;
dma_addr_t dma_handle;
void *dma_buf, *resp;
int rc, off = 0;
dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
if (!dma_buf) {
hwrm_req_drop(bp, msg);
return -ENOMEM;
}
hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
cmn_resp = hwrm_req_hold(bp, msg);
resp = cmn_resp;
segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
total_segments);
cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
while (1) {
*seq_ptr = cpu_to_le16(seq);
rc = hwrm_req_send(bp, msg);
if (rc)
break;
len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
if (!seq &&
cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
info->segs = le16_to_cpu(*((__le16 *)(resp +
segs_off)));
if (!info->segs) {
rc = -EIO;
break;
}
info->dest_buf_size = info->segs *
sizeof(struct coredump_segment_record);
info->dest_buf = kmalloc(info->dest_buf_size,
GFP_KERNEL);
if (!info->dest_buf) {
rc = -ENOMEM;
break;
}
}
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
memcpy(info->dest_buf + off, dma_buf, len);
} else {
rc = -ENOBUFS;
break;
}
}
if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
info->dest_buf_size += len;
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
seq++;
off += len;
}
hwrm_req_drop(bp, msg);
return rc;
}
static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
struct bnxt_coredump *coredump)
{
struct bnxt_hwrm_dbg_dma_info info = {NULL};
struct hwrm_dbg_coredump_list_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
if (rc)
return rc;
info.dma_len = COREDUMP_LIST_BUF_LEN;
info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
data_len);
rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
if (!rc) {
coredump->data = info.dest_buf;
coredump->data_size = info.dest_buf_size;
coredump->total_segs = info.segs;
}
return rc;
}
static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
if (rc)
return rc;
hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
u16 segment_id, u32 *seg_len,
void *buf, u32 buf_len, u32 offset)
{
struct hwrm_dbg_coredump_retrieve_input *req;
struct bnxt_hwrm_dbg_dma_info info = {NULL};
int rc;
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
if (rc)
return rc;
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
data_len);
if (buf) {
info.dest_buf = buf + offset;
info.buf_len = buf_len;
info.seg_start = offset;
}
rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
if (!rc)
*seg_len = info.dest_buf_size;
return rc;
}
static void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
struct bnxt_coredump_segment_hdr *seg_hdr,
struct coredump_segment_record *seg_rec, u32 seg_len,
int status, u32 duration, u32 instance)
{
memset(seg_hdr, 0, sizeof(*seg_hdr));
memcpy(seg_hdr->signature, "sEgM", 4);
if (seg_rec) {
seg_hdr->component_id = (__force __le32)seg_rec->component_id;
seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
seg_hdr->low_version = seg_rec->version_low;
seg_hdr->high_version = seg_rec->version_hi;
} else {
/* For hwrm_ver_get response Component id = 2
* and Segment id = 0
*/
seg_hdr->component_id = cpu_to_le32(2);
seg_hdr->segment_id = 0;
}
seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
seg_hdr->length = cpu_to_le32(seg_len);
seg_hdr->status = cpu_to_le32(status);
seg_hdr->duration = cpu_to_le32(duration);
seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
seg_hdr->instance = cpu_to_le32(instance);
}
static void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
time64_t start, s16 start_utc, u16 total_segs,
int status)
{
time64_t end = ktime_get_real_seconds();
u32 os_ver_major = 0, os_ver_minor = 0;
struct tm tm;
time64_to_tm(start, 0, &tm);
memset(record, 0, sizeof(*record));
memcpy(record->signature, "cOrE", 4);
record->flags = 0;
record->low_version = 0;
record->high_version = 1;
record->asic_state = 0;
strlcpy(record->system_name, utsname()->nodename,
sizeof(record->system_name));
record->year = cpu_to_le16(tm.tm_year + 1900);
record->month = cpu_to_le16(tm.tm_mon + 1);
record->day = cpu_to_le16(tm.tm_mday);
record->hour = cpu_to_le16(tm.tm_hour);
record->minute = cpu_to_le16(tm.tm_min);
record->second = cpu_to_le16(tm.tm_sec);
record->utc_bias = cpu_to_le16(start_utc);
strcpy(record->commandline, "ethtool -w");
record->total_segments = cpu_to_le32(total_segs);
sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
record->os_ver_major = cpu_to_le32(os_ver_major);
record->os_ver_minor = cpu_to_le32(os_ver_minor);
strlcpy(record->os_name, utsname()->sysname, 32);
time64_to_tm(end, 0, &tm);
record->end_year = cpu_to_le16(tm.tm_year + 1900);
record->end_month = cpu_to_le16(tm.tm_mon + 1);
record->end_day = cpu_to_le16(tm.tm_mday);
record->end_hour = cpu_to_le16(tm.tm_hour);
record->end_minute = cpu_to_le16(tm.tm_min);
record->end_second = cpu_to_le16(tm.tm_sec);
record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
bp->ver_resp.chip_rev << 8 |
bp->ver_resp.chip_metal);
record->asic_id2 = 0;
record->coredump_status = cpu_to_le32(status);
record->ioctl_low_version = 0;
record->ioctl_high_version = 0;
}
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
struct coredump_segment_record *seg_record = NULL;
struct bnxt_coredump_segment_hdr seg_hdr;
struct bnxt_coredump coredump = {NULL};
time64_t start_time;
u16 start_utc;
int rc = 0, i;
if (buf)
buf_len = *dump_len;
start_time = ktime_get_real_seconds();
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);
/* First segment should be hwrm_ver_get response */
*dump_len = seg_hdr_len + ver_get_resp_len;
if (buf) {
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
0, 0, 0);
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len;
memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
offset += ver_get_resp_len;
}
rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
if (rc) {
netdev_err(bp->dev, "Failed to get coredump segment list\n");
goto err;
}
*dump_len += seg_hdr_len * coredump.total_segs;
seg_record = (struct coredump_segment_record *)coredump.data;
seg_record_len = sizeof(*seg_record);
for (i = 0; i < coredump.total_segs; i++) {
u16 comp_id = le16_to_cpu(seg_record->component_id);
u16 seg_id = le16_to_cpu(seg_record->segment_id);
u32 duration = 0, seg_len = 0;
unsigned long start, end;
if (buf && ((offset + seg_hdr_len) >
BNXT_COREDUMP_BUF_LEN(buf_len))) {
rc = -ENOBUFS;
goto err;
}
start = jiffies;
rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
seg_record->segment_id);
goto next_seg;
}
/* Write segment data into the buffer */
rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
&seg_len, buf, buf_len,
offset + seg_hdr_len);
if (rc && rc == -ENOBUFS)
goto err;
else if (rc)
netdev_err(bp->dev,
"Failed to retrieve coredump for seg = %d\n",
seg_record->segment_id);
next_seg:
end = jiffies;
duration = jiffies_to_msecs(end - start);
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
rc, duration, 0);
if (buf) {
/* Write segment header into the buffer */
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len + seg_len;
}
*dump_len += seg_len;
seg_record =
(struct coredump_segment_record *)((u8 *)seg_record +
seg_record_len);
}
err:
if (buf)
bnxt_fill_coredump_record(bp, buf + offset, start_time,
start_utc, coredump.total_segs + 1,
rc);
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record);
if (rc == -ENOBUFS)
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
return rc;
}
static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
......@@ -3971,10 +3664,7 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
bp->ver_resp.hwrm_fw_rsvd_8b;
dump->flag = bp->dump_flag;
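/* Coredump length depends on the dump type (live coredump vs. crash dump). */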
dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
return 0;
}
......@@ -3989,15 +3679,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
memset(buf, 0, dump->len);
dump->flag = bp->dump_flag;
return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
}
static int bnxt_get_ts_info(struct net_device *dev,
......
......@@ -22,49 +22,6 @@ struct bnxt_led_cfg {
u8 rsvd;
};
#define COREDUMP_LIST_BUF_LEN 2048
#define COREDUMP_RETRIEVE_BUF_LEN 4096
struct bnxt_coredump {
void *data;
int data_size;
u16 total_segs;
};
#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
struct bnxt_hwrm_dbg_dma_info {
void *dest_buf;
int dest_buf_size;
u16 dma_len;
u16 seq_off;
u16 data_len_off;
u16 segs;
u32 seg_start;
u32 buf_len;
};
struct hwrm_dbg_cmn_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 host_dest_addr;
__le32 host_buf_len;
};
struct hwrm_dbg_cmn_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
#define BNXT_CRASH_DUMP_LEN (8 << 20)
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
......@@ -94,8 +51,11 @@ u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
u8 self_reset, u8 flags);
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
u32 install_type);
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
......
......@@ -532,8 +532,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
#define HWRM_VERSION_RSVD 63
#define HWRM_VERSION_STR "1.10.2.63"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
......@@ -1587,6 +1587,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
......@@ -1956,6 +1958,18 @@ struct hwrm_func_cfg_output {
u8 valid;
};
/* hwrm_func_cfg_cmd_err (size:64b/8B) */
struct hwrm_func_cfg_cmd_err {
u8 code;
#define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
#define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
#define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
#define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
#define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
#define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
u8 unused_0[7];
};
/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
__le16 req_type;
......@@ -3601,7 +3615,15 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
......@@ -4040,7 +4062,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
/* rx_port_stats_ext (size:3776b/472B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
......@@ -4099,6 +4121,8 @@ struct rx_port_stats_ext {
__le64 rx_discard_packets_cos5;
__le64 rx_discard_packets_cos6;
__le64 rx_discard_packets_cos7;
__le64 rx_fec_corrected_blocks;
__le64 rx_fec_uncorrectable_blocks;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
......@@ -4372,7 +4396,10 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
__le16 flags2;
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
u8 unused_0[1];
u8 valid;
};
......@@ -6076,6 +6103,11 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
#define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_TOEPLITZ_CAP 0x8000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_XOR_CAP 0x10000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_CHKSM_CAP 0x20000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
......@@ -6206,7 +6238,15 @@ struct hwrm_vnic_rss_cfg_input {
__le64 ring_grp_tbl_addr;
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
u8 flags;
#define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
#define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
u8 rss_hash_function;
#define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_TOEPLITZ 0x0UL
#define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_XOR 0x1UL
#define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM 0x2UL
#define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_LAST VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM
u8 unused_1[4];
};
/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
......@@ -6331,7 +6371,24 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
#define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
u8 cmpl_coal_cnt;
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
__le16 flags;
#define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
__le64 page_tbl_addr;
......@@ -7099,6 +7156,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
......@@ -7234,6 +7292,7 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
__le32 flags;
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
......@@ -7834,11 +7893,11 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
u8 unused_0[3];
u8 valid;
};
/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
__le16 cmpl_ring;
......@@ -8414,6 +8473,86 @@ struct hwrm_fw_get_structured_data_cmd_err {
u8 unused_0[7];
};
/* hwrm_fw_livepatch_query_input (size:192b/24B) */
struct hwrm_fw_livepatch_query_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
u8 fw_target;
#define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
#define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
#define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
u8 unused_0[7];
};
/* hwrm_fw_livepatch_query_output (size:640b/80B) */
struct hwrm_fw_livepatch_query_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
char install_ver[32];
char active_ver[32];
__le16 status_flags;
#define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
#define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
u8 unused_0[5];
u8 valid;
};
/* hwrm_fw_livepatch_input (size:256b/32B) */
struct hwrm_fw_livepatch_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
u8 opcode;
#define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
#define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
#define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
u8 fw_target;
#define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
#define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
#define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
u8 loadtype;
#define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
#define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
#define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
u8 flags;
__le32 patch_len;
__le64 host_addr;
};
/* hwrm_fw_livepatch_output (size:128b/16B) */
struct hwrm_fw_livepatch_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 unused_0[7];
u8 valid;
};
/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
struct hwrm_fw_livepatch_cmd_err {
u8 code;
#define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
#define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
#define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
#define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
#define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
#define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
#define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
#define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
#define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
#define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
#define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
u8 unused_0[7];
};
/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
struct hwrm_exec_fwd_resp_input {
__le16 req_type;
......
......@@ -11,9 +11,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
#include "bnxt_hsi.h"
......
......@@ -10,6 +10,9 @@
#ifndef BNXT_PTP_H
#define BNXT_PTP_H
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#define BNXT_PTP_GRC_WIN 6
#define BNXT_PTP_GRC_WIN_BASE 0x6000
......