Commit 14026192 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Error recovery optimizations.

This series implements optimizations to error recovery.  One
patch adds an echo/reply mechanism with the firmware to enhance error
detection.  The other patches speed up the recovery process by
polling config space earlier and by selectively initializing
context memory during re-initialization.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c4762993 f4d95c3c
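For context, a minimal standalone C sketch (userspace, not driver code) of the echo/reply handshake this series adds: firmware raises an ECHO_REQUEST async event carrying data1/data2, the event handler stashes the payload and sets a service-task bit, and the deferred task answers with HWRM_FUNC_ECHO_RESPONSE echoing the same payload back. The hwrm_send_echo_response() stub and the boolean flag are stand-ins for the real hwrm_send_message() call and the bp->sp_event bitmap.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fw_health {
	uint32_t echo_req_data1;
	uint32_t echo_req_data2;
};

static struct fw_health health;
static bool echo_pending;	/* stands in for BNXT_FW_ECHO_REQUEST_SP_EVENT */

/* Stand-in for hwrm_send_message(HWRM_FUNC_ECHO_RESPONSE): the reply
 * simply carries the request payload back unchanged. */
static void hwrm_send_echo_response(uint32_t data1, uint32_t data2)
{
	printf("FUNC_ECHO_RESPONSE: data1=0x%x data2=0x%x\n",
	       (unsigned int)data1, (unsigned int)data2);
}

/* Async-event path: record the payload and defer the reply, mirroring
 * the ECHO_REQUEST case in bnxt_async_event_process(). */
static void handle_echo_request(uint32_t data1, uint32_t data2)
{
	health.echo_req_data1 = data1;
	health.echo_req_data2 = data2;
	echo_pending = true;
}

/* Deferred worker, mirroring the bnxt_fw_echo_reply() call site in
 * bnxt_sp_task(). */
static void sp_task(void)
{
	if (echo_pending) {
		echo_pending = false;
		hwrm_send_echo_response(health.echo_req_data1,
					health.echo_req_data2);
	}
}

int main(void)
{
	handle_echo_request(0x1234, 0xabcd);	/* firmware async event */
	sp_task();				/* service task runs later */
	return 0;
}

A completed round trip lets the firmware confirm that the driver's control path is still responsive, which is the error-detection enhancement the cover letter refers to.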
@@ -257,6 +257,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -2054,14 +2055,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
- if (!fw_health->enabled)
+ if (!fw_health->enabled) {
+ netif_info(bp, drv, bp->dev,
+ "Error recovery info: error recovery[0]\n");
break;
- netif_info(bp, drv, bp->dev,
- "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
- fw_health->enabled, fw_health->master,
- bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG),
- bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
+ }
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
@@ -2070,6 +2068,10 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
fw_health->master, fw_health->last_fw_reset_cnt,
bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
@@ -2099,6 +2101,20 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_sched_reset(bp, rxr);
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
struct bnxt_fw_health *fw_health = bp->fw_health;
netif_notice(bp, hw, bp->dev,
"Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
data1, data2);
if (fw_health) {
fw_health->echo_req_data1 = data1;
fw_health->echo_req_data2 = data2;
set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
break;
}
goto async_event_process_exit;
}
default:
goto async_event_process_exit;
}
@@ -2688,6 +2704,23 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
{
u8 init_val = mem_init->init_val;
u16 offset = mem_init->offset;
u8 *p2 = p;
int i;
if (!init_val)
return;
if (offset == BNXT_MEM_INVALID_OFFSET) {
memset(p, init_val, len);
return;
}
for (i = 0; i < len; i += mem_init->size)
*(p2 + i + offset) = init_val;
}
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -2747,9 +2780,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->init_val)
- memset(rmem->pg_arr[i], rmem->init_val,
- rmem->page_size);
+ if (rmem->mem_init)
+ bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -6750,6 +6783,39 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
return rc;
}
static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
struct hwrm_func_backing_store_qcaps_output *resp)
{
struct bnxt_mem_init *mem_init;
u16 init_mask;
u8 init_val;
u8 *offset;
int i;
init_val = resp->ctx_kind_initializer;
init_mask = le16_to_cpu(resp->ctx_init_mask);
offset = &resp->qp_init_offset;
mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
mem_init->init_val = init_val;
mem_init->offset = BNXT_MEM_INVALID_OFFSET;
if (!init_mask)
continue;
if (i == BNXT_CTX_MEM_INIT_STAT)
offset = &resp->stat_init_offset;
if (init_mask & (1 << i))
mem_init->offset = *offset * 4;
else
mem_init->init_val = 0;
}
ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
}
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_input req = {0};
@@ -6804,7 +6870,9 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
- ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+ bnxt_init_ctx_initializer(ctx, resp);
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
@@ -6834,6 +6902,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
{
u8 pg_size = 0;
if (!rmem->nr_pages)
return;
if (BNXT_PAGE_SHIFT == 13)
pg_size = 1 << 4;
else if (BNXT_PAGE_SIZE == 16)
@@ -6978,7 +7049,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, bool use_init_val)
+ u8 depth, struct bnxt_mem_init *mem_init)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -7016,8 +7087,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7032,8 +7102,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -7097,6 +7166,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
struct bnxt_mem_init *init;
u32 mem_size, ena, entries;
u32 entries_sp, min;
u32 num_mr, num_ah;
@@ -7124,39 +7194,54 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->qp_mem;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
extra_qps;
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->qp_entry_size) {
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->srq_mem;
ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->srq_entry_size) {
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->cq_mem;
ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->cq_entry_size) {
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->vnic_mem;
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->vnic_entry_size) {
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->stat_entry_size) {
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
@@ -7169,10 +7254,13 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
num_mr = 1024 * 256;
num_ah = 1024 * 128;
ctx_pg->entries = num_mr + num_ah;
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
- if (rc)
- return rc;
+ if (ctx->mrav_entry_size) {
+ mem_size = ctx->mrav_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
+ if (rc)
+ return rc;
+ }
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
if (ctx->mrav_num_entries_units)
ctx_pg->entries =
@@ -7181,10 +7269,12 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tim_entry_size) {
+ mem_size = ctx->tim_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
@@ -7198,10 +7288,13 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = i ? entries : entries_sp;
- mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tqm_entry_size) {
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
+ NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
}
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
@@ -10915,6 +11008,11 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
* kernel memory.
*/
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
u16 val = 0;
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
bnxt_tx_disable(bp);
bnxt_disable_napi(bp);
bnxt_disable_int_sync(bp);
@@ -11126,6 +11224,17 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
static void bnxt_fw_echo_reply(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
struct hwrm_func_echo_response_input req = {0};
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -11193,6 +11302,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
bnxt_chk_missed_irq(bp);
if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
bnxt_fw_echo_reply(bp);
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -11600,6 +11712,20 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
u32 val;
if (!bp->fw_reset_min_dsecs) {
u16 val;
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID,
&val);
if (val == 0xffff) {
if (bnxt_fw_reset_timeout(bp)) {
netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
goto fw_reset_abort;
}
bnxt_queue_fw_reset_work(bp, HZ / 1000);
return;
}
}
val = bnxt_fw_health_readl(bp,
BNXT_FW_RESET_INPROG_REG);
if (val)
......
@@ -18,7 +18,7 @@
*/
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
- #define DRV_VER_UPD 1
+ #define DRV_VER_UPD 2
#include <linux/ethtool.h>
#include <linux/interrupt.h>
@@ -714,6 +714,13 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
struct bnxt_mem_init {
u8 init_val;
u16 offset;
#define BNXT_MEM_INVALID_OFFSET 0xffff
u16 size;
};
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -723,7 +730,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- u8 init_val;
+ struct bnxt_mem_init *mem_init;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -1474,7 +1481,6 @@ struct bnxt_ctx_mem_info {
u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
u8 tqm_fp_rings_count;
u32 flags;
@@ -1488,6 +1494,15 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
#define BNXT_CTX_MEM_INIT_QP 0
#define BNXT_CTX_MEM_INIT_SRQ 1
#define BNXT_CTX_MEM_INIT_CQ 2
#define BNXT_CTX_MEM_INIT_VNIC 3
#define BNXT_CTX_MEM_INIT_STAT 4
#define BNXT_CTX_MEM_INIT_MRAV 5
#define BNXT_CTX_MEM_INIT_MAX 6
struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
};
struct bnxt_fw_health {
@@ -1516,6 +1531,8 @@ struct bnxt_fw_health {
u32 fw_reset_seq_regs[16];
u32 fw_reset_seq_vals[16];
u32 fw_reset_seq_delay_msec[16];
u32 echo_req_data1;
u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_reset_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
@@ -1925,6 +1942,7 @@ struct bnxt {
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
struct delayed_work fw_reset_task;
int fw_reset_state;
......
@@ -103,6 +103,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
#define HWRM_FUNC_ECHO_RESPONSE 0xbUL
#define HWRM_ERROR_RECOVERY_QCFG 0xcUL
#define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
@@ -501,8 +502,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
- #define HWRM_VERSION_RSVD 11
- #define HWRM_VERSION_STR "1.10.2.11"
+ #define HWRM_VERSION_RSVD 16
+ #define HWRM_VERSION_STR "1.10.2.16"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -723,7 +724,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x43UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1052,6 +1054,26 @@ struct hwrm_async_event_cmpl_deferred_response {
__le32 event_data1;
};
/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
struct hwrm_async_event_cmpl_echo_request {
__le16 type;
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
__le32 event_data2;
u8 opaque_v;
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
#define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
};
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1294,6 +1316,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1339,6 +1365,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
#define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
#define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1474,6 +1501,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2063,8 +2092,32 @@ struct hwrm_func_backing_store_qcaps_output {
u8 tqm_fp_rings_count;
u8 stat_init_offset;
u8 mrav_init_offset;
- u8 rsvd[6];
- u8 valid;
+ u8 tqm_fp_rings_count_ext;
+ u8 rsvd[5];
+ u8 valid;
};
/* tqm_fp_ring_cfg (size:128b/16B) */
struct tqm_fp_ring_cfg {
u8 tqm_ring_pg_size_tqm_ring_lvl;
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
u8 unused[3];
__le32 tqm_ring_num_entries;
__le64 tqm_ring_page_dir;
};
/* hwrm_func_backing_store_cfg_input (size:2432b/304B) */
@@ -2560,6 +2613,27 @@ struct hwrm_error_recovery_qcfg_output {
u8 valid;
};
/* hwrm_func_echo_response_input (size:192b/24B) */
struct hwrm_func_echo_response_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 event_data1;
__le32 event_data2;
};
/* hwrm_func_echo_response_output (size:128b/16B) */
struct hwrm_func_echo_response_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 unused_0[7];
u8 valid;
};
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -3627,7 +3701,7 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
#define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
#define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1 0x80UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -5392,6 +5466,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
#define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
#define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
#define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5725,7 +5800,7 @@ struct hwrm_ring_alloc_output {
u8 valid;
};
- /* hwrm_ring_free_input (size:192b/24B) */
+ /* hwrm_ring_free_input (size:256b/32B) */
struct hwrm_ring_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5740,9 +5815,13 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
#define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 unused_0;
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
__le16 ring_id;
- u8 unused_1[4];
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
};
/* hwrm_ring_free_output (size:128b/16B) */
@@ -7562,7 +7641,13 @@ struct hwrm_fw_qstatus_output {
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 unused_0[6];
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
u8 valid;
};
......