Commit 85fd8011 authored by David S. Miller

Merge branch 'bnxt_en-TPA-57500'

Michael Chan says:

====================
bnxt_en: Add TPA (GRO_HW and LRO) on 57500 chips.

This patchset adds TPA v2 support on the 57500 chips.  TPA v2 is
different from the legacy TPA scheme on older chips and requires major
refactoring and restructuring of the existing TPA logic.  The main
difference is that the new TPA v2 has on-the-fly aggregation buffer
completions before a TPA packet is completed.  The larger aggregation
ID space also requires a new ID mapping logic to make it more
memory efficient.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 473d924d 49c98421
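The "ID mapping logic" mentioned above can be read off the bnxt_tpa_idx_map structure added later in this diff: the 16-bit aggregation ID carried in the 57500-series TPA completions selects an entry in agg_id_tbl[], which records one of the MAX_TPA_P5 (256) software TPA slots, while agg_idx_bmap tracks which slots are free; the on-the-fly aggregation completions are buffered per slot in agg_arr/agg_count until the TPA end completion arrives. A minimal sketch of such a lookup follows; the helper name and collision policy are illustrative only (not necessarily the driver's exact code), it relies on the kernel bitmap helpers, and it assumes the hardware never hands out more IDs than agg_id_tbl[] has entries.

/* Illustrative sketch: map a 16-bit hardware agg_id to one of the
 * MAX_TPA_P5 (256) software TPA slots.  The low bits are tried as a
 * direct index first; on collision the first free slot is used.
 */
static u16 example_alloc_agg_idx(struct bnxt_tpa_idx_map *map, u16 agg_id)
{
	u16 idx = agg_id & MAX_TPA_P5_MASK;	/* 0..255 */

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;		/* remember hw -> sw mapping */
	return idx;
}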
@@ -113,6 +113,7 @@ struct tx_cmp {
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
#define CMP_TYPE_RX_TPA_AGG_CMP 22
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -263,14 +264,21 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
#define RX_AGG_CMP_AGG_ID (0xffff << 16)
#define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
#define TPA_AGG_AGG_ID(rx_agg) \
((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \
RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
struct rx_tpa_start_cmp {
__le32 rx_tpa_start_cmp_len_flags_type;
#define RX_TPA_START_CMP_TYPE (0x3f << 0)
#define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
#define RX_TPA_START_CMP_FLAGS_SHIFT 6
#define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
@@ -278,6 +286,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
#define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
#define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
@@ -291,6 +300,8 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
#define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -308,6 +319,14 @@ struct rx_tpa_start_cmp {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
#define TPA_START_AGG_ID_P5(rx_tpa_start) \
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5)
#define TPA_START_ERROR(rx_tpa_start) \
((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -315,10 +334,20 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
#define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
__le32 rx_tpa_start_cmp_metadata;
__le32 rx_tpa_start_cmp_cfa_code_v2;
#define RX_TPA_START_CMP_V2 (0x1 << 0)
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
__le32 rx_tpa_start_cmp_hdr_info;
@@ -332,6 +361,11 @@ struct rx_tpa_start_cmp_ext {
(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
#define TPA_START_ERROR_CODE(rx_tpa_start) \
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -361,6 +395,8 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
#define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
#define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
#define RX_TPA_END_GRO_TS (0x1 << 31)
@@ -370,6 +406,18 @@ struct rx_tpa_end_cmp {
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
#define TPA_END_AGG_ID_P5(rx_tpa_end) \
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5)
#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
#define TPA_END_AGG_BUFS(rx_tpa_end) \
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
#define TPA_END_TPA_SEGS(rx_tpa_end) \
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
@@ -389,6 +437,10 @@ struct rx_tpa_end_cmp {
struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_dup_acks;
#define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16)
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16
#define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24)
#define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24
__le32 rx_tpa_end_cmp_seg_len;
#define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
@@ -396,7 +448,13 @@ struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_errors_v2;
#define RX_TPA_END_CMP_V2 (0x1 << 0)
#define RX_TPA_END_CMP_ERRORS (0x3 << 1)
#define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1)
#define RX_TPA_END_CMPL_ERRORS_SHIFT 1
#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1)
#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
u32 rx_tpa_end_cmp_start_opaque;
};
@@ -405,6 +463,15 @@ struct rx_tpa_end_cmp_ext {
((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
cpu_to_le32(RX_TPA_END_CMP_ERRORS))
#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \
((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \
RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5)
#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \
((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5)
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -487,6 +554,9 @@ struct nqe_cn {
#define BNXT_DEFAULT_TX_RING_SIZE 511
#define MAX_TPA 64
#define MAX_TPA_P5 256
#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1)
#define MAX_TPA_SEGS_P5 0x3f
#if (BNXT_PAGE_SHIFT == 16)
#define MAX_RX_PAGES 1
@@ -768,6 +838,15 @@ struct bnxt_tpa_info {
((hdr_info) & 0x1ff)
u16 cfa_code; /* cfa_code in TPA start compl */
u8 agg_count;
struct rx_agg_cmp *agg_arr;
};
#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG)
struct bnxt_tpa_idx_map {
u16 agg_id_tbl[1024];
unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
};
struct bnxt_rx_ring_info {
@@ -797,6 +876,7 @@ struct bnxt_rx_ring_info {
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
struct bnxt_tpa_info *rx_tpa;
struct bnxt_tpa_idx_map *rx_tpa_idx_map;
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
@@ -1282,7 +1362,9 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
#define CHIP_NUM_57500 0x1750
#define CHIP_NUM_57508 0x1750
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
@@ -1379,12 +1461,14 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
!(bp->flags & BNXT_FLAG_CHIP_P5) && \
!is_kdump_kernel())
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
(!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
(bp)->max_tpa_v2) && !is_kdump_kernel())
/* Chip class phase 5 */
#define BNXT_CHIP_P5(bp) \
((bp)->chip_num == CHIP_NUM_57500)
#define BNXT_CHIP_P5(bp) \
((bp)->chip_num == CHIP_NUM_57508 || \
(bp)->chip_num == CHIP_NUM_57504 || \
(bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1414,6 +1498,8 @@ struct bnxt {
u16, void *, u8 *, dma_addr_t,
unsigned int);
u16 max_tpa_v2;
u16 max_tpa;
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
u16 rx_offset;
@@ -1525,6 +1611,7 @@ struct bnxt {
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
u8 pri2cos[8];
u8 pri2cos_valid;
......
@@ -137,7 +137,44 @@ static int bnxt_set_coalesce(struct net_device *dev,
return rc;
}
#define BNXT_NUM_STATS 22
static const char * const bnxt_ring_stats_str[] = {
"rx_ucast_packets",
"rx_mcast_packets",
"rx_bcast_packets",
"rx_discards",
"rx_drops",
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
"tx_discards",
"tx_drops",
"tx_ucast_bytes",
"tx_mcast_bytes",
"tx_bcast_bytes",
};
static const char * const bnxt_ring_tpa_stats_str[] = {
"tpa_packets",
"tpa_bytes",
"tpa_events",
"tpa_aborts",
};
static const char * const bnxt_ring_tpa2_stats_str[] = {
"rx_tpa_eligible_pkt",
"rx_tpa_eligible_bytes",
"rx_tpa_pkt",
"rx_tpa_bytes",
"rx_tpa_errors",
};
static const char * const bnxt_ring_sw_stats_str[] = {
"rx_l4_csum_errors",
"missed_irqs",
};
#define BNXT_RX_STATS_ENTRY(counter) \
{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -207,6 +244,20 @@ static int bnxt_set_coalesce(struct net_device *dev,
BNXT_TX_STATS_EXT_COS_ENTRY(6), \
BNXT_TX_STATS_EXT_COS_ENTRY(7) \
#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
__stringify(counter##_pri##n) }
@@ -352,6 +403,7 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};
static const struct {
@@ -417,9 +469,29 @@ static const struct {
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
if (BNXT_SUPPORTS_TPA(bp)) {
if (bp->max_tpa_v2)
return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
}
return 0;
}
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
int num_stats;
num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
ARRAY_SIZE(bnxt_ring_sw_stats_str) +
bnxt_get_num_tpa_ring_stats(bp);
return num_stats * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
{
int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
int num_stats = bnxt_get_num_ring_stats(bp);
num_stats += BNXT_NUM_SW_FUNC_STATS;
@@ -460,10 +532,11 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
bnxt_get_num_tpa_ring_stats(bp);
if (!bp->bnapi) {
j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
goto skip_ring_stats;
}
@@ -551,57 +624,40 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
u32 i;
static const char * const *str;
u32 i, j, num_str;
switch (stringset) {
/* The number of strings must match BNXT_NUM_STATS defined above. */
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
sprintf(buf, "[%d]: rx_ucast_packets", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_mcast_packets", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_bcast_packets", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_discards", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_drops", i);
num_str = ARRAY_SIZE(bnxt_ring_stats_str);
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i,
bnxt_ring_stats_str[j]);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_ucast_bytes", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_mcast_bytes", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_bcast_bytes", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tx_ucast_packets", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tx_mcast_packets", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tx_bcast_packets", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tx_discards", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tx_drops", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tx_ucast_bytes", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tx_mcast_bytes", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tx_bcast_bytes", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tpa_packets", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tpa_bytes", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tpa_events", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: tpa_aborts", i);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: rx_l4_csum_errors", i);
}
if (!BNXT_SUPPORTS_TPA(bp))
goto skip_tpa_stats;
if (bp->max_tpa_v2) {
num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
str = bnxt_ring_tpa2_stats_str;
} else {
num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
str = bnxt_ring_tpa_stats_str;
}
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i, str[j]);
buf += ETH_GSTRING_LEN;
sprintf(buf, "[%d]: missed_irqs", i);
}
skip_tpa_stats:
num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i,
bnxt_ring_sw_stats_str[j]);
buf += ETH_GSTRING_LEN;
}
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
strcpy(buf, bnxt_sw_func_stats[i].string);
buf += ETH_GSTRING_LEN;
......
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2019 Broadcom Limited
* Copyright (c) 2014-2018 Broadcom Limited
* Copyright (c) 2018-2019 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,15 +40,15 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
#define TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY 0x8009UL
#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY
/* tlv (size:64b/8B) */
@@ -267,7 +268,6 @@ struct cmd_nums {
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_CFA_TFLIB 0x125UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -313,6 +313,7 @@ struct cmd_nums {
#define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -410,8 +411,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
#define HWRM_VERSION_RSVD 69
#define HWRM_VERSION_RSVD 89
#define HWRM_VERSION_STR "1.10.0.69"
#define HWRM_VERSION_STR "1.10.0.89"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -624,6 +625,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1122,6 +1125,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
#define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1241,6 +1245,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
#define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2916,7 +2921,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
/* rx_port_stats_ext (size:2624b/328B) */
/* rx_port_stats_ext (size:3648b/456B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -2959,6 +2964,22 @@ struct rx_port_stats_ext {
__le64 rx_buffer_passed_threshold;
__le64 rx_pcs_symbol_err;
__le64 rx_corrected_bits;
__le64 rx_discard_bytes_cos0;
__le64 rx_discard_bytes_cos1;
__le64 rx_discard_bytes_cos2;
__le64 rx_discard_bytes_cos3;
__le64 rx_discard_bytes_cos4;
__le64 rx_discard_bytes_cos5;
__le64 rx_discard_bytes_cos6;
__le64 rx_discard_bytes_cos7;
__le64 rx_discard_packets_cos0;
__le64 rx_discard_packets_cos1;
__le64 rx_discard_packets_cos2;
__le64 rx_discard_packets_cos3;
__le64 rx_discard_packets_cos4;
__le64 rx_discard_packets_cos5;
__le64 rx_discard_packets_cos6;
__le64 rx_discard_packets_cos7;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -6115,6 +6136,21 @@ struct hwrm_cfa_flow_alloc_output {
u8 valid;
};
/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_flow_alloc_cmd_err {
u8 code;
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
#define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
u8 unused_0[7];
};
/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
@@ -6305,7 +6341,7 @@ struct hwrm_cfa_eem_qcaps_input {
__le32 unused_0;
};
/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */
/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
struct hwrm_cfa_eem_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -6322,15 +6358,17 @@ struct hwrm_cfa_eem_qcaps_output {
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
__le32 max_entries_supported;
__le16 key_entry_size;
__le16 record_entry_size;
__le16 efc_entry_size;
u8 unused_1;
__le16 fid_entry_size;
u8 unused_1[7];
u8 valid;
};
/* hwrm_cfa_eem_cfg_input (size:320b/40B) */
/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
struct hwrm_cfa_eem_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6350,6 +6388,9 @@ struct hwrm_cfa_eem_cfg_input {
__le16 key1_ctx_id;
__le16 record_ctx_id;
__le16 efc_ctx_id;
__le16 fid_ctx_id;
__le16 unused_2;
__le32 unused_3;
};
/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
@@ -6375,7 +6416,7 @@ struct hwrm_cfa_eem_qcfg_input {
__le32 unused_0;
};
/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */
/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
struct hwrm_cfa_eem_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -6386,7 +6427,12 @@ struct hwrm_cfa_eem_qcfg_output {
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries;
u8 unused_0[7];
__le16 key0_ctx_id;
__le16 key1_ctx_id;
__le16 record_ctx_id;
__le16 efc_ctx_id;
__le16 fid_ctx_id;
u8 unused_2[5];
u8 valid;
};
@@ -6567,6 +6613,31 @@ struct ctx_hw_stats {
__le64 tpa_aborts;
};
/* ctx_hw_stats_ext (size:1344b/168B) */
struct ctx_hw_stats_ext {
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
__le64 rx_discard_pkts;
__le64 rx_drop_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
__le64 tx_discard_pkts;
__le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
__le64 rx_tpa_eligible_pkt;
__le64 rx_tpa_eligible_bytes;
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
};
/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
@@ -6578,7 +6649,8 @@ struct hwrm_stat_ctx_alloc_input {
__le32 update_period_ms;
u8 stat_ctx_flags;
#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
u8 unused_0[3];
u8 unused_0;
__le16 stats_dma_length;
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -7204,7 +7276,9 @@ struct coredump_segment_record {
u8 version_hi;
u8 version_low;
u8 seg_flags;
u8 unused_0[7];
u8 compress_flags;
#define SFLAG_COMPRESSED_ZLIB 0x1UL
u8 unused_0[6];
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7729,6 +7803,9 @@ struct hwrm_nvm_set_variable_input {
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
#define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
#define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
#define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
u8 unused_0;
};
......