Commit 05fafbfb authored by Yuval Mintz's avatar Yuval Mintz Committed by David S. Miller

qed: utilize FW 8.10.10.0

This new firmware for the qed* adapters fixes several issues:
 - Better blocking of malicious VFs.
 - After FLR, Tx-switching [internal routing] of packets might
   be incorrect.
 - Deletion of unicast MAC filters would sometimes have the side-effect
   of corrupting the MAC filters configured for a device.
It also contains fixes for future qed* drivers that *hopefully* would be
sent for review in the near future.

In addition, it would allow driver some new functionality, including:
 - Allowing PF/VF driver compatibility with old drivers [running
   pre-8.10.5.0 firmware].
 - Better debug facilities.

This would also bump the qed* driver versions to 8.10.9.20.
Signed-off-by: default avatarYuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 75d67207
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
#include "qed_hsi.h" #include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass; extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.7.1.20" #define DRV_MODULE_VERSION "8.10.9.20"
#define MAX_HWFNS_PER_DEVICE (4) #define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16 #define NAME_SIZE 16
......
...@@ -772,6 +772,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, ...@@ -772,6 +772,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
} }
/* pretend to original PF */ /* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
...@@ -782,34 +785,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, ...@@ -782,34 +785,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
static int qed_hw_init_port(struct qed_hwfn *p_hwfn, static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode) struct qed_ptt *p_ptt, int hw_mode)
{ {
int rc = 0; return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
p_hwfn->port_id, hw_mode);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
if (rc)
return rc;
if (hw_mode & (1 << MODE_MF_SI)) {
u8 pf_id = 0;
if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"PF[%08x] is first eth on engine\n", pf_id);
/* We should have configured BIT for ppfid, i.e., the
* relative function number in the port. But there's a
* bug in LLH in BB where the ppfid is actually engine
* based, so we need to take this into account.
*/
qed_wr(p_hwfn, p_ptt,
NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
}
/* Take the protocol-based hit vector if there is a hit,
* otherwise take the other vector.
*/
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
}
return rc;
} }
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
...@@ -878,21 +855,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, ...@@ -878,21 +855,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */ /* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
if (hw_mode & (1 << MODE_MF_SI)) {
u8 pf_id = 0;
u32 val = 0;
if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
if (p_hwfn->rel_pf_id == pf_id) {
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"PF[%d] is first ETH on engine\n",
pf_id);
val = 1;
}
qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
}
}
if (b_hw_start) { if (b_hw_start) {
/* enable interrupts */ /* enable interrupts */
qed_int_igu_enable(p_hwfn, p_ptt, int_mode); qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
......
This diff is collapsed.
...@@ -534,7 +534,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data) ...@@ -534,7 +534,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
/* First Dword contains metadata and should be skipped */ /* First Dword contains metadata and should be skipped */
buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32)); buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset; offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset); fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset; offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
......
...@@ -802,34 +802,6 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, ...@@ -802,34 +802,6 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
return size; return size;
} }
int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *p_pf)
{
struct public_func shmem_info;
int i;
/* Find first Ethernet interface in port */
for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
i += p_hwfn->cdev->num_ports_in_engines) {
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
MCP_PF_ID_BY_REL(p_hwfn, i));
if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
continue;
if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
FUNC_MF_CFG_PROTOCOL_ETHERNET) {
*p_pf = (u8)i;
return 0;
}
}
DP_NOTICE(p_hwfn,
"Failed to find on port an ethernet interface in MF_SI mode\n");
return -EINVAL;
}
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{ {
struct qed_mcp_function_info *p_info; struct qed_mcp_function_info *p_info;
......
...@@ -500,6 +500,4 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, ...@@ -500,6 +500,4 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link, struct qed_mcp_link_state *p_link,
u8 min_bw); u8 min_bw);
int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *p_pf);
#endif #endif
...@@ -116,8 +116,14 @@ ...@@ -116,8 +116,14 @@
0x1009c4UL 0x1009c4UL
#define QM_REG_PF_EN \ #define QM_REG_PF_EN \
0x2f2ea4UL 0x2f2ea4UL
#define TCFC_REG_WEAK_ENABLE_VF \
0x2d0704UL
#define TCFC_REG_STRONG_ENABLE_PF \ #define TCFC_REG_STRONG_ENABLE_PF \
0x2d0708UL 0x2d0708UL
#define TCFC_REG_STRONG_ENABLE_VF \
0x2d070cUL
#define CCFC_REG_WEAK_ENABLE_VF \
0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \ #define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL 0x2e0708UL
#define PGLUE_B_REG_PGL_ADDR_88_F0 \ #define PGLUE_B_REG_PGL_ADDR_88_F0 \
......
...@@ -1280,6 +1280,13 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, ...@@ -1280,6 +1280,13 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp)); memset(resp, 0, sizeof(*resp));
/* Write the PF version so that VF would know which version
* is supported - might be later overriden. This guarantees that
* VF could recognize legacy PF based on lack of versions in reply.
*/
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
/* Validate FW compatibility */ /* Validate FW compatibility */
if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
DP_INFO(p_hwfn, DP_INFO(p_hwfn,
...@@ -1289,12 +1296,6 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, ...@@ -1289,12 +1296,6 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
req->vfdev_info.eth_fp_hsi_minor, req->vfdev_info.eth_fp_hsi_minor,
ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
/* Write the PF version so that VF would know which version
* is supported.
*/
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
goto out; goto out;
} }
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#define QEDE_MAJOR_VERSION 8 #define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10 #define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 1 #define QEDE_REVISION_VERSION 9
#define QEDE_ENGINEERING_VERSION 20 #define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \
......
This diff is collapsed.
...@@ -13,9 +13,12 @@ ...@@ -13,9 +13,12 @@
/* ETH FW CONSTANTS */ /* ETH FW CONSTANTS */
/********************/ /********************/
#define ETH_HSI_VER_MAJOR 3 #define ETH_HSI_VER_MAJOR 3
#define ETH_HSI_VER_MINOR 0 #define ETH_HSI_VER_MINOR 10
#define ETH_CACHE_LINE_SIZE 64
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
#define ETH_MAX_RAMROD_PER_CON 8 #define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 #define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 #define ETH_RX_BD_PAGE_SIZE_BYTES 4096
...@@ -24,15 +27,25 @@ ...@@ -24,15 +27,25 @@
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
#define ETH_TX_MAX_LSO_HDR_NBD 4 #define ETH_TX_MAX_LSO_HDR_NBD 4
#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 #define ETH_TX_MIN_BDS_PER_LSO_PKT 3
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 #define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 #define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 #define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8)) #define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510 #define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */ /* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5 #define ETH_RX_MAX_BUFF_PER_PKT 5
...@@ -59,6 +72,8 @@ ...@@ -59,6 +72,8 @@
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 #define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
struct eth_tx_1st_bd_flags { struct eth_tx_1st_bd_flags {
u8 bitfields; u8 bitfields;
...@@ -82,10 +97,10 @@ struct eth_tx_1st_bd_flags { ...@@ -82,10 +97,10 @@ struct eth_tx_1st_bd_flags {
/* The parsing information data fo rthe first tx bd of a given packet. */ /* The parsing information data fo rthe first tx bd of a given packet. */
struct eth_tx_data_1st_bd { struct eth_tx_data_1st_bd {
__le16 vlan; __le16 vlan;
u8 nbds; u8 nbds;
struct eth_tx_1st_bd_flags bd_flags; struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields; __le16 bitfields;
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
...@@ -96,7 +111,7 @@ struct eth_tx_data_1st_bd { ...@@ -96,7 +111,7 @@ struct eth_tx_data_1st_bd {
/* The parsing information data for the second tx bd of a given packet. */ /* The parsing information data for the second tx bd of a given packet. */
struct eth_tx_data_2nd_bd { struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size; __le16 tunn_ip_size;
__le16 bitfields1; __le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
...@@ -125,9 +140,14 @@ struct eth_tx_data_2nd_bd { ...@@ -125,9 +140,14 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
}; };
/* Firmware data for L2-EDPM packet. */
struct eth_edpm_fw_data {
struct eth_tx_data_1st_bd data_1st_bd;
struct eth_tx_data_2nd_bd data_2nd_bd;
__le32 reserved;
};
struct eth_fast_path_cqe_fw_debug { struct eth_fast_path_cqe_fw_debug {
u8 reserved0;
u8 reserved1;
__le16 reserved2; __le16 reserved2;
}; };
...@@ -148,6 +168,17 @@ struct eth_tunnel_parsing_flags { ...@@ -148,6 +168,17 @@ struct eth_tunnel_parsing_flags {
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 #define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
}; };
/* PMD flow control bits */
struct eth_pmd_flow_flags {
u8 flags;
#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
};
/* Regular ETH Rx FP CQE. */ /* Regular ETH Rx FP CQE. */
struct eth_fast_path_rx_reg_cqe { struct eth_fast_path_rx_reg_cqe {
u8 type; u8 type;
...@@ -166,64 +197,63 @@ struct eth_fast_path_rx_reg_cqe { ...@@ -166,64 +197,63 @@ struct eth_fast_path_rx_reg_cqe {
u8 placement_offset; u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags; struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num; u8 bd_num;
u8 reserved[7]; u8 reserved[9];
struct eth_fast_path_cqe_fw_debug fw_debug; struct eth_fast_path_cqe_fw_debug fw_debug;
u8 reserved1[3]; u8 reserved1[3];
u8 flags; struct eth_pmd_flow_flags pmd_flags;
#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
}; };
/* TPA-continue ETH Rx FP CQE. */ /* TPA-continue ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_cont_cqe { struct eth_fast_path_rx_tpa_cont_cqe {
u8 type; u8 type;
u8 tpa_agg_index; u8 tpa_agg_index;
__le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
u8 reserved[5]; u8 reserved;
u8 reserved1; u8 reserved1;
__le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
u8 reserved3[3];
struct eth_pmd_flow_flags pmd_flags;
}; };
/* TPA-end ETH Rx FP CQE. */ /* TPA-end ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_end_cqe { struct eth_fast_path_rx_tpa_end_cqe {
u8 type; u8 type;
u8 tpa_agg_index; u8 tpa_agg_index;
__le16 total_packet_len; __le16 total_packet_len;
u8 num_of_bds; u8 num_of_bds;
u8 end_reason; u8 end_reason;
__le16 num_of_coalesced_segs; __le16 num_of_coalesced_segs;
__le32 ts_delta; __le32 ts_delta;
__le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
u8 reserved1[3]; __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
u8 reserved2; __le16 reserved1;
__le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; u8 reserved2;
struct eth_pmd_flow_flags pmd_flags;
}; };
/* TPA-start ETH Rx FP CQE. */ /* TPA-start ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_start_cqe { struct eth_fast_path_rx_tpa_start_cqe {
u8 type; u8 type;
u8 bitfields; u8 bitfields;
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
__le16 seg_len; __le16 seg_len;
struct parsing_and_err_flags pars_flags; struct parsing_and_err_flags pars_flags;
__le16 vlan_tag; __le16 vlan_tag;
__le32 rss_hash; __le32 rss_hash;
__le16 len_on_first_bd; __le16 len_on_first_bd;
u8 placement_offset; u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags; struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index; u8 tpa_agg_index;
u8 header_len; u8 header_len;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
struct eth_fast_path_cqe_fw_debug fw_debug; struct eth_fast_path_cqe_fw_debug fw_debug;
u8 reserved;
struct eth_pmd_flow_flags pmd_flags;
}; };
/* The L4 pseudo checksum mode for Ethernet */ /* The L4 pseudo checksum mode for Ethernet */
...@@ -245,15 +275,7 @@ struct eth_slow_path_rx_cqe { ...@@ -245,15 +275,7 @@ struct eth_slow_path_rx_cqe {
u8 reserved[25]; u8 reserved[25];
__le16 echo; __le16 echo;
u8 reserved1; u8 reserved1;
u8 flags; struct eth_pmd_flow_flags pmd_flags;
/* for PMD mode - valid indication */
#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
/* for PMD mode - valid toggle indication */
#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
}; };
/* union for all ETH Rx CQE types */ /* union for all ETH Rx CQE types */
...@@ -276,6 +298,11 @@ enum eth_rx_cqe_type { ...@@ -276,6 +298,11 @@ enum eth_rx_cqe_type {
MAX_ETH_RX_CQE_TYPE MAX_ETH_RX_CQE_TYPE
}; };
struct eth_rx_pmd_cqe {
union eth_rx_cqe cqe;
u8 reserved[ETH_RX_CQE_GAP];
};
enum eth_rx_tunn_type { enum eth_rx_tunn_type {
ETH_RX_NO_TUNN, ETH_RX_NO_TUNN,
ETH_RX_TUNN_GENEVE, ETH_RX_TUNN_GENEVE,
...@@ -313,8 +340,8 @@ struct eth_tx_2nd_bd { ...@@ -313,8 +340,8 @@ struct eth_tx_2nd_bd {
/* The parsing information data for the third tx bd of a given packet. */ /* The parsing information data for the third tx bd of a given packet. */
struct eth_tx_data_3rd_bd { struct eth_tx_data_3rd_bd {
__le16 lso_mss; __le16 lso_mss;
__le16 bitfields; __le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
...@@ -323,8 +350,8 @@ struct eth_tx_data_3rd_bd { ...@@ -323,8 +350,8 @@ struct eth_tx_data_3rd_bd {
#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 #define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F #define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 #define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
u8 tunn_l4_hdr_start_offset_w; u8 tunn_l4_hdr_start_offset_w;
u8 tunn_hdr_size_w; u8 tunn_hdr_size_w;
}; };
/* The third tx bd of a given packet */ /* The third tx bd of a given packet */
...@@ -355,10 +382,10 @@ struct eth_tx_bd { ...@@ -355,10 +382,10 @@ struct eth_tx_bd {
}; };
union eth_tx_bd_types { union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd; struct eth_tx_1st_bd first_bd;
struct eth_tx_2nd_bd second_bd; struct eth_tx_2nd_bd second_bd;
struct eth_tx_3rd_bd third_bd; struct eth_tx_3rd_bd third_bd;
struct eth_tx_bd reg_bd; struct eth_tx_bd reg_bd;
}; };
/* Mstorm Queue Zone */ /* Mstorm Queue Zone */
...@@ -389,8 +416,8 @@ struct eth_db_data { ...@@ -389,8 +416,8 @@ struct eth_db_data {
#define ETH_DB_DATA_RESERVED_SHIFT 5 #define ETH_DB_DATA_RESERVED_SHIFT 5
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 #define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 #define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags; u8 agg_flags;
__le16 bd_prod; __le16 bd_prod;
}; };
#endif /* __ETH_COMMON__ */ #endif /* __ETH_COMMON__ */
...@@ -311,7 +311,7 @@ struct iscsi_login_req_hdr { ...@@ -311,7 +311,7 @@ struct iscsi_login_req_hdr {
#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_TABC; __le32 isid_tabc;
__le16 tsih; __le16 tsih;
__le16 isid_d; __le16 isid_d;
__le32 itt; __le32 itt;
...@@ -464,7 +464,7 @@ struct iscsi_login_response_hdr { ...@@ -464,7 +464,7 @@ struct iscsi_login_response_hdr {
#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_TABC; __le32 isid_tabc;
__le16 tsih; __le16 tsih;
__le16 isid_d; __le16 isid_d;
__le32 itt; __le32 itt;
...@@ -688,8 +688,7 @@ union iscsi_cqe { ...@@ -688,8 +688,7 @@ union iscsi_cqe {
enum iscsi_cqes_type { enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1, ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED, ISCSI_CQE_TYPE_UNSOLICITED,
ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
,
ISCSI_CQE_TYPE_TASK_CLEANUP, ISCSI_CQE_TYPE_TASK_CLEANUP,
ISCSI_CQE_TYPE_DUMMY, ISCSI_CQE_TYPE_DUMMY,
MAX_ISCSI_CQES_TYPE MAX_ISCSI_CQES_TYPE
...@@ -769,9 +768,9 @@ enum iscsi_eqe_opcode { ...@@ -769,9 +768,9 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_UPDATE_CONN, ISCSI_EVENT_TYPE_UPDATE_CONN,
ISCSI_EVENT_TYPE_CLEAR_SQ, ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN, ISCSI_EVENT_TYPE_TERMINATE_CONN,
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
RESERVED8,
RESERVED9, RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
...@@ -867,6 +866,7 @@ enum iscsi_ramrod_cmd_id { ...@@ -867,6 +866,7 @@ enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4, ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
MAX_ISCSI_RAMROD_CMD_ID MAX_ISCSI_RAMROD_CMD_ID
}; };
...@@ -883,6 +883,16 @@ union iscsi_seq_num { ...@@ -883,6 +883,16 @@ union iscsi_seq_num {
__le16 r2t_sn; __le16 r2t_sn;
}; };
struct iscsi_spe_conn_mac_update {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
__le32 fw_cid;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
u8 reserved0[2];
};
struct iscsi_spe_conn_offload { struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr; struct iscsi_slow_path_hdr hdr;
__le16 conn_id; __le16 conn_id;
...@@ -1302,14 +1312,6 @@ struct mstorm_iscsi_stats_drv { ...@@ -1302,14 +1312,6 @@ struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid; struct regpair iscsi_rx_dropped_pdus_task_not_valid;
}; };
struct ooo_opaque {
__le32 cid;
u8 drop_isle;
u8 drop_size;
u8 ooo_opcode;
u8 ooo_isle;
};
struct pstorm_iscsi_stats_drv { struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt; struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt; struct regpair iscsi_tx_packet_cnt;
......
...@@ -16,19 +16,6 @@ ...@@ -16,19 +16,6 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/qed/common_hsi.h> #include <linux/qed/common_hsi.h>
/* dma_addr_t manip */
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
#define DMA_REGPAIR_LE(x, val) do { \
(x).hi = DMA_HI_LE((val)); \
(x).lo = DMA_LO_LE((val)); \
} while (0)
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
enum qed_chain_mode { enum qed_chain_mode {
/* Each Page contains a next pointer at its end */ /* Each Page contains a next pointer at its end */
QED_CHAIN_MODE_NEXT_PTR, QED_CHAIN_MODE_NEXT_PTR,
......
...@@ -11,6 +11,14 @@ ...@@ -11,6 +11,14 @@
#define TCP_INVALID_TIMEOUT_VAL -1 #define TCP_INVALID_TIMEOUT_VAL -1
struct ooo_opaque {
__le32 cid;
u8 drop_isle;
u8 drop_size;
u8 ooo_opcode;
u8 ooo_isle;
};
enum tcp_connect_mode { enum tcp_connect_mode {
TCP_CONNECT_ACTIVE, TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE, TCP_CONNECT_PASSIVE,
...@@ -18,14 +26,10 @@ enum tcp_connect_mode { ...@@ -18,14 +26,10 @@ enum tcp_connect_mode {
}; };
struct tcp_init_params { struct tcp_init_params {
__le32 max_cwnd; __le32 two_msl_timer;
__le16 dup_ack_threshold;
__le16 tx_sws_timer; __le16 tx_sws_timer;
__le16 min_rto;
__le16 min_rto_rt;
__le16 max_rto;
u8 maxfinrt; u8 maxfinrt;
u8 reserved[1]; u8 reserved[9];
}; };
enum tcp_ip_version { enum tcp_ip_version {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment