Commit 5d24bcf1 authored by Tomer Tayar's avatar Tomer Tayar Committed by David S. Miller

qed: Move to new load request scheme

Management firmware is used as an arbiter between the various PFs
in regard to loading - it causes the various PFs to load/unload
sequentially and informs each of its appropriate role in the init.

But the existing flow is too weak to handle some scenarios where
PFs aren't properly cleaned prior to loading.
The significant scenarios falling under this criteria:
  a. Preboot drivers in some environments can't properly unload.
  b. Unexpected driver replacement [kdump, PDA].

Modern management firmware supports a more intricate loading flow,
where the driver has the ability to overcome previous limitations.
This moves qed into using this newer scheme.

Notice new scheme is backward compatible, so new drivers would
still be able to load properly on top of older management firmwares
and vice versa.
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c0c2d0b4
...@@ -51,7 +51,19 @@ ...@@ -51,7 +51,19 @@
#include "qed_hsi.h" #include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass; extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.10.10.21"
/* Driver version components, also packed below into a single 32-bit
 * value (one byte per component: major.minor.revision.engineering).
 */
#define QED_MAJOR_VERSION 8
#define QED_MINOR_VERSION 10
#define QED_REVISION_VERSION 10
#define QED_ENGINEERING_VERSION 21

/* Packed driver version: major in the top byte, engineering in the low byte */
#define QED_VERSION \
	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
	 (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)

/* Packed storm firmware version, same byte layout as QED_VERSION */
#define STORM_FW_VERSION \
	((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
	 (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
#define MAX_HWFNS_PER_DEVICE (4) #define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16 #define NAME_SIZE 16
...@@ -76,6 +88,15 @@ union qed_mcp_protocol_stats; ...@@ -76,6 +88,15 @@ union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type; enum qed_mcp_protocol_type;
/* helpers */ /* helpers */
/* Extract a bit-field from an MFW register/mailbox value.
 * The *_MASK constants are defined already shifted into position
 * (e.g. LOAD_REQ_LOCK_TO_MASK is 0x0000FF00 with a shift of 8),
 * so the value is masked first and shifted down afterwards.
 */
#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

/* Set a bit-field in place.  The mask must NOT be shifted here:
 * *_MASK already includes the shift, and shifting it again would
 * clear - and bound the new value against - the wrong bits.
 */
#define QED_MFW_SET_FIELD(name, field, value)				       \
	do {								       \
		(name) &= ~(field ## _MASK);				       \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
	} while (0)
static inline u32 qed_db_addr(u32 cid, u32 DEMS) static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{ {
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
...@@ -355,6 +376,12 @@ struct qed_fw_data { ...@@ -355,6 +376,12 @@ struct qed_fw_data {
u32 init_ops_size; u32 init_ops_size;
}; };
/* Module version string, built by stringifying the numeric
 * QED_*_VERSION components so the string and the packed QED_VERSION
 * value cannot drift apart.
 */
#define DRV_MODULE_VERSION \
	__stringify(QED_MAJOR_VERSION) "." \
	__stringify(QED_MINOR_VERSION) "." \
	__stringify(QED_REVISION_VERSION) "." \
	__stringify(QED_ENGINEERING_VERSION)
struct qed_simd_fp_handler { struct qed_simd_fp_handler {
void *token; void *token;
void (*func)(void *); void (*func)(void *);
......
...@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata { ...@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
enum qed_pci_personality personality; enum qed_pci_personality personality;
}; };
#define QED_MFW_GET_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _SHIFT))
struct qed_dcbx_info { struct qed_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS]; struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS]; struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
......
...@@ -1106,8 +1106,22 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, ...@@ -1106,8 +1106,22 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
} }
/* Translate the driver-level load parameters provided by the upper
 * layer into the qed_load_req_params structure consumed by
 * qed_mcp_load_req().  Output fields not set here are zeroed.
 */
static void
qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
			 struct qed_drv_load_params *p_drv_load)
{
	memset(p_load_req, 0, sizeof(*p_load_req));

	/* A crash kernel advertises the KDUMP role to the MFW */
	if (p_drv_load->is_crash_kernel)
		p_load_req->drv_role = QED_DRV_ROLE_KDUMP;
	else
		p_load_req->drv_role = QED_DRV_ROLE_OS;

	p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
	p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
	p_load_req->override_force_load = p_drv_load->override_force_load;
}
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{ {
struct qed_load_req_params load_req_params;
u32 load_code, param, drv_mb_param; u32 load_code, param, drv_mb_param;
bool b_default_mtu = true; bool b_default_mtu = true;
struct qed_hwfn *p_hwfn; struct qed_hwfn *p_hwfn;
...@@ -1145,17 +1159,21 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) ...@@ -1145,17 +1159,21 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (rc) if (rc)
return rc; return rc;
rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code); qed_fill_load_req_params(&load_req_params,
p_params->p_drv_load_params);
rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
&load_req_params);
if (rc) { if (rc) {
DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n"); DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
return rc; return rc;
} }
qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); load_code = load_req_params.load_code;
DP_VERBOSE(p_hwfn, QED_MSG_SP, DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Load request was sent. Resp:0x%x, Load code: 0x%x\n", "Load request was sent. Load code: 0x%x\n",
rc, load_code); load_code);
qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
p_hwfn->first_on_engine = (load_code == p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE); FW_MSG_CODE_DRV_LOAD_ENGINE);
...@@ -1224,10 +1242,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) ...@@ -1224,10 +1242,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (IS_PF(cdev)) { if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev); p_hwfn = QED_LEADING_HWFN(cdev);
drv_mb_param = (FW_MAJOR_VERSION << 24) | drv_mb_param = STORM_FW_VERSION;
(FW_MINOR_VERSION << 16) |
(FW_REVISION_VERSION << 8) |
(FW_ENGINEERING_VERSION);
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
drv_mb_param, &load_code, &param); drv_mb_param, &load_code, &param);
......
...@@ -82,6 +82,35 @@ int qed_resc_alloc(struct qed_dev *cdev); ...@@ -82,6 +82,35 @@ int qed_resc_alloc(struct qed_dev *cdev);
*/ */
void qed_resc_setup(struct qed_dev *cdev); void qed_resc_setup(struct qed_dev *cdev);
/* Policies for overriding the default force-load behavior when
 * requesting a load from the MFW (see struct qed_drv_load_params).
 */
enum qed_override_force_load {
	QED_OVERRIDE_FORCE_LOAD_NONE,
	QED_OVERRIDE_FORCE_LOAD_ALWAYS,
	QED_OVERRIDE_FORCE_LOAD_NEVER,
};
/* Driver-level load parameters, filled by the upper layer and later
 * translated into struct qed_load_req_params for the MFW load request.
 */
struct qed_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine for
	 * the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
#define QED_LOAD_REQ_LOCK_TO_NONE 255

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum qed_override_force_load override_force_load;
};
struct qed_hw_init_params { struct qed_hw_init_params {
/* Tunneling parameters */ /* Tunneling parameters */
struct qed_tunn_start_params *p_tunn; struct qed_tunn_start_params *p_tunn;
...@@ -96,6 +125,9 @@ struct qed_hw_init_params { ...@@ -96,6 +125,9 @@ struct qed_hw_init_params {
/* Binary fw data pointer in binary fw file */ /* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data; const u8 *bin_fw_data;
/* Driver load parameters */
struct qed_drv_load_params *p_drv_load_params;
}; };
/** /**
......
...@@ -9887,9 +9887,11 @@ struct public_func { ...@@ -9887,9 +9887,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff #define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0 #define DRV_ID_PDA_COMP_VER_SHIFT 0
#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 #define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16 #define DRV_ID_MCP_HSI_VER_SHIFT 16
#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT) #define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000 #define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24 #define DRV_ID_DRV_TYPE_SHIFT 24
...@@ -10001,6 +10003,46 @@ struct resource_info { ...@@ -10001,6 +10003,46 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0) #define RESOURCE_ELEMENT_STRICT (1 << 0)
}; };
/* Driver role values carried in the ROLE field of load_req_stc.misc0 */
#define DRV_ROLE_NONE 0
#define DRV_ROLE_PREBOOT 1
#define DRV_ROLE_OS 2
#define DRV_ROLE_KDUMP 3

/* LOAD_REQ mailbox payload; the misc0 sub-fields are defined by the
 * mask/shift pairs below (masks are pre-shifted into position).
 */
struct load_req_stc {
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u32 misc0;
#define LOAD_REQ_ROLE_MASK 0x000000FF
#define LOAD_REQ_ROLE_SHIFT 0
#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
#define LOAD_REQ_LOCK_TO_SHIFT 8
#define LOAD_REQ_LOCK_TO_DEFAULT 0
#define LOAD_REQ_LOCK_TO_NONE 255
#define LOAD_REQ_FORCE_MASK 0x000F0000
#define LOAD_REQ_FORCE_SHIFT 16
#define LOAD_REQ_FORCE_NONE 0
#define LOAD_REQ_FORCE_PF 1
#define LOAD_REQ_FORCE_ALL 2
#define LOAD_REQ_FLAGS0_MASK 0x00F00000
#define LOAD_REQ_FLAGS0_SHIFT 20
#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
};
/* LOAD_REQ mailbox response; the misc0 sub-fields are defined by the
 * mask/shift pairs below (masks are pre-shifted into position).
 */
struct load_rsp_stc {
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u32 misc0;
#define LOAD_RSP_ROLE_MASK 0x000000FF
#define LOAD_RSP_ROLE_SHIFT 0
#define LOAD_RSP_HSI_MASK 0x0000FF00
#define LOAD_RSP_HSI_SHIFT 8
#define LOAD_RSP_FLAGS0_MASK 0x000F0000
#define LOAD_RSP_FLAGS0_SHIFT 16
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
union drv_union_data { union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac; struct mcp_mac wol_mac;
...@@ -10032,6 +10074,7 @@ struct public_drv_mb { ...@@ -10032,6 +10074,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000 #define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000 #define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000 #define DRV_MSG_CODE_INIT_HW 0x12000000
#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 #define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000 #define DRV_MSG_CODE_INIT_PHY 0x22000000
...@@ -10167,8 +10210,11 @@ struct public_drv_mb { ...@@ -10167,8 +10210,11 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 #define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000 #define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 #define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 #define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h> #include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h> #include <linux/qed/qed_ll2_if.h>
...@@ -901,6 +902,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, ...@@ -901,6 +902,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev, static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params) struct qed_slowpath_params *params)
{ {
struct qed_drv_load_params drv_load_params;
struct qed_hw_init_params hw_init_params; struct qed_hw_init_params hw_init_params;
struct qed_tunn_start_params tunn_info; struct qed_tunn_start_params tunn_info;
struct qed_mcp_drv_version drv_version; struct qed_mcp_drv_version drv_version;
...@@ -974,6 +976,13 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -974,6 +976,13 @@ static int qed_slowpath_start(struct qed_dev *cdev,
hw_init_params.allow_npar_tx_switch = true; hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data; hw_init_params.bin_fw_data = data;
memset(&drv_load_params, 0, sizeof(drv_load_params));
drv_load_params.is_crash_kernel = is_kdump_kernel();
drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
drv_load_params.avoid_eng_reset = false;
drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
hw_init_params.p_drv_load_params = &drv_load_params;
rc = qed_hw_init(cdev, &hw_init_params); rc = qed_hw_init(cdev, &hw_init_params);
if (rc) if (rc)
goto err2; goto err2;
......
This diff is collapsed.
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/qed/qed_fcoe_if.h> #include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h" #include "qed_hsi.h"
#include "qed_dev_api.h"
struct qed_mcp_link_speed_params { struct qed_mcp_link_speed_params {
bool autoneg; bool autoneg;
...@@ -570,27 +571,35 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn); ...@@ -570,27 +571,35 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn);
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt); struct qed_ptt *p_ptt);
/* Role the driver advertises to the MFW in its load request */
enum qed_drv_role {
	QED_DRV_ROLE_OS,
	QED_DRV_ROLE_KDUMP,
};

/* Parameters for qed_mcp_load_req(); input fields are sent to the MFW,
 * output fields are filled from its response.
 */
struct qed_load_req_params {
	/* Input params */
	enum qed_drv_role drv_role;
	/* MFW engine-lock timeout: 0 = default, 255 = no timeout */
	u8 timeout_val;
	bool avoid_eng_reset;
	enum qed_override_force_load override_force_load;

	/* Output params */
	/* MFW response code (one of the FW_MSG_CODE_DRV_LOAD_* values) */
	u32 load_code;
};
/** /**
* @brief Sends a LOAD_REQ to the MFW, and in case operation * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
* succeed, returns whether this PF is the first on the * returns whether this PF is the first on the engine/port or function.
* chip/engine/port or function. This function should be
* called when driver is ready to accept MFW events after
* Storms initializations are done.
* *
* @param p_hwfn - hw function * @param p_hwfn
* @param p_ptt - PTT required for register access * @param p_ptt
* @param p_load_code - The MCP response param containing one * @param p_params
* of the following: *
* FW_MSG_CODE_DRV_LOAD_ENGINE * @return int - 0 - Operation was successful.
* FW_MSG_CODE_DRV_LOAD_PORT
* FW_MSG_CODE_DRV_LOAD_FUNCTION
* @return int -
* 0 - Operation was successul.
* -EBUSY - Operation failed
*/ */
int qed_mcp_load_req(struct qed_hwfn *p_hwfn, int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 *p_load_code); struct qed_load_req_params *p_params);
/** /**
* @brief Sends a UNLOAD_REQ message to the MFW * @brief Sends a UNLOAD_REQ message to the MFW
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment