Commit c552a50e authored by David S. Miller

Merge branch 'qed-load-unload-mfw'

Yuval Mintz says:

====================
qed: load/unload mfw series

This series corrects the unload flow and greatly enhances the
initialization flow with regard to the interactions between the driver
and the management firmware.

Patch #1 makes sure unloading is done under the management firmware's
'critical section' protection.

Patches #2 - #4 move the driver to a newer scheme for loading
in regard to the MFW; this newer scheme helps clean the device
in case a previous instance has dirtied it [preboot, PDA, etc.].

Patches #5 - #6 let the driver inform the management firmware how many
resources it needs; the count depends on the non-management firmware used.
Patch #7 then uses a new resource [BDQ] instead of a fixed value.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 30b38236 d0d40a73
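
The heart of the series is a richer LOAD_REQ handshake. Below is a minimal
sketch of how a PF now requests to load, using only names taken from the
diff that follows (p_hwfn and p_ptt are assumed to be in scope; error
handling and locking are elided):

	struct qed_load_req_params load_req_params;
	int rc;

	memset(&load_req_params, 0, sizeof(load_req_params));
	load_req_params.drv_role = QED_DRV_ROLE_OS;
	load_req_params.timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	load_req_params.avoid_eng_reset = false;
	load_req_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;

	rc = qed_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
	if (rc)
		return rc;

	/* The response tells this PF how much initialization it owns */
	switch (load_req_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:	/* first PF on the engine */
	case FW_MSG_CODE_DRV_LOAD_PORT:		/* first PF on the port */
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:	/* function-only init */
		break;
	}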
@@ -51,7 +51,19 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.10.10.21"
#define QED_MAJOR_VERSION 8
#define QED_MINOR_VERSION 10
#define QED_REVISION_VERSION 10
#define QED_ENGINEERING_VERSION 21
#define QED_VERSION \
((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
(QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
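/* For example, driver version 8.10.10.21 packs to
 * (8 << 24) | (10 << 16) | (10 << 8) | 21 == 0x080a0a15.
 */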
#define STORM_FW_VERSION \
((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
(FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -59,8 +71,6 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
#define ISCSI_BDQ_ID(_port_id) (_port_id)
#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2)
#define QED_WID_SIZE (1024)
#define QED_PF_DEMS_SIZE (4)
@@ -76,6 +86,15 @@ union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
/* helpers */
#define QED_MFW_GET_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _SHIFT))
#define QED_MFW_SET_FIELD(name, field, value) \
do { \
(name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
} while (0)
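/* Usage sketch (illustrative, not part of the patch): with a field defined
 * by a MASK/SHIFT pair, such as LOAD_REQ_ROLE from qed_hsi.h, a 32-bit
 * mailbox word is packed and unpacked as:
 *
 *	u32 misc0 = 0;
 *
 *	QED_MFW_SET_FIELD(misc0, LOAD_REQ_ROLE, DRV_ROLE_OS);
 *	role = QED_MFW_GET_FIELD(misc0, LOAD_REQ_ROLE);
 */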
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -198,6 +217,7 @@ enum qed_resources {
QED_LL2_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_BDQ,
QED_MAX_RESC,
};
@@ -355,6 +375,12 @@ struct qed_fw_data {
u32 init_ops_size;
};
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
__stringify(QED_REVISION_VERSION) "." \
__stringify(QED_ENGINEERING_VERSION)
struct qed_simd_fp_handler {
void *token;
void (*func)(void *);
@@ -732,5 +758,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
#endif /* _QED_H */
@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
enum qed_pci_personality personality;
};
#define QED_MFW_GET_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _SHIFT))
struct qed_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
This diff is collapsed.
@@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev);
*/
void qed_resc_setup(struct qed_dev *cdev);
enum qed_override_force_load {
QED_OVERRIDE_FORCE_LOAD_NONE,
QED_OVERRIDE_FORCE_LOAD_ALWAYS,
QED_OVERRIDE_FORCE_LOAD_NEVER,
};
struct qed_drv_load_params {
/* Indicates whether the driver is running over a crash kernel.
* As part of the load request, this will be used for providing the
* driver role to the MFW.
* In case of a crash kernel over PDA - this should be set to false.
*/
bool is_crash_kernel;
/* The timeout value that the MFW should use when locking the engine for
* the driver load process.
* A value of '0' means the default value, and '255' means no timeout.
*/
u8 mfw_timeout_val;
#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
#define QED_LOAD_REQ_LOCK_TO_NONE 255
/* Avoid engine reset when first PF loads on it */
bool avoid_eng_reset;
/* Allow overriding the default force load behavior */
enum qed_override_force_load override_force_load;
};
struct qed_hw_init_params {
/* Tunneling parameters */
struct qed_tunn_start_params *p_tunn;
bool b_hw_start;
/* Interrupt mode [msix, inta, etc.] to use */
enum qed_int_mode int_mode;
/* NPAR tx switching to be used for vports configured for tx-switching */
bool allow_npar_tx_switch;
/* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data;
/* Driver load parameters */
struct qed_drv_load_params *p_drv_load_params;
};
/**
* @brief qed_hw_init -
*
* @param cdev
* @param p_tunn
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
* @param bin_fw_data - binary fw data pointer in binary fw file.
* Pass NULL if not using binary fw file.
* @param p_params
*
* @return int
*/
int qed_hw_init(struct qed_dev *cdev,
struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
const u8 *bin_fw_data);
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
/**
* @brief qed_hw_timers_stop_all - stop the timers HW block
@@ -140,14 +177,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev);
*/
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
/**
* @brief qed_hw_reset -
*
* @param cdev
*
* @return int
*/
int qed_hw_reset(struct qed_dev *cdev);
/**
* @brief qed_hw_prepare -
@@ -441,4 +470,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
*/
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 coalesce, u8 qid, u16 sb_id);
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
@@ -191,7 +191,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id);
p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -512,19 +512,31 @@ static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
QED_BDQ),
bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
}
}
static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
QED_BDQ),
bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
}
}
struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
@@ -9887,9 +9887,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0
#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16
#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
@@ -9984,6 +9986,7 @@ enum resource_id_enum {
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -10001,6 +10004,46 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
#define DRV_ROLE_NONE 0
#define DRV_ROLE_PREBOOT 1
#define DRV_ROLE_OS 2
#define DRV_ROLE_KDUMP 3
struct load_req_stc {
u32 drv_ver_0;
u32 drv_ver_1;
u32 fw_ver;
u32 misc0;
#define LOAD_REQ_ROLE_MASK 0x000000FF
#define LOAD_REQ_ROLE_SHIFT 0
#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
#define LOAD_REQ_LOCK_TO_SHIFT 8
#define LOAD_REQ_LOCK_TO_DEFAULT 0
#define LOAD_REQ_LOCK_TO_NONE 255
#define LOAD_REQ_FORCE_MASK 0x000F0000
#define LOAD_REQ_FORCE_SHIFT 16
#define LOAD_REQ_FORCE_NONE 0
#define LOAD_REQ_FORCE_PF 1
#define LOAD_REQ_FORCE_ALL 2
#define LOAD_REQ_FLAGS0_MASK 0x00F00000
#define LOAD_REQ_FLAGS0_SHIFT 20
#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
};
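/* Illustrative only: the driver-side fill of misc0 (done in a collapsed
 * portion of this diff) would use the QED_MFW_SET_FIELD() helper from
 * qed.h along these lines:
 *
 *	struct load_req_stc load_req = { 0 };
 *
 *	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, DRV_ROLE_OS);
 *	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
 *			  LOAD_REQ_LOCK_TO_DEFAULT);
 *	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE, LOAD_REQ_FORCE_NONE);
 */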
struct load_rsp_stc {
u32 drv_ver_0;
u32 drv_ver_1;
u32 fw_ver;
u32 misc0;
#define LOAD_RSP_ROLE_MASK 0x000000FF
#define LOAD_RSP_ROLE_SHIFT 0
#define LOAD_RSP_HSI_MASK 0x0000FF00
#define LOAD_RSP_HSI_SHIFT 8
#define LOAD_RSP_FLAGS0_MASK 0x000F0000
#define LOAD_RSP_FLAGS0_SHIFT 16
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -10032,6 +10075,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000
#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -10044,12 +10088,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
@@ -10075,6 +10121,33 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
#define RESOURCE_OPCODE_REQ 1
#define RESOURCE_OPCODE_REQ_WO_AGING 2
#define RESOURCE_OPCODE_REQ_W_AGING 3
#define RESOURCE_OPCODE_RELEASE 4
#define RESOURCE_OPCODE_FORCE_RELEASE 5
#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
#define RESOURCE_CMD_REQ_AGE_SHIFT 8
#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
#define RESOURCE_OPCODE_GNT 1
#define RESOURCE_OPCODE_BUSY 2
#define RESOURCE_OPCODE_RELEASED 3
#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
#define RESOURCE_OPCODE_WRONG_OWNER 5
#define RESOURCE_OPCODE_UNKNOWN_CMD 255
#define RESOURCE_DUMP 0
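/* Illustrative sketch: a resource-lock request is composed by packing the
 * fields above into the DRV_MSG_CODE_RESOURCE_CMD parameter, e.g.:
 *
 *	u32 param = 0;
 *
 *	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, resource);
 *	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE,
 *			  RESOURCE_OPCODE_REQ_W_AGING);
 *	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, timeout);
 */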
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -10163,12 +10236,16 @@ struct public_drv_mb {
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_UNSUPPORTED 0x00000000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -10188,6 +10265,10 @@ struct public_drv_mb {
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
/* get pf rdma protocol command response */
#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
@@ -216,7 +216,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
}
p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id);
p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -593,21 +593,31 @@ static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
QED_BDQ),
bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
}
}
static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
QED_BDQ),
bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
}
}
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
@@ -45,6 +45,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
@@ -589,6 +590,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
struct qed_dev *cdev = p_hwfn->cdev;
u8 id = p_hwfn->my_id;
u32 int_mode;
int_mode = cdev->int_params.out.int_mode;
if (int_mode == QED_INT_MODE_MSIX)
synchronize_irq(cdev->int_params.msix_table[id].vector);
else
synchronize_irq(cdev->pdev->irq);
}
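A plausible call site for the new helper, shown purely as a sketch (the
stop-flow changes that actually invoke it are in a collapsed portion of
this diff):

	/* Ensure no slowpath ISR is still running before teardown */
	qed_slowpath_irq_sync(p_hwfn);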
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
int i;
@@ -631,19 +645,6 @@ static int qed_nic_stop(struct qed_dev *cdev)
return rc;
}
static int qed_nic_reset(struct qed_dev *cdev)
{
int rc;
rc = qed_hw_reset(cdev);
if (rc)
return rc;
qed_resc_free(cdev);
return 0;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
int rc, i;
@@ -901,6 +902,8 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
struct qed_drv_load_params drv_load_params;
struct qed_hw_init_params hw_init_params;
struct qed_tunn_start_params tunn_info;
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
@@ -966,9 +969,21 @@ static int qed_slowpath_start(struct qed_dev *cdev,
tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
/* Start the slowpath */
rc = qed_hw_init(cdev, &tunn_info, true,
cdev->int_params.out.int_mode,
true, data);
memset(&hw_init_params, 0, sizeof(hw_init_params));
hw_init_params.p_tunn = &tunn_info;
hw_init_params.b_hw_start = true;
hw_init_params.int_mode = cdev->int_params.out.int_mode;
hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
memset(&drv_load_params, 0, sizeof(drv_load_params));
drv_load_params.is_crash_kernel = is_kdump_kernel();
drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
drv_load_params.avoid_eng_reset = false;
drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
hw_init_params.p_drv_load_params = &drv_load_params;
rc = qed_hw_init(cdev, &hw_init_params);
if (rc)
goto err2;
@@ -1043,7 +1058,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
}
qed_disable_msix(cdev);
qed_nic_reset(cdev);
qed_resc_free(cdev);
qed_iov_wq_stop(cdev, true);
This diff is collapsed.
@@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h"
#include "qed_dev_api.h"
struct qed_mcp_link_speed_params {
bool autoneg;
@@ -570,27 +571,55 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
enum qed_drv_role {
QED_DRV_ROLE_OS,
QED_DRV_ROLE_KDUMP,
};
struct qed_load_req_params {
/* Input params */
enum qed_drv_role drv_role;
u8 timeout_val;
bool avoid_eng_reset;
enum qed_override_force_load override_force_load;
/* Output params */
u32 load_code;
};
/**
* @brief Sends a LOAD_REQ to the MFW, and in case operation
* succeed, returns whether this PF is the first on the
* chip/engine/port or function. This function should be
* called when driver is ready to accept MFW events after
* Storms initializations are done.
* @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
* returns whether this PF is the first on the engine/port or function.
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param p_load_code - The MCP response param containing one
* of the following:
* FW_MSG_CODE_DRV_LOAD_ENGINE
* FW_MSG_CODE_DRV_LOAD_PORT
* FW_MSG_CODE_DRV_LOAD_FUNCTION
* @return int -
* 0 - Operation was successful.
* -EBUSY - Operation failed
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_load_code);
struct qed_load_req_params *p_params);
/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
* @param p_hwfn
* @param p_ptt
*
* @return int - 0 - Operation was successful.
*/
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Sends a UNLOAD_DONE message to the MFW
*
* @param p_hwfn
* @param p_ptt
*
* @return int - 0 - Operation was successful.
*/
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
@@ -713,6 +742,41 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
/**
* @brief - Sets the MFW's max value for the given resource
*
* @param p_hwfn
* @param p_ptt
* @param res_id
* @param resc_max_val
* @param p_mcp_resp
*
* @return int - 0 - operation was successful.
*/
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_resources res_id,
u32 resc_max_val, u32 *p_mcp_resp);
/**
* @brief - Gets the MFW allocation info for the given resource
*
* @param p_hwfn
* @param p_ptt
* @param res_id
* @param p_mcp_resp
* @param p_resc_num
* @param p_resc_start
*
* @return int - 0 - operation was successful.
*/
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_resources res_id,
u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
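
Taken together, these two calls let a PF negotiate a resource with the
MFW. A hedged sketch (p_hwfn/p_ptt assumed in scope; max_bdq is a
hypothetical driver limit):

	u32 mcp_resp, resc_num, resc_start;
	u32 max_bdq = 128;	/* hypothetical maximum for this PF */
	int rc;

	/* Inform the MFW of the highest number of BDQs this PF may use */
	rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, QED_BDQ,
				      max_bdq, &mcp_resp);
	if (rc)
		return rc;

	/* Read back how many were actually granted, and from which index */
	rc = qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_BDQ,
				   &mcp_resp, &resc_num, &resc_start);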
/**
* @brief Send eswitch mode to MFW
*
@@ -726,19 +790,86 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_eswitch eswitch);
#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP
#define QED_MCP_RESC_LOCK_MAX_VAL 31
enum qed_resc_lock {
QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL
};
/**
* @brief - Gets the MFW allocation info for the given resource
* @brief - Initiates PF FLR
*
* @param p_hwfn
* @param p_ptt
* @param p_resc_info - descriptor of requested resource
* @param p_mcp_resp
* @param p_mcp_param
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
struct qed_resc_lock_params {
/* Resource number [valid values are 0..31] */
u8 resource;
/* Lock timeout value in seconds [default, none or 1..254] */
u8 timeout;
#define QED_MCP_RESC_LOCK_TO_DEFAULT 0
#define QED_MCP_RESC_LOCK_TO_NONE 255
/* Number of times to retry locking */
u8 retry_num;
/* The interval in usec between retries */
u16 retry_interval;
/* Use sleep or delay between retries */
bool sleep_b4_retry;
/* Will be set as true if the resource is free and granted */
bool b_granted;
/* Will be filled with the resource owner.
* [0..15 = PF0-15, 16 = MFW]
*/
u8 owner;
};
/**
* @brief Acquires MFW generic resource lock
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return int - 0 - operation was successful.
*/
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
struct qed_resc_unlock_params {
/* Resource number [valid values are 0..31] */
u8 resource;
/* Allow releasing a resource even if it belongs to another PF */
bool b_force;
/* Will be set as true if the resource is released */
bool b_released;
};
/**
* @brief Releases MFW generic resource lock
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return int - 0 - operation was successful.
*/
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct resource_info *p_resc_info,
u32 *p_mcp_resp, u32 *p_mcp_param);
struct qed_resc_unlock_params *p_params);
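
A minimal usage sketch of the lock/unlock pair, assuming p_hwfn and a
valid p_ptt (the retry values are illustrative):

	struct qed_resc_lock_params lock_params;
	struct qed_resc_unlock_params unlock_params;
	int rc;

	memset(&lock_params, 0, sizeof(lock_params));
	lock_params.resource = QED_RESC_LOCK_DBG_DUMP;
	lock_params.timeout = QED_MCP_RESC_LOCK_TO_DEFAULT;
	lock_params.retry_num = 10;
	lock_params.retry_interval = 10000;
	lock_params.sleep_b4_retry = true;

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc || !lock_params.b_granted)
		return rc ? rc : -EBUSY;

	/* ... work that must be serialized across PFs ... */

	memset(&unlock_params, 0, sizeof(unlock_params));
	unlock_params.resource = QED_RESC_LOCK_DBG_DUMP;
	unlock_params.b_force = false;
	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);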
#endif