Commit f2a74107 authored by Prabhakar Kushwaha, committed by David S. Miller

qed: Update qed_mfw_hsi.h for FW ver 8.59.1.0

The qed_mfw_hsi.h file contains the HSI (Hardware Software Interface)
definitions related to the management firmware. It has been updated to
support the new FW version 8.59.1.0 with the following changes:
 - New defines for VF bitmap.
 - fec_mode and extended_speed defines updated in struct eth_phy_cfg.
 - Updated structures lldp_system_tlvs_buffer_s, public_global,
   public_port, public_func, drv_union_data and public_drv_mb,
   along with all dependent new structures.
 - Updates to NVM-related structures and defines.
 - New message defines added to the drv_msg_code and fw_msg_code enums.
 - Updated existing defines and added new ones.

This patch also fixes the existing checkpatch warnings and a few
important checks.
Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 484563e2
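For orientation before the diff: on the driver side, the extended-speed part of this update boils down to translating driver-level QED_EXT_SPEED_* flags into the HSI ETH_EXT_SPEED_* bitmap that ends up in struct eth_phy_cfg, with the 20G values dropped and the autoneg case now using ETH_EXT_SPEED_NONE. The sketch below condenses the qed_mcp_set_link() hunks further down; the helper name qed_ext_speed_to_hsi() and its (bool, u32) signature are illustrative assumptions, while the ETH_EXT_SPEED_* and QED_EXT_SPEED_* names are taken from the diff itself.

#include <linux/types.h>
/* ETH_EXT_SPEED_* and QED_EXT_SPEED_* come from the qed HSI/driver headers. */

static u32 qed_ext_speed_to_hsi(bool autoneg, u32 forced_speed)
{
	u32 ext_speed = 0;

	if (autoneg)
		ext_speed |= ETH_EXT_SPEED_NONE;	/* previously ETH_EXT_SPEED_AN */

	if (forced_speed & QED_EXT_SPEED_1G)
		ext_speed |= ETH_EXT_SPEED_1G;
	if (forced_speed & QED_EXT_SPEED_10G)
		ext_speed |= ETH_EXT_SPEED_10G;
	if (forced_speed & QED_EXT_SPEED_25G)
		ext_speed |= ETH_EXT_SPEED_25G;
	if (forced_speed & QED_EXT_SPEED_40G)
		ext_speed |= ETH_EXT_SPEED_40G;

	return ext_speed;
}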
@@ -99,10 +99,6 @@ static const u32 qed_mfw_ext_10g[] __initconst = {
 	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
 };
-static const u32 qed_mfw_ext_20g[] __initconst = {
-	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
-};
 static const u32 qed_mfw_ext_25g[] __initconst = {
 	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
 	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
@@ -148,7 +144,6 @@ static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
 static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
-	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
 	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
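Note on the two hunks above: each qed_mfw_ext_* array is just a list of ETHTOOL_LINK_MODE_*_BIT numbers, and QED_MFW_SPEED_MAP ties one such array to an MFW extended-advertised-speed bit, so dropping 20G support is purely a matter of deleting the array and its table row. As a hedged illustration of how such an array is typically consumed (qed_fill_link_modes() is an assumed helper name, not driver code), the bit list is folded into an ethtool link-mode bitmap:

#include <linux/linkmode.h>
#include <linux/types.h>

/* Fold an array of ETHTOOL_LINK_MODE_*_BIT numbers (e.g. qed_mfw_ext_25g)
 * into an ethtool link-mode bitmap declared with
 * __ETHTOOL_DECLARE_LINK_MODE_MASK().  Sketch only.
 */
static void qed_fill_link_modes(unsigned long *mask, const u32 *bits, u32 n)
{
	u32 i;

	linkmode_zero(mask);
	for (i = 0; i < n; i++)
		linkmode_set_bit(bits[i], mask);
}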
@@ -262,7 +257,7 @@ module_exit(qed_exit);
 /* Check if the DMA controller on the machine can properly handle the DMA
  * addressing required by the device.
  */
 static int qed_set_coherency_mask(struct qed_dev *cdev)
 {
 	struct device *dev = &cdev->pdev->dev;
@@ -547,7 +542,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
 		goto err2;
 	}
-	DP_INFO(cdev, "qed_probe completed successfully\n");
+	DP_INFO(cdev, "%s completed successfully\n", __func__);
 	return cdev;
@@ -980,7 +975,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	rc = qed_set_int_mode(cdev, false);
 	if (rc) {
-		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+		DP_ERR(cdev, "%s ERR\n", __func__);
 		return rc;
 	}
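Note on the DP_INFO()/DP_ERR() changes in the two hunks above: checkpatch flags format strings that embed the function name by hand, because they silently go stale if the function is renamed; printing __func__ keeps the message accurate automatically. A trivial illustration outside the qed debug macros (pr_info() is used here only for the sketch):

#include <linux/printk.h>

static void example_done(void)
{
	/* "%s" resolves to the enclosing function's name at compile time. */
	pr_info("%s completed successfully\n", __func__);
}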
@@ -1161,6 +1156,7 @@ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
 	/* Memory barrier for setting atomic bit */
 	smp_mb__before_atomic();
 	set_bit(wq_flag, &hwfn->slowpath_task_flags);
+	/* Memory barrier after setting atomic bit */
 	smp_mb__after_atomic();
 	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
@@ -1381,7 +1377,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 				      (params->drv_minor << 16) |
 				      (params->drv_rev << 8) |
 				      (params->drv_eng);
-		strlcpy(drv_version.name, params->name,
+		strscpy(drv_version.name, params->name,
 			MCP_DRV_VER_STR_SIZE - 4);
 		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
 					      &drv_version);
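Note on the strlcpy() to strscpy() switch above: strscpy() never reads more than the given size from the source, always NUL-terminates the destination for a non-zero size, and returns the number of characters copied or -E2BIG on truncation, so truncation is detectable. A small, hedged example with placeholder buffer names:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

static void copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_size);

	if (n == -E2BIG)
		pr_warn("%s: '%s' truncated\n", __func__, src);
}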
@@ -3078,8 +3074,10 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
 	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
 		   "Scheduling slowpath task [Flag: %d]\n",
 		   QED_SLOWPATH_MFW_TLV_REQ);
+	/* Memory barrier for setting atomic bit */
 	smp_mb__before_atomic();
 	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+	/* Memory barrier after setting atomic bit */
 	smp_mb__after_atomic();
 	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
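Note on the barrier hunks above (and the earlier qed_slowpath_delayed_work() one): the patch only adds the comments checkpatch expects next to memory barriers; the pattern itself is unchanged. The barriers order the set_bit() against surrounding accesses on architectures where bit operations are not already full barriers, so the worker that later tests the flag observes it set. A condensed, hedged sketch with placeholder flag and work names:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define EXAMPLE_TASK_FLAG	0	/* placeholder flag bit */

static void schedule_flagged_work(unsigned long *flags,
				  struct workqueue_struct *wq,
				  struct delayed_work *task,
				  unsigned long delay)
{
	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(EXAMPLE_TASK_FLAG, flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();

	/* Worker re-reads 'flags'; the barriers ensure it observes the bit. */
	queue_delayed_work(wq, task, delay);
}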
......
@@ -1527,15 +1527,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 	    FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
 		ext_speed = 0;
 		if (params->ext_speed.autoneg)
-			ext_speed |= ETH_EXT_SPEED_AN;
+			ext_speed |= ETH_EXT_SPEED_NONE;
 		val = params->ext_speed.forced_speed;
 		if (val & QED_EXT_SPEED_1G)
 			ext_speed |= ETH_EXT_SPEED_1G;
 		if (val & QED_EXT_SPEED_10G)
 			ext_speed |= ETH_EXT_SPEED_10G;
-		if (val & QED_EXT_SPEED_20G)
-			ext_speed |= ETH_EXT_SPEED_20G;
 		if (val & QED_EXT_SPEED_25G)
 			ext_speed |= ETH_EXT_SPEED_25G;
 		if (val & QED_EXT_SPEED_40G)
@@ -1561,8 +1559,6 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 			ext_speed |= ETH_EXT_ADV_SPEED_1G;
 		if (val & QED_EXT_SPEED_MASK_10G)
 			ext_speed |= ETH_EXT_ADV_SPEED_10G;
-		if (val & QED_EXT_SPEED_MASK_20G)
-			ext_speed |= ETH_EXT_ADV_SPEED_20G;
 		if (val & QED_EXT_SPEED_MASK_25G)
 			ext_speed |= ETH_EXT_ADV_SPEED_25G;
 		if (val & QED_EXT_SPEED_MASK_40G)
@@ -2446,9 +2442,6 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
 	case FUNC_MF_CFG_PROTOCOL_ISCSI:
 		*p_proto = QED_PCI_ISCSI;
 		break;
-	case FUNC_MF_CFG_PROTOCOL_NVMETCP:
-		*p_proto = QED_PCI_NVMETCP;
-		break;
 	case FUNC_MF_CFG_PROTOCOL_FCOE:
 		*p_proto = QED_PCI_FCOE;
 		break;
@@ -3389,7 +3382,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
 		type = NVM_TYPE_DEFAULT_CFG;
 		break;
 	case QED_NVM_IMAGE_NVM_META:
-		type = NVM_TYPE_META;
+		type = NVM_TYPE_NVM_META;
 		break;
 	default:
 		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
......