Commit f3a6f592 authored by David S. Miller

Merge branch 'qed-next'

Yuval Mintz says:

====================
qed*: Patch series

This series does several things. The bigger changes:

 - Add new notification APIs [& Defaults] for various fields.
The series then utilizes some of those qed <-> qede APIs to base WoL
support on.

 - Change the resource allocation scheme to receive the values from
management firmware, instead of equally sharing resources between
functions [that might not need them]. That would, e.g., allow us to
configure additional filters for network interfaces in the presence of
storage [PCI] functions on the same adapter.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 89d9123e 2edbff8d
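
At the core of the series is a small notification surface: four new callbacks in qed_common_ops that qede invokes whenever the driver state, MAC, MTU or WoL configuration changes, so qed can forward them to the management firmware. Below is a condensed sketch of the shape only; the authoritative definitions are in the qed_if.h and qed_main.c hunks further down.

/* Condensed sketch; existing callbacks and full struct contents elided */
struct qed_common_ops {
	/* ... */
	int (*update_drv_state)(struct qed_dev *cdev, bool active);
	int (*update_mac)(struct qed_dev *cdev, u8 *mac);
	int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
	int (*update_wol)(struct qed_dev *cdev, bool enabled);
};

/* e.g., qede_open()/qede_close() report the driver state to the MFW */
edev->ops->common->update_drv_state(edev->cdev, true);
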
......@@ -154,7 +154,10 @@ struct qed_qm_iids {
u32 tids;
};
enum QED_RESOURCES {
/* HW / FW resources, output of features supported below, most information
* is received from MFW.
*/
enum qed_resources {
QED_SB,
QED_L2_QUEUE,
QED_VPORT,
......@@ -166,6 +169,7 @@ enum QED_RESOURCES {
QED_RDMA_CNQ_RAM,
QED_ILT,
QED_LL2_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_MAX_RESC,
};
......@@ -174,6 +178,7 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
QED_VF_L2_QUE,
QED_MAX_FEATURES,
};
......@@ -195,6 +200,11 @@ enum qed_dev_cap {
QED_DEV_CAP_ROCE,
};
enum qed_wol_support {
QED_WOL_SUPPORT_NONE,
QED_WOL_SUPPORT_PME,
};
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
......@@ -226,6 +236,9 @@ struct qed_hw_info {
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
u16 mtu;
enum qed_wol_support b_wol_support;
};
struct qed_hw_cid_data {
......@@ -538,7 +551,9 @@ struct qed_dev {
u8 mcp_rev;
u8 boot_mode;
u8 wol;
/* WoL related configurations */
u8 wol_config;
u8 wol_mac[ETH_ALEN];
u32 int_mode;
enum qed_coalescing_mode int_coalescing_mode;
......
......@@ -1057,8 +1057,10 @@ int qed_hw_init(struct qed_dev *cdev,
bool allow_npar_tx_switch,
const u8 *bin_fw_data)
{
u32 load_code, param;
int rc, mfw_rc, i;
u32 load_code, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
......@@ -1074,6 +1076,12 @@ int qed_hw_init(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
/* If management didn't provide a default, set one of our own */
if (!p_hwfn->hw_info.mtu) {
p_hwfn->hw_info.mtu = 1500;
b_default_mtu = false;
}
if (IS_VF(cdev)) {
p_hwfn->b_int_enabled = 1;
continue;
......@@ -1157,6 +1165,38 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->hw_init_done = true;
}
if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
drv_mb_param = (FW_MAJOR_VERSION << 24) |
(FW_MINOR_VERSION << 16) |
(FW_REVISION_VERSION << 8) |
(FW_ENGINEERING_VERSION);
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
drv_mb_param, &load_code, &param);
if (rc)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
if (!b_default_mtu) {
rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.mtu);
if (rc)
DP_INFO(p_hwfn,
"Failed to update default mtu\n");
}
rc = qed_mcp_ov_update_driver_state(p_hwfn,
p_hwfn->p_main_ptt,
QED_OV_DRIVER_STATE_DISABLED);
if (rc)
DP_INFO(p_hwfn, "Failed to update driver state\n");
rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
QED_OV_ESWITCH_VEB);
if (rc)
DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
}
return 0;
}
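
The Storm firmware version reported to the MFW is packed one byte per component into the mailbox parameter. A standalone worked example, using hypothetical version numbers 8.10.5.0:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical version 8.10.5.0, packed exactly as qed_hw_init() does:
 * (major << 24) | (minor << 16) | (revision << 8) | engineering
 */
int main(void)
{
	uint32_t drv_mb_param = (8u << 24) | (10u << 16) | (5u << 8) | 0u;

	printf("drv_mb_param = 0x%08x\n", drv_mb_param); /* 0x080a0500 */
	return 0;
}
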
......@@ -1324,8 +1364,24 @@ int qed_hw_reset(struct qed_dev *cdev)
{
int rc = 0;
u32 unload_resp, unload_param;
u32 wol_param;
int i;
switch (cdev->wol_config) {
case QED_OV_WOL_DISABLED:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
break;
case QED_OV_WOL_ENABLED:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
break;
default:
DP_NOTICE(cdev,
"Unknown WoL configuration %02x\n", cdev->wol_config);
/* Fallthrough */
case QED_OV_WOL_DEFAULT:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
......@@ -1354,8 +1410,7 @@ int qed_hw_reset(struct qed_dev *cdev)
/* Send unload command to MCP */
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_UNLOAD_REQ,
DRV_MB_PARAM_UNLOAD_WOL_MCP,
DRV_MSG_CODE_UNLOAD_REQ, wol_param,
&unload_resp, &unload_param);
if (rc) {
DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
......@@ -1421,6 +1476,7 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
struct qed_sb_cnt_info sb_cnt_info;
int num_features = 1;
if (IS_ENABLED(CONFIG_QED_RDMA) &&
......@@ -1439,53 +1495,257 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
"#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
num_features);
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
feat_num[QED_VF_L2_QUE] =
min_t(u32,
RESC_NUM(p_hwfn, QED_L2_QUEUE) -
FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
"#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
RESC_NUM(p_hwfn, QED_SB), num_features);
}
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
{
enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
switch (res_id) {
case QED_SB:
mfw_res_id = RESOURCE_NUM_SB_E;
break;
case QED_L2_QUEUE:
mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
break;
case QED_VPORT:
mfw_res_id = RESOURCE_NUM_VPORT_E;
break;
case QED_RSS_ENG:
mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
break;
case QED_PQ:
mfw_res_id = RESOURCE_NUM_PQ_E;
break;
case QED_RL:
mfw_res_id = RESOURCE_NUM_RL_E;
break;
case QED_MAC:
case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
mfw_res_id = RESOURCE_VFC_FILTER_E;
break;
case QED_ILT:
mfw_res_id = RESOURCE_ILT_E;
break;
case QED_LL2_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
mfw_res_id = RESOURCE_CQS_E;
break;
case QED_RDMA_STATS_QUEUE:
mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
break;
default:
break;
}
return mfw_res_id;
}
static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
enum qed_resources res_id)
{
u8 enabled_func_idx = p_hwfn->enabled_func_idx;
u32 *resc_start = p_hwfn->hw_info.resc_start;
u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info;
int i, max_vf_vlan_filters;
u32 dflt_resc_num = 0;
switch (res_id) {
case QED_SB:
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
dflt_resc_num = sb_cnt_info.sb_cnt;
break;
case QED_L2_QUEUE:
dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
break;
case QED_VPORT:
dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
break;
case QED_RSS_ENG:
dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
break;
case QED_PQ:
/* The granularity of the PQs is 8 */
dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
dflt_resc_num &= ~0x7;
break;
case QED_RL:
dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
break;
case QED_LL2_QUEUE:
dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
break;
default:
break;
}
#ifdef CONFIG_QED_SRIOV
max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
#else
max_vf_vlan_filters = 0;
#endif
return dflt_resc_num;
}
static const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
switch (res_id) {
case QED_SB:
return "SB";
case QED_L2_QUEUE:
return "L2_QUEUE";
case QED_VPORT:
return "VPORT";
case QED_RSS_ENG:
return "RSS_ENG";
case QED_PQ:
return "PQ";
case QED_RL:
return "RL";
case QED_MAC:
return "MAC";
case QED_VLAN:
return "VLAN";
case QED_RDMA_CNQ_RAM:
return "RDMA_CNQ_RAM";
case QED_ILT:
return "ILT";
case QED_LL2_QUEUE:
return "LL2_QUEUE";
case QED_CMDQS_CQS:
return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
return "RDMA_STATS_QUEUE";
default:
return "UNKNOWN_RESOURCE";
}
}
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
enum qed_resources res_id)
{
u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
u32 *p_resc_num, *p_resc_start;
struct resource_info resc_info;
int rc;
resc_num[QED_SB] = min_t(u32,
(MAX_SB_PER_PATH_BB / num_funcs),
sb_cnt_info.sb_cnt);
resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
num_funcs;
resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB /
num_funcs;
for (i = 0; i < QED_MAX_RESC; i++)
resc_start[i] = resc_num[i] * enabled_func_idx;
p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id);
/* Default values assume that each function receives an equal share */
dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
if (!dflt_resc_num) {
DP_ERR(p_hwfn,
"Failed to get default amount for resource %d [%s]\n",
res_id, qed_hw_get_resc_name(res_id));
return -EINVAL;
}
dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
memset(&resc_info, 0, sizeof(resc_info));
resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
if (resc_info.res_id == RESOURCE_NUM_INVALID) {
DP_ERR(p_hwfn,
"Failed to match resource %d [%s] with the MFW resources\n",
res_id, qed_hw_get_resc_name(res_id));
return -EINVAL;
}
rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
&mcp_resp, &mcp_param);
if (rc) {
DP_NOTICE(p_hwfn,
"MFW response failure for an allocation request for resource %d [%s]\n",
res_id, qed_hw_get_resc_name(res_id));
return rc;
}
/* Default driver values are applied in the following cases:
* - The resource allocation MB command is not supported by the MFW
* - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW
*/
if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
DP_NOTICE(p_hwfn,
"Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
res_id,
qed_hw_get_resc_name(res_id),
mcp_resp, dflt_resc_num, dflt_resc_start);
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
goto out;
}
/* Special handling for status blocks; will be revised in the future */
if (res_id == QED_SB) {
resc_info.size -= 1;
resc_info.offset -= p_hwfn->enabled_func_idx;
}
*p_resc_num = resc_info.size;
*p_resc_start = resc_info.offset;
out:
/* PQs have to be aligned in multiples of 8 [that's the HW granularity].
 * Reduce the number so it fits.
 */
if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
DP_INFO(p_hwfn,
"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
*p_resc_num,
(*p_resc_num) & ~0x7,
*p_resc_start, (*p_resc_start) & ~0x7);
*p_resc_num &= ~0x7;
*p_resc_start &= ~0x7;
}
return 0;
}
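
To make the alignment fixup concrete, a standalone example with hypothetical values: a grant of 30 PQs starting at offset 12 is masked down to 24 and 8 respectively:

#include <stdio.h>

/* Hypothetical grant of 30 PQs at offset 12, masked to the HW's
 * granularity of 8 exactly as qed_hw_set_resc_info() does above.
 */
int main(void)
{
	unsigned int num = 30, start = 12;

	printf("Number %08x --> %08x, Start %08x --> %08x\n",
	       num, num & ~0x7u, start, start & ~0x7u);
	/* Number 0000001e --> 00000018, Start 0000000c --> 00000008 */
	return 0;
}
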
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
u8 res_id;
int rc;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
rc = qed_hw_set_resc_info(p_hwfn, res_id);
if (rc)
return rc;
}
/* Sanity for ILT */
if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
RESC_START(p_hwfn, QED_ILT),
RESC_END(p_hwfn, QED_ILT) - 1);
......@@ -1495,34 +1755,12 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
qed_hw_set_feat(p_hwfn);
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
"The numbers for each resource are:\n"
"SB = %d start = %d\n"
"L2_QUEUE = %d start = %d\n"
"VPORT = %d start = %d\n"
"PQ = %d start = %d\n"
"RL = %d start = %d\n"
"MAC = %d start = %d\n"
"VLAN = %d start = %d\n"
"ILT = %d start = %d\n"
"LL2_QUEUE = %d start = %d\n",
p_hwfn->hw_info.resc_num[QED_SB],
p_hwfn->hw_info.resc_start[QED_SB],
p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
p_hwfn->hw_info.resc_num[QED_VPORT],
p_hwfn->hw_info.resc_start[QED_VPORT],
p_hwfn->hw_info.resc_num[QED_PQ],
p_hwfn->hw_info.resc_start[QED_PQ],
p_hwfn->hw_info.resc_num[QED_RL],
p_hwfn->hw_info.resc_start[QED_RL],
p_hwfn->hw_info.resc_num[QED_MAC],
p_hwfn->hw_info.resc_start[QED_MAC],
p_hwfn->hw_info.resc_num[QED_VLAN],
p_hwfn->hw_info.resc_start[QED_VLAN],
p_hwfn->hw_info.resc_num[QED_ILT],
p_hwfn->hw_info.resc_start[QED_ILT],
RESC_NUM(p_hwfn, QED_LL2_QUEUE),
RESC_START(p_hwfn, QED_LL2_QUEUE));
"The numbers for each resource are:\n");
for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
qed_hw_get_resc_name(res_id),
RESC_NUM(p_hwfn, res_id),
RESC_START(p_hwfn, res_id));
return 0;
}
......@@ -1801,6 +2039,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
qed_get_num_funcs(p_hwfn, p_ptt);
if (qed_mcp_is_init(p_hwfn))
p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
return qed_hw_get_resc(p_hwfn);
}
......@@ -1975,8 +2216,13 @@ int qed_hw_prepare(struct qed_dev *cdev,
void qed_hw_remove(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
int i;
if (IS_PF(cdev))
qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
QED_OV_DRIVER_STATE_NOT_LOADED);
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
......
......@@ -8529,6 +8529,41 @@ struct mdump_config_stc {
u32 valid_logs;
};
enum resource_id_enum {
RESOURCE_NUM_SB_E = 0,
RESOURCE_NUM_L2_QUEUE_E = 1,
RESOURCE_NUM_VPORT_E = 2,
RESOURCE_NUM_VMQ_E = 3,
RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
RESOURCE_FACTOR_RSS_PER_VF_E = 5,
RESOURCE_NUM_RL_E = 6,
RESOURCE_NUM_PQ_E = 7,
RESOURCE_NUM_VF_E = 8,
RESOURCE_VFC_FILTER_E = 9,
RESOURCE_ILT_E = 10,
RESOURCE_CQS_E = 11,
RESOURCE_GFT_PROFILES_E = 12,
RESOURCE_NUM_TC_E = 13,
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
/* Resource ID is to be filled by the driver in the MB request
* Size, offset & flags to be filled by the MFW in the MB response
*/
struct resource_info {
enum resource_id_enum res_id;
u32 size; /* number of allocated resources */
u32 offset; /* Offset of the 1st resource */
u32 vf_size;
u32 vf_offset;
u32 flags;
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
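
The round trip for a single resource then looks roughly as follows. This is a minimal sketch: query_l2_queues() is a hypothetical wrapper around the qed_mcp_get_resc_info() helper added by this series, with error handling reduced to the essentials.

/* Minimal sketch. The driver fills res_id in the request; the MFW
 * fills size and offset in the response.
 */
static int query_l2_queues(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct resource_info resc_info = {
		.res_id = RESOURCE_NUM_L2_QUEUE_E,
	};
	u32 mcp_resp, mcp_param;
	int rc;

	rc = qed_mcp_get_resc_info(p_hwfn, p_ptt, &resc_info,
				   &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Old MFWs answer UNKNOWN/DEPRECATED; callers then fall back to
	 * the equal-share defaults.
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		return -EINVAL;

	/* resc_info.size and resc_info.offset now hold the allocation */
	return 0;
}
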
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
......@@ -8549,6 +8584,7 @@ union drv_union_data {
u64 reserved_stats[11];
struct ocbb_data_stc ocbb_info;
struct temperature_status_stc temp_info;
struct resource_info resource;
struct bist_nvm_image_att nvm_image_att;
struct mdump_config_stc mdump_config;
};
......@@ -8564,9 +8600,19 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INIT_PHY 0x22000000
#define DRV_MSG_CODE_LINK_RESET 0x23000000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
......@@ -8574,6 +8620,13 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
#define DRV_MSG_CODE_MCP_HALT 0x00100000
#define DRV_MSG_CODE_SET_VMAC 0x00110000
#define DRV_MSG_CODE_GET_VMAC 0x00120000
#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
#define DRV_MSG_CODE_GET_STATS 0x00130000
#define DRV_MSG_CODE_STATS_TYPE_LAN 1
......@@ -8585,11 +8638,16 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
......@@ -8602,13 +8660,59 @@ struct public_drv_mb {
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
DRV_MB_PARAM_WOL_DISABLED | \
DRV_MB_PARAM_WOL_ENABLED)
#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
DRV_MB_PARAM_ESWITCH_MODE_VEB | \
DRV_MB_PARAM_ESWITCH_MODE_VEPA)
#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
/* Resource Allocation params - Driver version support */
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
#define DRV_MB_PARAM_BIST_RC_PASSED 1
......@@ -8617,6 +8721,8 @@ struct public_drv_mb {
#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
......@@ -8631,15 +8737,27 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
#define FW_MSG_CODE_NVM_OK 0x00010000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
/* get pf rdma protocol command response */
#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
......
......@@ -3030,6 +3030,31 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
}
}
/* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
* the number of VF SBs [especially for first VF on engine, as we can't
* differentiate between empty entries and its entries].
* Since we don't really support more SBs than VFs today, prevent any
* such configuration by sanitizing the number of SBs to equal the
* number of VFs.
*/
if (IS_PF_SRIOV(p_hwfn)) {
u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
if (total_vfs < p_igu_info->free_blks) {
DP_VERBOSE(p_hwfn,
(NETIF_MSG_INTR | QED_MSG_IOV),
"Limiting number of SBs for IOV - %04x --> %04x\n",
p_igu_info->free_blks,
p_hwfn->cdev->p_iov_info->total_vfs);
p_igu_info->free_blks = total_vfs;
} else if (total_vfs > p_igu_info->free_blks) {
DP_NOTICE(p_hwfn,
"IGU has only %04x SBs for VFs while the device has %04x VFs\n",
p_igu_info->free_blks, total_vfs);
return -EINVAL;
}
}
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(
......@@ -3163,7 +3188,12 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
return sb_id - p_info->igu_base_sb;
} else if ((sb_id >= p_info->igu_base_sb_iov) &&
(sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
/* We want the first VF queue to be adjacent to the
 * last PF queue. Since there can be fewer L2 queues
 * than SBs, we'll use the feature count instead.
 */
return sb_id - p_info->igu_base_sb_iov +
FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
} else {
DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
return 0;
......
......@@ -1691,7 +1691,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
}
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
(u8 *)&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
......
......@@ -221,6 +221,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode;
dev_info->tx_switching = true;
if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
QED_WOL_SUPPORT_PME)
dev_info->wol_support = true;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
......@@ -243,6 +247,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
&dev_info->mfw_rev, NULL);
}
dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
return 0;
}
......@@ -1431,11 +1437,106 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
int rc = 0;
if (IS_VF(cdev))
return 0;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EAGAIN;
rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
: QED_OV_WOL_DISABLED);
if (rc)
goto out;
rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
out:
qed_ptt_release(hwfn, ptt);
return rc;
}
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
int status = 0;
if (IS_VF(cdev))
return 0;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EAGAIN;
status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
QED_OV_DRIVER_STATE_ACTIVE :
QED_OV_DRIVER_STATE_DISABLED);
qed_ptt_release(hwfn, ptt);
return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
int status = 0;
if (IS_VF(cdev))
return 0;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EAGAIN;
status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
if (status)
goto out;
status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
out:
qed_ptt_release(hwfn, ptt);
return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
int status = 0;
if (IS_VF(cdev))
return 0;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EAGAIN;
status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
if (status)
goto out;
status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
out:
qed_ptt_release(hwfn, ptt);
return status;
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
.selftest_clock = &qed_selftest_clock,
.selftest_nvram = &qed_selftest_nvram,
};
const struct qed_common_ops qed_common_ops_pass = {
......@@ -1465,6 +1566,10 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
.update_drv_state = &qed_update_drv_state,
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
.update_wol = &qed_update_wol,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
......
......@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
......@@ -329,6 +330,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_mcp_mb_params *p_mb_params)
{
u32 union_data_addr;
int rc;
/* MCP not initialized */
......@@ -374,11 +376,32 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param)
{
struct qed_mcp_mb_params mb_params;
union drv_union_data data_src;
int rc;
memset(&mb_params, 0, sizeof(mb_params));
memset(&data_src, 0, sizeof(data_src));
mb_params.cmd = cmd;
mb_params.param = param;
/* In case of UNLOAD_DONE, set the primary MAC */
if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
(p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
u8 *p_mac = p_hwfn->cdev->wol_mac;
data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
p_mac[4] << 8 | p_mac[5];
DP_VERBOSE(p_hwfn,
(QED_MSG_SP | NETIF_MSG_IFDOWN),
"Setting WoL MAC: %pM --> [%08x,%08x]\n",
p_mac, data_src.wol_mac.mac_upper,
data_src.wol_mac.mac_lower);
mb_params.p_data_src = &data_src;
}
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
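
The 6-byte MAC is split into a 16-bit upper and a 32-bit lower word for the mailbox union. A standalone worked example with a hypothetical MAC of 00:11:22:33:44:55:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical MAC 00:11:22:33:44:55 packed as in qed_mcp_cmd() above */
int main(void)
{
	uint8_t m[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t mac_upper = (uint16_t)(m[0] << 8 | m[1]);
	uint32_t mac_lower = (uint32_t)m[2] << 24 | (uint32_t)m[3] << 16 |
			     (uint32_t)m[4] << 8 | m[5];

	printf("[%04x,%08x]\n", mac_upper, mac_lower); /* [0011,22334455] */
	return 0;
}
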
......@@ -1001,28 +1024,89 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
return 0;
}
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
enum qed_pci_personality *p_proto)
{
/* There wasn't ever a legacy MFW that published iwarp.
* So at this point, this is either plain l2 or RoCE.
*/
if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
*p_proto = QED_PCI_ETH_ROCE;
else
*p_proto = QED_PCI_ETH;
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"According to Legacy capabilities, L2 personality is %08x\n",
(u32) *p_proto);
}
static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_pci_personality *p_proto)
{
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
if (rc)
return rc;
if (resp != FW_MSG_CODE_OK) {
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"MFW lacks support for command; Returns %08x\n",
resp);
return -EINVAL;
}
switch (param) {
case FW_MB_PARAM_GET_PF_RDMA_NONE:
*p_proto = QED_PCI_ETH;
break;
case FW_MB_PARAM_GET_PF_RDMA_ROCE:
*p_proto = QED_PCI_ETH_ROCE;
break;
case FW_MB_PARAM_GET_PF_RDMA_BOTH:
DP_NOTICE(p_hwfn,
"Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
*p_proto = QED_PCI_ETH_ROCE;
break;
case FW_MB_PARAM_GET_PF_RDMA_IWARP:
default:
DP_NOTICE(p_hwfn,
"MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
param);
return -EINVAL;
}
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFUP,
"According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
(u32) *p_proto, resp, param);
return 0;
}
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
struct public_func *p_info,
struct qed_ptt *p_ptt,
enum qed_pci_personality *p_proto)
{
int rc = 0;
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
if (test_bit(QED_DEV_CAP_ROCE,
&p_hwfn->hw_info.device_capabilities))
*p_proto = QED_PCI_ETH_ROCE;
else
*p_proto = QED_PCI_ETH;
if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
break;
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
case FUNC_MF_CFG_PROTOCOL_ROCE:
DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
rc = -EINVAL;
break;
/* Fallthrough */
default:
rc = -EINVAL;
}
......@@ -1042,7 +1126,8 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
&info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return -EINVAL;
......@@ -1057,6 +1142,9 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
info->mac[5] = (u8)(shmem_info.mac_lower);
/* Store primary MAC for later possible WoL */
memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
} else {
DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
}
......@@ -1068,13 +1156,30 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
info->mtu = (u16)shmem_info.mtu_size;
p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
if (qed_mcp_is_init(p_hwfn)) {
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
if (rc)
return rc;
if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
}
DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
"Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
"Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
info->pause_on_host, info->protocol,
info->bandwidth_min, info->bandwidth_max,
info->mac[0], info->mac[1], info->mac[2],
info->mac[3], info->mac[4], info->mac[5],
info->wwn_port, info->wwn_node, info->ovlan);
info->wwn_port, info->wwn_node,
info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
return 0;
}
......@@ -1223,6 +1328,178 @@ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_client client)
{
u32 resp = 0, param = 0;
u32 drv_mb_param;
int rc;
switch (client) {
case QED_OV_CLIENT_DRV:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
break;
case QED_OV_CLIENT_USER:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
break;
case QED_OV_CLIENT_VENDOR_SPEC:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
break;
default:
DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
return -EINVAL;
}
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
drv_mb_param, &resp, &param);
if (rc)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
return rc;
}
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_driver_state drv_state)
{
u32 resp = 0, param = 0;
u32 drv_mb_param;
int rc;
switch (drv_state) {
case QED_OV_DRIVER_STATE_NOT_LOADED:
drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
break;
case QED_OV_DRIVER_STATE_DISABLED:
drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
break;
case QED_OV_DRIVER_STATE_ACTIVE:
drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
break;
default:
DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
return -EINVAL;
}
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
drv_mb_param, &resp, &param);
if (rc)
DP_ERR(p_hwfn, "Failed to send driver state\n");
return rc;
}
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 mtu)
{
u32 resp = 0, param = 0;
u32 drv_mb_param;
int rc;
drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
drv_mb_param, &resp, &param);
if (rc)
DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
return rc;
}
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *mac)
{
struct qed_mcp_mb_params mb_params;
union drv_union_data union_data;
int rc;
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
DRV_MSG_CODE_VMAC_TYPE_SHIFT;
mb_params.param |= MCP_PF_ID(p_hwfn);
ether_addr_copy(&union_data.raw_data[0], mac);
mb_params.p_data_src = &union_data;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
/* Store primary MAC for later possible WoL */
memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
return rc;
}
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
u32 resp = 0, param = 0;
u32 drv_mb_param;
int rc;
if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Can't change WoL configuration when WoL isn't supported\n");
return -EINVAL;
}
switch (wol) {
case QED_OV_WOL_DEFAULT:
drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
break;
case QED_OV_WOL_DISABLED:
drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
break;
case QED_OV_WOL_ENABLED:
drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
break;
default:
DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
return -EINVAL;
}
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
drv_mb_param, &resp, &param);
if (rc)
DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
/* Store the WoL update for a future unload */
p_hwfn->cdev->wol_config = (u8)wol;
return rc;
}
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_eswitch eswitch)
{
u32 resp = 0, param = 0;
u32 drv_mb_param;
int rc;
switch (eswitch) {
case QED_OV_ESWITCH_NONE:
drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
break;
case QED_OV_ESWITCH_VEB:
drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
break;
case QED_OV_ESWITCH_VEPA:
drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
break;
default:
DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
return -EINVAL;
}
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
drv_mb_param, &resp, &param);
if (rc)
DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
return rc;
}
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
......@@ -1271,6 +1548,52 @@ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
return rc;
}
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
u32 resp = 0, resp_param = 0;
struct qed_ptt *p_ptt;
int rc = 0;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
while (bytes_left > 0) {
bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_NVM_READ_NVRAM,
addr + offset +
(bytes_to_copy <<
DRV_MB_PARAM_NVM_LEN_SHIFT),
&resp, &resp_param,
&read_len,
(u32 *)(p_buf + offset));
if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
break;
}
/* This can be a lengthy process, and it's possible the scheduler
 * isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
(bytes_left - read_len) % 0x1000)
usleep_range(1000, 2000);
offset += read_len;
bytes_left -= read_len;
}
cdev->mcp_nvm_resp = resp;
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
......@@ -1312,3 +1635,93 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return rc;
}
int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *num_images)
{
u32 drv_mb_param = 0, rsp;
int rc = 0;
drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, num_images);
if (rc)
return rc;
if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
rc = -EINVAL;
return rc;
}
int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct bist_nvm_image_att *p_image_att,
u32 image_index)
{
u32 buf_size = 0, param, resp = 0, resp_param = 0;
int rc;
param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_BIST_TEST, param,
&resp, &resp_param,
&buf_size,
(u32 *)p_image_att);
if (rc)
return rc;
if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
(p_image_att->return_code != 1))
rc = -EINVAL;
return rc;
}
#define QED_RESC_ALLOC_VERSION_MAJOR 1
#define QED_RESC_ALLOC_VERSION_MINOR 0
#define QED_RESC_ALLOC_VERSION \
((QED_RESC_ALLOC_VERSION_MAJOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
(QED_RESC_ALLOC_VERSION_MINOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct resource_info *p_resc_info,
u32 *p_mcp_resp, u32 *p_mcp_param)
{
struct qed_mcp_mb_params mb_params;
union drv_union_data *p_union_data;
int rc;
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
mb_params.param = QED_RESC_ALLOC_VERSION;
p_union_data = (union drv_union_data *)p_resc_info;
mb_params.p_data_src = p_union_data;
mb_params.p_data_dst = p_union_data;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
*p_mcp_resp = mb_params.mcp_resp;
*p_mcp_param = mb_params.mcp_param;
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
*p_mcp_param,
p_resc_info->res_id,
p_resc_info->size,
p_resc_info->offset,
p_resc_info->vf_size,
p_resc_info->vf_offset, p_resc_info->flags);
return 0;
}
......@@ -92,6 +92,8 @@ struct qed_mcp_function_info {
#define QED_MCP_VLAN_UNSET (0xffff)
u16 ovlan;
u16 mtu;
};
struct qed_mcp_nvm_common {
......@@ -147,6 +149,30 @@ union qed_mcp_protocol_stats {
struct qed_mcp_rdma_stats rdma_stats;
};
enum qed_ov_eswitch {
QED_OV_ESWITCH_NONE,
QED_OV_ESWITCH_VEB,
QED_OV_ESWITCH_VEPA
};
enum qed_ov_client {
QED_OV_CLIENT_DRV,
QED_OV_CLIENT_USER,
QED_OV_CLIENT_VENDOR_SPEC
};
enum qed_ov_driver_state {
QED_OV_DRIVER_STATE_NOT_LOADED,
QED_OV_DRIVER_STATE_DISABLED,
QED_OV_DRIVER_STATE_ACTIVE
};
enum qed_ov_wol {
QED_OV_WOL_DEFAULT,
QED_OV_WOL_DISABLED,
QED_OV_WOL_ENABLED
};
/**
* @brief - returns the link params of the hw function
*
......@@ -277,6 +303,69 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver);
/**
* @brief Notify MFW about the change in base device properties
*
* @param p_hwfn
* @param p_ptt
* @param client - qed client type
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_client client);
/**
* @brief Notify MFW about the driver state
*
* @param p_hwfn
* @param p_ptt
* @param drv_state - Driver state
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_driver_state drv_state);
/**
* @brief Send MTU size to MFW
*
* @param p_hwfn
* @param p_ptt
* @param mtu - MTU size
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 mtu);
/**
* @brief Send MAC address to MFW
*
* @param p_hwfn
* @param p_ptt
* @param mac - MAC address
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *mac);
/**
* @brief Send WOL mode to MFW
*
* @param p_hwfn
* @param p_ptt
* @param wol - WOL mode
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_wol wol);
/**
* @brief Set LED status
*
......@@ -290,6 +379,18 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
/**
* @brief Read from nvm
*
* @param cdev
* @param addr - nvm offset
* @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
/**
* @brief Bist register test
*
......@@ -312,6 +413,35 @@ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief Bist nvm test - get number of images
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param num_images - number of images if operation was
* successful. 0 if not.
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *num_images);
/**
* @brief Bist nvm test - get image attributes by index
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param p_image_att - Attributes of image
* @param image_index - Index of image to get information for
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct bist_nvm_image_att *p_image_att,
u32 image_index);
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
......@@ -546,4 +676,32 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
/**
* @brief Send eswitch mode to MFW
*
* @param p_hwfn
* @param p_ptt
* @param eswitch - eswitch mode
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_eswitch eswitch);
/**
* @brief - Gets the MFW allocation info for the given resource
*
* @param p_hwfn
* @param p_ptt
* @param p_resc_info - descriptor of requested resource
* @param p_mcp_resp
* @param p_mcp_param
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct resource_info *p_resc_info,
u32 *p_mcp_resp, u32 *p_mcp_param);
#endif
#include <linux/crc32.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
......@@ -75,3 +76,103 @@ int qed_selftest_clock(struct qed_dev *cdev)
return rc;
}
int qed_selftest_nvram(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 num_images, i, j, nvm_crc, calc_crc;
struct bist_nvm_image_att image_att;
u8 *buf = NULL;
__be32 val;
int rc;
if (!p_ptt) {
DP_ERR(p_hwfn, "failed to acquire ptt\n");
return -EBUSY;
}
/* Acquire from MFW the amount of available images */
rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
if (rc || !num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
return -EINVAL;
}
/* Iterate over images and validate CRC */
for (i = 0; i < num_images; i++) {
/* This mailbox returns information about the image required for
* reading it.
*/
rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
&image_att, i);
if (rc) {
DP_ERR(p_hwfn,
"Failed getting image index %d attributes\n",
i);
goto err0;
}
/* After an MFW crash dump is collected, the image's CRC is no
 * longer valid.
 */
if (image_att.image_type == NVM_TYPE_MDUMP)
continue;
DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n",
i, image_att.len);
/* Allocate a buffer for holding the nvram image */
buf = kzalloc(image_att.len, GFP_KERNEL);
if (!buf) {
rc = -ENOMEM;
goto err0;
}
/* Read image into buffer */
rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr,
buf, image_att.len);
if (rc) {
DP_ERR(p_hwfn,
"Failed reading image index %d from nvm.\n", i);
goto err1;
}
/* Convert the buffer into big-endian format (excluding the
* closing 4 bytes of CRC).
*/
for (j = 0; j < image_att.len - 4; j += 4) {
val = cpu_to_be32(*(u32 *)&buf[j]);
*(u32 *)&buf[j] = (__force u32)val;
}
/* Calc CRC for the "actual" image buffer, i.e. not including
* the last 4 CRC bytes.
*/
nvm_crc = *(u32 *)(buf + image_att.len - 4);
calc_crc = crc32(0xffffffff, buf, image_att.len - 4);
calc_crc = (__force u32)~cpu_to_be32(calc_crc);
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc);
if (calc_crc != nvm_crc) {
rc = -EINVAL;
goto err1;
}
/* Done with this image; Free to prevent double release
* on subsequent failure.
*/
kfree(buf);
buf = NULL;
}
qed_ptt_release(p_hwfn, p_ptt);
return 0;
err1:
kfree(buf);
err0:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
......@@ -37,4 +37,14 @@ int qed_selftest_register(struct qed_dev *cdev);
* @return int
*/
int qed_selftest_clock(struct qed_dev *cdev);
/**
* @brief qed_selftest_nvram - Perform nvram test
*
* @param cdev
*
* @return int
*/
int qed_selftest_nvram(struct qed_dev *cdev);
#endif
......@@ -3470,7 +3470,6 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
struct qed_sb_cnt_info sb_cnt_info;
int i, j, rc;
if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
......@@ -3483,7 +3482,11 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
for_each_hwfn(cdev, j) {
struct qed_hwfn *hwfn = &cdev->hwfns[j];
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
int num_sbs = 0, limit = 16;
int num_queues;
/* Make sure not to use more than 16 queues per VF */
num_queues = min_t(int,
FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, 16);
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
......@@ -3491,19 +3494,11 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
goto err;
}
if (IS_MF_DEFAULT(hwfn))
limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(hwfn, &sb_cnt_info);
num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
for (i = 0; i < num; i++) {
if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
continue;
rc = qed_iov_init_hw_for_vf(hwfn,
ptt, i, num_sbs / num);
rc = qed_iov_init_hw_for_vf(hwfn, ptt, i, num_queues);
if (rc) {
DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
qed_ptt_release(hwfn, ptt);
......
......@@ -193,6 +193,8 @@ struct qede_dev {
u16 vxlan_dst_port;
u16 geneve_dst_port;
bool wol_enabled;
struct qede_rdma_dev rdma_info;
};
......
......@@ -157,6 +157,7 @@ enum qede_ethtool_tests {
QEDE_ETHTOOL_MEMORY_TEST,
QEDE_ETHTOOL_REGISTER_TEST,
QEDE_ETHTOOL_CLOCK_TEST,
QEDE_ETHTOOL_NVRAM_TEST,
QEDE_ETHTOOL_TEST_MAX
};
......@@ -166,6 +167,7 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
"Memory (online)\t\t",
"Register (online)\t",
"Clock (online)\t\t",
"Nvram (online)\t\t",
};
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
......@@ -318,7 +320,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{ \
int i; \
\
for (i = 0; i < QED_LM_COUNT; i++) { \
for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
if ((caps) & (qed_lm_map[i].qed_link_mode)) \
__set_bit(qed_lm_map[i].ethtool_link_mode,\
lk_ksettings->link_modes.name); \
......@@ -329,7 +331,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{ \
int i; \
\
for (i = 0; i < QED_LM_COUNT; i++) { \
for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
if (test_bit(qed_lm_map[i].ethtool_link_mode, \
lk_ksettings->link_modes.name)) \
caps |= qed_lm_map[i].qed_link_mode; \
......@@ -481,6 +483,45 @@ static void qede_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct qede_dev *edev = netdev_priv(ndev);
if (edev->dev_info.common.wol_support) {
wol->supported = WAKE_MAGIC;
wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
}
}
static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct qede_dev *edev = netdev_priv(ndev);
bool wol_requested;
int rc;
if (wol->wolopts & ~WAKE_MAGIC) {
DP_INFO(edev,
"Can't support WoL options other than magic-packet\n");
return -EINVAL;
}
wol_requested = !!(wol->wolopts & WAKE_MAGIC);
if (wol_requested == edev->wol_enabled)
return 0;
/* Need to actually change configuration */
if (!edev->dev_info.common.wol_support) {
DP_INFO(edev, "Device doesn't support WoL\n");
return -EINVAL;
}
rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
if (!rc)
edev->wol_enabled = wol_requested;
return rc;
}
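
With these two hooks wired up, WoL can be driven from userspace through the stock ethtool interface: "ethtool -s <iface> wol g" requests magic-packet wake and "ethtool -s <iface> wol d" disables it, while any option besides magic-packet is rejected with -EINVAL by the handler above.
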
static u32 qede_get_msglevel(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
......@@ -739,6 +780,8 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
qede_update_mtu(edev, &args);
edev->ops->common->update_mtu(edev->cdev, args.mtu);
return 0;
}
......@@ -1390,6 +1433,11 @@ static void qede_self_test(struct net_device *dev,
buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
}
static int qede_set_tunable(struct net_device *dev,
......@@ -1440,6 +1488,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_drvinfo = qede_get_drvinfo,
.get_regs_len = qede_get_regs_len,
.get_regs = qede_get_regs,
.get_wol = qede_get_wol,
.set_wol = qede_set_wol,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
......
......@@ -95,6 +95,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
#define TX_TIMEOUT (5 * HZ)
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
......@@ -166,6 +167,7 @@ static struct pci_driver qede_pci_driver = {
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
......@@ -2396,6 +2398,8 @@ static void qede_init_ndev(struct qede_dev *edev)
/* Set network device HW mac */
ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
ndev->mtu = edev->dev_info.common.mtu;
}
/* This function converts from 32b param to two params of level and module
......@@ -2703,6 +2707,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
qed_ops->common->remove(cdev);
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
......@@ -2713,6 +2719,11 @@ static void qede_remove(struct pci_dev *pdev)
__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
static void qede_shutdown(struct pci_dev *pdev)
{
__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
/* -------------------------------------------------------------------------
* START OF LOAD / UNLOAD
* -------------------------------------------------------------------------
......@@ -3751,6 +3762,8 @@ static int qede_open(struct net_device *ndev)
udp_tunnel_get_rx_info(ndev);
edev->ops->common->update_drv_state(edev->cdev, true);
return 0;
}
......@@ -3760,6 +3773,8 @@ static int qede_close(struct net_device *ndev)
qede_unload(edev, QEDE_UNLOAD_NORMAL);
edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
......@@ -3820,6 +3835,8 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
if (rc)
return rc;
edev->ops->common->update_mac(edev->cdev, addr->sa_data);
/* Add MAC filter according to the new unicast HW MAC address */
ether_addr_copy(edev->primary_mac, ndev->dev_addr);
return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
......
......@@ -22,7 +22,7 @@ struct qed_dev_eth_info {
u8 num_tc;
u8 port_mac[ETH_ALEN];
u8 num_vlan_filters;
u16 num_vlan_filters;
u16 num_mac_filters;
/* Legacy VF - this affects the datapath, so qede has to know */
......
......@@ -267,6 +267,9 @@ struct qed_dev_info {
u8 mf_mode;
bool tx_switching;
bool rdma_supported;
u16 mtu;
bool wol_support;
};
enum qed_sb_type {
......@@ -401,6 +404,15 @@ struct qed_selftest_ops {
* @return 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
/**
* @brief selftest_nvram - Perform nvram test
*
* @param cdev
*
* @return 0 on success, error otherwise.
*/
int (*selftest_nvram) (struct qed_dev *cdev);
};
struct qed_common_ops {
......@@ -554,6 +566,41 @@ struct qed_common_ops {
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
/**
* @brief update_drv_state - API to inform the change in the driver state.
*
* @param cdev
* @param active
*
*/
int (*update_drv_state)(struct qed_dev *cdev, bool active);
/**
* @brief update_mac - API to inform the change in the mac address
*
* @param cdev
* @param mac
*
*/
int (*update_mac)(struct qed_dev *cdev, u8 *mac);
/**
* @brief update_mtu - API to inform the change in the mtu
*
* @param cdev
* @param mtu
*
*/
int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
/**
* @brief update_wol - update of changes in the WoL configuration
*
* @param cdev
* @param enabled - true iff WoL should be enabled.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
};
#define MASK_FIELD(_name, _value) \
......