Commit f3a6f592 authored by David S. Miller

Merge branch 'qed-next'

Yuval Mintz says:

====================
qed*: Patch series

This series does several things. The bigger changes:

 - Add new notification APIs [& Defaults] for various fields.
The series then utilizes some of those qed <-> qede APIs to base WoL
support upon.

 - Change the resource allocation scheme to receive the values from
management firmware, instead of equally sharing resources between
functions [that might not need them]. That would, e.g., allow us to
configure additional filters for network interfaces in the presence of
storage [PCI] functions on the same adapter.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 89d9123e 2edbff8d
......@@ -154,7 +154,10 @@ struct qed_qm_iids {
u32 tids;
};
enum QED_RESOURCES {
/* HW / FW resources, output of features supported below, most information
* is received from MFW.
*/
enum qed_resources {
QED_SB,
QED_L2_QUEUE,
QED_VPORT,
......@@ -166,6 +169,7 @@ enum QED_RESOURCES {
QED_RDMA_CNQ_RAM,
QED_ILT,
QED_LL2_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_MAX_RESC,
};
......@@ -174,6 +178,7 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
QED_VF_L2_QUE,
QED_MAX_FEATURES,
};
......@@ -195,6 +200,11 @@ enum qed_dev_cap {
QED_DEV_CAP_ROCE,
};
enum qed_wol_support {
QED_WOL_SUPPORT_NONE,
QED_WOL_SUPPORT_PME,
};
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
......@@ -226,6 +236,9 @@ struct qed_hw_info {
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
u16 mtu;
enum qed_wol_support b_wol_support;
};
struct qed_hw_cid_data {
......@@ -538,7 +551,9 @@ struct qed_dev {
u8 mcp_rev;
u8 boot_mode;
u8 wol;
/* WoL related configurations */
u8 wol_config;
u8 wol_mac[ETH_ALEN];
u32 int_mode;
enum qed_coalescing_mode int_coalescing_mode;
......
This diff is collapsed.
......@@ -8529,6 +8529,41 @@ struct mdump_config_stc {
u32 valid_logs;
};
enum resource_id_enum {
RESOURCE_NUM_SB_E = 0,
RESOURCE_NUM_L2_QUEUE_E = 1,
RESOURCE_NUM_VPORT_E = 2,
RESOURCE_NUM_VMQ_E = 3,
RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
RESOURCE_FACTOR_RSS_PER_VF_E = 5,
RESOURCE_NUM_RL_E = 6,
RESOURCE_NUM_PQ_E = 7,
RESOURCE_NUM_VF_E = 8,
RESOURCE_VFC_FILTER_E = 9,
RESOURCE_ILT_E = 10,
RESOURCE_CQS_E = 11,
RESOURCE_GFT_PROFILES_E = 12,
RESOURCE_NUM_TC_E = 13,
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
/* Resource ID is to be filled by the driver in the MB request
* Size, offset & flags to be filled by the MFW in the MB response
*/
struct resource_info {
enum resource_id_enum res_id;
u32 size; /* number of allocated resources */
u32 offset; /* Offset of the 1st resource */
u32 vf_size;
u32 vf_offset;
u32 flags;
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
......@@ -8549,6 +8584,7 @@ union drv_union_data {
u64 reserved_stats[11];
struct ocbb_data_stc ocbb_info;
struct temperature_status_stc temp_info;
struct resource_info resource;
struct bist_nvm_image_att nvm_image_att;
struct mdump_config_stc mdump_config;
};
......@@ -8564,9 +8600,19 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INIT_PHY 0x22000000
#define DRV_MSG_CODE_LINK_RESET 0x23000000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
......@@ -8574,6 +8620,13 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
#define DRV_MSG_CODE_MCP_HALT 0x00100000
#define DRV_MSG_CODE_SET_VMAC 0x00110000
#define DRV_MSG_CODE_GET_VMAC 0x00120000
#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
#define DRV_MSG_CODE_GET_STATS 0x00130000
#define DRV_MSG_CODE_STATS_TYPE_LAN 1
......@@ -8585,11 +8638,16 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
......@@ -8602,13 +8660,59 @@ struct public_drv_mb {
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
DRV_MB_PARAM_WOL_DISABLED | \
DRV_MB_PARAM_WOL_ENABLED)
#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
DRV_MB_PARAM_ESWITCH_MODE_VEB | \
DRV_MB_PARAM_ESWITCH_MODE_VEPA)
#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
/* Resource Allocation params - Driver version support */
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
#define DRV_MB_PARAM_BIST_RC_PASSED 1
......@@ -8617,6 +8721,8 @@ struct public_drv_mb {
#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
......@@ -8631,15 +8737,27 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
#define FW_MSG_CODE_NVM_OK 0x00010000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
/* get pf rdma protocol command response */
#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
......
......@@ -3030,6 +3030,31 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
}
}
/* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
* the number of VF SBs [especially for first VF on engine, as we can't
* differentiate between empty entries and its own entries].
* Since we don't really support more SBs than VFs today, prevent any
* such configuration by sanitizing the number of SBs to equal the
* number of VFs.
*/
if (IS_PF_SRIOV(p_hwfn)) {
u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
if (total_vfs < p_igu_info->free_blks) {
DP_VERBOSE(p_hwfn,
(NETIF_MSG_INTR | QED_MSG_IOV),
"Limiting number of SBs for IOV - %04x --> %04x\n",
p_igu_info->free_blks,
p_hwfn->cdev->p_iov_info->total_vfs);
p_igu_info->free_blks = total_vfs;
} else if (total_vfs > p_igu_info->free_blks) {
DP_NOTICE(p_hwfn,
"IGU has only %04x SBs for VFs while the device has %04x VFs\n",
p_igu_info->free_blks, total_vfs);
return -EINVAL;
}
}
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(
......@@ -3163,7 +3188,12 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
return sb_id - p_info->igu_base_sb;
} else if ((sb_id >= p_info->igu_base_sb_iov) &&
(sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
/* We want the first VF queue to be adjacent to the
* last PF queue. Since L2 queues may cover only part of
* the SBs, use the PF L2-queue feature count instead.
*/
return sb_id - p_info->igu_base_sb_iov +
FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
} else {
DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
return 0;
......
......@@ -1691,7 +1691,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
}
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
(u8 *)&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
......
......@@ -221,6 +221,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode;
dev_info->tx_switching = true;
if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
QED_WOL_SUPPORT_PME)
dev_info->wol_support = true;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
......@@ -243,6 +247,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
&dev_info->mfw_rev, NULL);
}
dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
return 0;
}
......@@ -1431,11 +1437,106 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
int rc = 0;
if (IS_VF(cdev))
return 0;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EAGAIN;
rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
: QED_OV_WOL_DISABLED);
if (rc)
goto out;
rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
out:
qed_ptt_release(hwfn, ptt);
return rc;
}
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
int status = 0;
if (IS_VF(cdev))
return 0;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EAGAIN;
status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
QED_OV_DRIVER_STATE_ACTIVE :
QED_OV_DRIVER_STATE_DISABLED);
qed_ptt_release(hwfn, ptt);
return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
int status = 0;
if (IS_VF(cdev))
return 0;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EAGAIN;
status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
if (status)
goto out;
status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
out:
qed_ptt_release(hwfn, ptt);
return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
int status = 0;
if (IS_VF(cdev))
return 0;
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return -EAGAIN;
status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
if (status)
goto out;
status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
out:
qed_ptt_release(hwfn, ptt);
return status;
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
.selftest_clock = &qed_selftest_clock,
.selftest_nvram = &qed_selftest_nvram,
};
const struct qed_common_ops qed_common_ops_pass = {
......@@ -1465,6 +1566,10 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
.update_drv_state = &qed_update_drv_state,
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
.update_wol = &qed_update_wol,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
......
This diff is collapsed.
......@@ -92,6 +92,8 @@ struct qed_mcp_function_info {
#define QED_MCP_VLAN_UNSET (0xffff)
u16 ovlan;
u16 mtu;
};
struct qed_mcp_nvm_common {
......@@ -147,6 +149,30 @@ union qed_mcp_protocol_stats {
struct qed_mcp_rdma_stats rdma_stats;
};
enum qed_ov_eswitch {
QED_OV_ESWITCH_NONE,
QED_OV_ESWITCH_VEB,
QED_OV_ESWITCH_VEPA
};
enum qed_ov_client {
QED_OV_CLIENT_DRV,
QED_OV_CLIENT_USER,
QED_OV_CLIENT_VENDOR_SPEC
};
enum qed_ov_driver_state {
QED_OV_DRIVER_STATE_NOT_LOADED,
QED_OV_DRIVER_STATE_DISABLED,
QED_OV_DRIVER_STATE_ACTIVE
};
enum qed_ov_wol {
QED_OV_WOL_DEFAULT,
QED_OV_WOL_DISABLED,
QED_OV_WOL_ENABLED
};
/**
* @brief - returns the link params of the hw function
*
......@@ -277,6 +303,69 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver);
/**
* @brief Notify MFW about the change in base device properties
*
* @param p_hwfn
* @param p_ptt
* @param client - qed client type
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_client client);
/**
* @brief Notify MFW about the driver state
*
* @param p_hwfn
* @param p_ptt
* @param drv_state - Driver state
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_driver_state drv_state);
/**
* @brief Send MTU size to MFW
*
* @param p_hwfn
* @param p_ptt
* @param mtu - MTU size
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 mtu);
/**
* @brief Send MAC address to MFW
*
* @param p_hwfn
* @param p_ptt
* @param mac - MAC address
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *mac);
/**
* @brief Send WOL mode to MFW
*
* @param p_hwfn
* @param p_ptt
* @param wol - WOL mode
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_wol wol);
/**
* @brief Set LED status
*
......@@ -290,6 +379,18 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
/**
* @brief Read from nvm
*
* @param cdev
* @param addr - nvm offset
* @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
/**
* @brief Bist register test
*
......@@ -312,6 +413,35 @@ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief Bist nvm test - get number of images
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param num_images - number of images if operation was
* successful. 0 if not.
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *num_images);
/**
* @brief Bist nvm test - get image attributes by index
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param p_image_att - Attributes of image
* @param image_index - Index of image to get information for
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct bist_nvm_image_att *p_image_att,
u32 image_index);
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
......@@ -546,4 +676,32 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
/**
* @brief Send eswitch mode to MFW
*
* @param p_hwfn
* @param p_ptt
* @param eswitch - eswitch mode
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_eswitch eswitch);
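For illustration only (not taken from the series): a hypothetical helper, example_update_eswitch, that pairs this notification with the QED_OV_CLIENT_DRV confirmation used by the WoL/MAC/MTU helpers in qed_main.c above; whether the real caller also sends that confirmation is an assumption made here.
/* Hypothetical sketch: notify the MFW of the eswitch mode in use. */
static int example_update_eswitch(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  enum qed_ov_eswitch mode)
{
	int rc;

	rc = qed_mcp_ov_update_eswitch(p_hwfn, p_ptt, mode);
	if (rc)
		return rc;

	/* Mark the driver as the source of the current configuration,
	 * mirroring the pattern of the other OV update helpers.
	 */
	return qed_mcp_ov_update_current_config(p_hwfn, p_ptt,
						QED_OV_CLIENT_DRV);
}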
/**
* @brief - Gets the MFW allocation info for the given resource
*
* @param p_hwfn
* @param p_ptt
* @param p_resc_info - descriptor of requested resource
* @param p_mcp_resp
* @param p_mcp_param
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct resource_info *p_resc_info,
u32 *p_mcp_resp, u32 *p_mcp_param);
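For illustration only (not part of the series), here is a minimal sketch of a driver-side query built on this API, assuming a valid p_hwfn/p_ptt pair; the wrapper name example_query_l2_queues is hypothetical, while struct resource_info, RESOURCE_NUM_L2_QUEUE_E, FW_MSG_CODE_RESOURCE_ALLOC_OK and qed_mcp_get_resc_info() are the ones introduced above.
/* Editorial sketch, not taken from this series: ask the MFW for this PF's
 * L2-queue allocation. Error handling is trimmed for brevity.
 */
static int example_query_l2_queues(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct resource_info resc_info;
	u32 mcp_resp = 0, mcp_param = 0;
	int rc;

	memset(&resc_info, 0, sizeof(resc_info));
	resc_info.res_id = RESOURCE_NUM_L2_QUEUE_E;

	rc = qed_mcp_get_resc_info(p_hwfn, p_ptt, &resc_info,
				   &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* An older MFW that doesn't know this mailbox won't return
	 * FW_MSG_CODE_RESOURCE_ALLOC_OK; the driver would then fall back
	 * to the legacy equal-share scheme.
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		return -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "L2 queues: size %d, offset %d\n",
		   resc_info.size, resc_info.offset);
	return 0;
}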
#endif
#include <linux/crc32.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
......@@ -75,3 +76,103 @@ int qed_selftest_clock(struct qed_dev *cdev)
return rc;
}
int qed_selftest_nvram(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 num_images, i, j, nvm_crc, calc_crc;
struct bist_nvm_image_att image_att;
u8 *buf = NULL;
__be32 val;
int rc;
if (!p_ptt) {
DP_ERR(p_hwfn, "failed to acquire ptt\n");
return -EBUSY;
}
/* Acquire the number of available images from the MFW */
rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
if (rc || !num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
return -EINVAL;
}
/* Iterate over images and validate CRC */
for (i = 0; i < num_images; i++) {
/* This mailbox returns information about the image required for
* reading it.
*/
rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
&image_att, i);
if (rc) {
DP_ERR(p_hwfn,
"Failed getting image index %d attributes\n",
i);
goto err0;
}
/* After MFW crash dump is collected - the image's CRC stops
* being valid.
*/
if (image_att.image_type == NVM_TYPE_MDUMP)
continue;
DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n",
i, image_att.len);
/* Allocate a buffer for holding the nvram image */
buf = kzalloc(image_att.len, GFP_KERNEL);
if (!buf) {
rc = -ENOMEM;
goto err0;
}
/* Read image into buffer */
rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr,
buf, image_att.len);
if (rc) {
DP_ERR(p_hwfn,
"Failed reading image index %d from nvm.\n", i);
goto err1;
}
/* Convert the buffer into big-endian format (excluding the
* closing 4 bytes of CRC).
*/
for (j = 0; j < image_att.len - 4; j += 4) {
val = cpu_to_be32(*(u32 *)&buf[j]);
*(u32 *)&buf[j] = (__force u32)val;
}
/* Calc CRC for the "actual" image buffer, i.e. not including
* the last 4 CRC bytes.
*/
nvm_crc = *(u32 *)(buf + image_att.len - 4);
calc_crc = crc32(0xffffffff, buf, image_att.len - 4);
calc_crc = (__force u32)~cpu_to_be32(calc_crc);
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc);
if (calc_crc != nvm_crc) {
rc = -EINVAL;
goto err1;
}
/* Done with this image; Free to prevent double release
* on subsequent failure.
*/
kfree(buf);
buf = NULL;
}
qed_ptt_release(p_hwfn, p_ptt);
return 0;
err1:
kfree(buf);
err0:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
......@@ -37,4 +37,14 @@ int qed_selftest_register(struct qed_dev *cdev);
* @return int
*/
int qed_selftest_clock(struct qed_dev *cdev);
/**
* @brief qed_selftest_nvram - Perform nvram test
*
* @param cdev
*
* @return int
*/
int qed_selftest_nvram(struct qed_dev *cdev);
#endif
......@@ -3470,7 +3470,6 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
struct qed_sb_cnt_info sb_cnt_info;
int i, j, rc;
if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
......@@ -3483,7 +3482,11 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
for_each_hwfn(cdev, j) {
struct qed_hwfn *hwfn = &cdev->hwfns[j];
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
int num_sbs = 0, limit = 16;
int num_queues;
/* Make sure not to use more than 16 queues per VF */
num_queues = min_t(int,
FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, 16);
if (!ptt) {
DP_ERR(hwfn, "Failed to acquire ptt\n");
......@@ -3491,19 +3494,11 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
goto err;
}
if (IS_MF_DEFAULT(hwfn))
limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(hwfn, &sb_cnt_info);
num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
for (i = 0; i < num; i++) {
if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
continue;
rc = qed_iov_init_hw_for_vf(hwfn,
ptt, i, num_sbs / num);
rc = qed_iov_init_hw_for_vf(hwfn, ptt, i, num_queues);
if (rc) {
DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
qed_ptt_release(hwfn, ptt);
......
......@@ -193,6 +193,8 @@ struct qede_dev {
u16 vxlan_dst_port;
u16 geneve_dst_port;
bool wol_enabled;
struct qede_rdma_dev rdma_info;
};
......
......@@ -157,6 +157,7 @@ enum qede_ethtool_tests {
QEDE_ETHTOOL_MEMORY_TEST,
QEDE_ETHTOOL_REGISTER_TEST,
QEDE_ETHTOOL_CLOCK_TEST,
QEDE_ETHTOOL_NVRAM_TEST,
QEDE_ETHTOOL_TEST_MAX
};
......@@ -166,6 +167,7 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
"Memory (online)\t\t",
"Register (online)\t",
"Clock (online)\t\t",
"Nvram (online)\t\t",
};
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
......@@ -318,7 +320,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{ \
int i; \
\
for (i = 0; i < QED_LM_COUNT; i++) { \
for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
if ((caps) & (qed_lm_map[i].qed_link_mode)) \
__set_bit(qed_lm_map[i].ethtool_link_mode,\
lk_ksettings->link_modes.name); \
......@@ -329,7 +331,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{ \
int i; \
\
for (i = 0; i < QED_LM_COUNT; i++) { \
for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
if (test_bit(qed_lm_map[i].ethtool_link_mode, \
lk_ksettings->link_modes.name)) \
caps |= qed_lm_map[i].qed_link_mode; \
......@@ -481,6 +483,45 @@ static void qede_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct qede_dev *edev = netdev_priv(ndev);
if (edev->dev_info.common.wol_support) {
wol->supported = WAKE_MAGIC;
wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
}
}
static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct qede_dev *edev = netdev_priv(ndev);
bool wol_requested;
int rc;
if (wol->wolopts & ~WAKE_MAGIC) {
DP_INFO(edev,
"Can't support WoL options other than magic-packet\n");
return -EINVAL;
}
wol_requested = !!(wol->wolopts & WAKE_MAGIC);
if (wol_requested == edev->wol_enabled)
return 0;
/* Need to actually change configuration */
if (!edev->dev_info.common.wol_support) {
DP_INFO(edev, "Device doesn't support WoL\n");
return -EINVAL;
}
rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
if (!rc)
edev->wol_enabled = wol_requested;
return rc;
}
static u32 qede_get_msglevel(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
......@@ -739,6 +780,8 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
qede_update_mtu(edev, &args);
edev->ops->common->update_mtu(edev->cdev, args.mtu);
return 0;
}
......@@ -1390,6 +1433,11 @@ static void qede_self_test(struct net_device *dev,
buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
}
static int qede_set_tunable(struct net_device *dev,
......@@ -1440,6 +1488,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_drvinfo = qede_get_drvinfo,
.get_regs_len = qede_get_regs_len,
.get_regs = qede_get_regs,
.get_wol = qede_get_wol,
.set_wol = qede_set_wol,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
......
......@@ -95,6 +95,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
#define TX_TIMEOUT (5 * HZ)
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
......@@ -166,6 +167,7 @@ static struct pci_driver qede_pci_driver = {
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
......@@ -2396,6 +2398,8 @@ static void qede_init_ndev(struct qede_dev *edev)
/* Set network device HW mac */
ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
ndev->mtu = edev->dev_info.common.mtu;
}
/* This function converts from 32b param to two params of level and module
......@@ -2703,6 +2707,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
qed_ops->common->remove(cdev);
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
......@@ -2713,6 +2719,11 @@ static void qede_remove(struct pci_dev *pdev)
__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
static void qede_shutdown(struct pci_dev *pdev)
{
__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
/* -------------------------------------------------------------------------
* START OF LOAD / UNLOAD
* -------------------------------------------------------------------------
......@@ -3751,6 +3762,8 @@ static int qede_open(struct net_device *ndev)
udp_tunnel_get_rx_info(ndev);
edev->ops->common->update_drv_state(edev->cdev, true);
return 0;
}
......@@ -3760,6 +3773,8 @@ static int qede_close(struct net_device *ndev)
qede_unload(edev, QEDE_UNLOAD_NORMAL);
edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
......@@ -3820,6 +3835,8 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
if (rc)
return rc;
edev->ops->common->update_mac(edev->cdev, addr->sa_data);
/* Add MAC filter according to the new unicast HW MAC address */
ether_addr_copy(edev->primary_mac, ndev->dev_addr);
return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
......
......@@ -22,7 +22,7 @@ struct qed_dev_eth_info {
u8 num_tc;
u8 port_mac[ETH_ALEN];
u8 num_vlan_filters;
u16 num_vlan_filters;
u16 num_mac_filters;
/* Legacy VF - this affects the datapath, so qede has to know */
......
......@@ -267,6 +267,9 @@ struct qed_dev_info {
u8 mf_mode;
bool tx_switching;
bool rdma_supported;
u16 mtu;
bool wol_support;
};
enum qed_sb_type {
......@@ -401,6 +404,15 @@ struct qed_selftest_ops {
* @return 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
/**
* @brief selftest_nvram - Perform nvram test
*
* @param cdev
*
* @return 0 on success, error otherwise.
*/
int (*selftest_nvram) (struct qed_dev *cdev);
};
struct qed_common_ops {
......@@ -554,6 +566,41 @@ struct qed_common_ops {
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
/**
* @brief update_drv_state - API to inform the change in the driver state.
*
* @param cdev
* @param active
*
*/
int (*update_drv_state)(struct qed_dev *cdev, bool active);
/**
* @brief update_mac - API to inform the change in the mac address
*
* @param cdev
* @param mac
*
*/
int (*update_mac)(struct qed_dev *cdev, u8 *mac);
/**
* @brief update_mtu - API to inform the change in the mtu
*
* @param cdev
* @param mtu
*
*/
int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
/**
* @brief update_wol - update of changes in the WoL configuration
*
* @param cdev
* @param enabled - true iff WoL should be enabled.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
};
#define MASK_FIELD(_name, _value) \
......