Commit 884714ce authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-05-23

This series contains updates to ice driver only.

Anirudh cleans up whitespace and other code formatting issues in the
driver, implements LLDP persistence across reboots and across start/stop
of the LLDP agent, and updates the driver-capability print statements to
indicate whether each capability is a device or a function capability.
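
For illustration, persistence is selected through a new boolean on the
stop/start AQ wrappers; a caller that wants the stop to survive a reboot
would do something like this (sketch only, error flow abbreviated):

    enum ice_status status;

    /* shutdown_lldp_agent = true, persist = true */
    status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
    if (status)
            dev_warn(&pf->pdev->dev, "Fail to stop LLDP agent\n");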

Bruce cleans up variable declarations by removing an unneeded assignment.

Dave fixes a potential hang caused by a couple of flows that recursively
acquire the RTNL lock, which results in a deadlock.
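
The fix threads a 'locked' flag through the DCB configuration paths so a
caller that already holds the RTNL lock (e.g. an ethtool path) does not
take it again; a minimal sketch of the pattern, body elided:

    static int ice_pf_dcb_cfg(struct ice_pf *pf,
                              struct ice_dcbx_cfg *new_cfg, bool locked)
    {
            if (!locked)
                    rtnl_lock();

            /* ... disable VSIs, apply new_cfg, re-enable VSIs ... */

            if (!locked)
                    rtnl_unlock();
            return 0;
    }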

Tony updates the driver to advertise all the link modes the device is
capable of when the user does not request a specific link mode.
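
Concretely, each advertised-mode check now falls back to the supported
mode when no speed was requested; a sketch of the per-speed pattern
(10G shown, the same shape repeats for every speed in the diff below):

    if (!hw_link_info->req_speeds ||
        hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
            ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                 10000baseT_Full);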

Usha fixes up the LLDP MIB change event handling by cleaning up
workarounds and printing the DCB configuration changes that are detected.

Brett fixes the driver to handle failures in the VF reset path, which
was failing to free resources upon an error.

Richard fixes the reporting of stats via ethtool to align with our other
Intel drivers.

Jesse optimizes the transmit buffer and ring structures with more
efficient member ordering, so that hot cache lines hold packed data.
He also shrinks the VF structure to use less memory, since it is used
hundreds of times throughout the driver.
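
As a sketch of the packing approach, single-use booleans become 1-bit
bitfields so several flags share one byte instead of one byte each (the
struct name here is illustrative; the real changes are in ice_vsi and
ice_pf below):

    struct example_flags {
            u8 irqs_ready:1;
            u8 current_isup:1;        /* Sync 'link up' logging */
            u8 stat_offsets_loaded:1;
            u8 vlan_ena:1;
    };
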
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9395da4e feee3cb3
@@ -277,10 +277,10 @@ struct ice_vsi {
 	struct list_head tmp_sync_list;		/* MAC filters to be synced */
 	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */
 
-	u8 irqs_ready;
-	u8 current_isup;		/* Sync 'link up' logging */
-	u8 stat_offsets_loaded;
-	u8 vlan_ena;
+	u8 irqs_ready:1;
+	u8 current_isup:1;		/* Sync 'link up' logging */
+	u8 stat_offsets_loaded:1;
+	u8 vlan_ena:1;
 
 	/* queue information */
 	u8 tx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -330,7 +330,7 @@ enum ice_pf_flags {
 	ICE_FLAG_DCB_CAPABLE,
 	ICE_FLAG_DCB_ENA,
 	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
-	ICE_FLAG_DISABLE_FW_LLDP,
+	ICE_FLAG_ENABLE_FW_LLDP,
 	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
 	ICE_PF_FLAGS_NBITS		/* must be last */
 };
@@ -384,7 +384,7 @@ struct ice_pf {
 	struct ice_hw_port_stats stats;
 	struct ice_hw_port_stats stats_prev;
 	struct ice_hw hw;
-	u8 stat_prev_loaded;	/* has previous stats been loaded */
+	u8 stat_prev_loaded:1;	/* has previous stats been loaded */
 #ifdef CONFIG_DCB
 	u16 dcbx_cap;
 #endif /* CONFIG_DCB */
@@ -451,7 +451,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
-void ice_napi_del(struct ice_vsi *vsi);
 #ifdef CONFIG_DCB
 int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
 void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
...
@@ -1447,6 +1447,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 	struct ice_hw_func_caps *func_p = NULL;
 	struct ice_hw_dev_caps *dev_p = NULL;
 	struct ice_hw_common_caps *caps;
+	char const *prefix;
 	u32 i;
 
 	if (!buf)
@@ -1457,9 +1458,11 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 	if (opc == ice_aqc_opc_list_dev_caps) {
 		dev_p = &hw->dev_caps;
 		caps = &dev_p->common_cap;
+		prefix = "dev cap";
 	} else if (opc == ice_aqc_opc_list_func_caps) {
 		func_p = &hw->func_caps;
 		caps = &func_p->common_cap;
+		prefix = "func cap";
 	} else {
 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
 		return;
@@ -1475,28 +1478,29 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
 			caps->valid_functions = number;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Valid Functions = %d\n",
+				  "%s: valid functions = %d\n", prefix,
 				  caps->valid_functions);
 			break;
 		case ICE_AQC_CAPS_SRIOV:
 			caps->sr_iov_1_1 = (number == 1);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+				  "%s: SR-IOV = %d\n", prefix,
+				  caps->sr_iov_1_1);
 			break;
 		case ICE_AQC_CAPS_VF:
 			if (dev_p) {
 				dev_p->num_vfs_exposed = number;
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: VFs exposed = %d\n",
+					  "%s: VFs exposed = %d\n", prefix,
 					  dev_p->num_vfs_exposed);
 			} else if (func_p) {
 				func_p->num_allocd_vfs = number;
 				func_p->vf_base_id = logical_id;
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: VFs allocated = %d\n",
+					  "%s: VFs allocated = %d\n", prefix,
 					  func_p->num_allocd_vfs);
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: VF base_id = %d\n",
+					  "%s: VF base_id = %d\n", prefix,
 					  func_p->vf_base_id);
 			}
 			break;
@@ -1504,69 +1508,69 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 			if (dev_p) {
 				dev_p->num_vsi_allocd_to_host = number;
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: Dev.VSI cnt = %d\n",
+					  "%s: num VSI alloc to host = %d\n",
+					  prefix,
 					  dev_p->num_vsi_allocd_to_host);
 			} else if (func_p) {
 				func_p->guar_num_vsi =
 					ice_get_num_per_func(hw, ICE_MAX_VSI);
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: Func.VSI cnt = %d\n",
-					  number);
+					  "%s: num guaranteed VSI (fw) = %d\n",
+					  prefix, number);
+				ice_debug(hw, ICE_DBG_INIT,
+					  "%s: num guaranteed VSI = %d\n",
+					  prefix, func_p->guar_num_vsi);
 			}
 			break;
 		case ICE_AQC_CAPS_RSS:
 			caps->rss_table_size = number;
 			caps->rss_table_entry_width = logical_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: RSS table size = %d\n",
+				  "%s: RSS table size = %d\n", prefix,
 				  caps->rss_table_size);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: RSS table width = %d\n",
+				  "%s: RSS table width = %d\n", prefix,
 				  caps->rss_table_entry_width);
 			break;
 		case ICE_AQC_CAPS_RXQS:
 			caps->num_rxq = number;
 			caps->rxq_first_id = phys_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
+				  "%s: num Rx queues = %d\n", prefix,
+				  caps->num_rxq);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Rx first queue ID = %d\n",
+				  "%s: Rx first queue ID = %d\n", prefix,
 				  caps->rxq_first_id);
 			break;
 		case ICE_AQC_CAPS_TXQS:
 			caps->num_txq = number;
 			caps->txq_first_id = phys_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
+				  "%s: num Tx queues = %d\n", prefix,
+				  caps->num_txq);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Tx first queue ID = %d\n",
+				  "%s: Tx first queue ID = %d\n", prefix,
 				  caps->txq_first_id);
 			break;
 		case ICE_AQC_CAPS_MSIX:
 			caps->num_msix_vectors = number;
 			caps->msix_vector_first_id = phys_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: MSIX vector count = %d\n",
+				  "%s: MSIX vector count = %d\n", prefix,
 				  caps->num_msix_vectors);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: MSIX first vector index = %d\n",
+				  "%s: MSIX first vector index = %d\n", prefix,
 				  caps->msix_vector_first_id);
 			break;
 		case ICE_AQC_CAPS_MAX_MTU:
 			caps->max_mtu = number;
-			if (dev_p)
-				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: Dev.MaxMTU = %d\n",
-					  caps->max_mtu);
-			else if (func_p)
-				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: func.MaxMTU = %d\n",
-					  caps->max_mtu);
+			ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n",
				  prefix, caps->max_mtu);
 			break;
 		default:
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
-				  cap);
+				  "%s: unknown capability[%d]: 0x%x\n", prefix,
+				  i, cap);
 			break;
 		}
 	}
...
@@ -82,12 +82,14 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
  * @hw: pointer to the HW struct
  * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown
  *			 False if LLDP Agent needs to be Stopped
+ * @persist: True if Stop/Shutdown of LLDP Agent needs to be persistent across
+ *	     reboots
  * @cd: pointer to command details structure or NULL
  *
  * Stop or Shutdown the embedded LLDP Agent (0x0A05)
  */
 enum ice_status
-ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
 		 struct ice_sq_cd *cd)
 {
 	struct ice_aqc_lldp_stop *cmd;
@@ -100,17 +102,22 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
 	if (shutdown_lldp_agent)
 		cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;
 
+	if (persist)
+		cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_DIS;
+
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
 
 /**
  * ice_aq_start_lldp
  * @hw: pointer to the HW struct
+ * @persist: True if Start of LLDP Agent needs to be persistent across reboots
  * @cd: pointer to command details structure or NULL
  *
  * Start the embedded LLDP Agent on all ports. (0x0A06)
  */
-enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
 {
 	struct ice_aqc_lldp_start *cmd;
 	struct ice_aq_desc desc;
@@ -121,6 +128,9 @@ enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
 	cmd->command = ICE_AQ_LLDP_AGENT_START;
 
+	if (persist)
+		cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA;
+
 	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
 }
@@ -163,7 +173,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
  *
  * Get the DCBX status from the Firmware
  */
-u8 ice_get_dcbx_status(struct ice_hw *hw)
+static u8 ice_get_dcbx_status(struct ice_hw *hw)
 {
 	u32 reg;
 
@@ -614,7 +624,8 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
  *
  * Parse DCB configuration from the LLDPDU
  */
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+static enum ice_status
+ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
 {
 	struct ice_lldp_org_tlv *tlv;
 	enum ice_status ret = 0;
@@ -664,7 +675,7 @@ enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
  *
  * Query DCB configuration from the firmware
  */
-static enum ice_status
+enum ice_status
 ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
 		   struct ice_dcbx_cfg *dcbcfg)
 {
...
@@ -120,8 +120,9 @@ struct ice_cee_app_prio {
 	u8 prio_map;
 } __packed;
 
-u8 ice_get_dcbx_status(struct ice_hw *hw);
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+		   struct ice_dcbx_cfg *dcbcfg);
 enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
 enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
 enum ice_status ice_init_dcb(struct ice_hw *hw);
@@ -131,9 +132,10 @@ ice_query_port_ets(struct ice_port_info *pi,
 		   struct ice_sq_cd *cmd_details);
 #ifdef CONFIG_DCB
 enum ice_status
-ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
 		 struct ice_sq_cd *cd);
-enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
 enum ice_status
 ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
 		       bool *dcbx_agent_status, struct ice_sq_cd *cd);
@@ -144,6 +146,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
 static inline enum ice_status
 ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
 		 bool __always_unused shutdown_lldp_agent,
+		 bool __always_unused persist,
 		 struct ice_sq_cd __always_unused *cd)
 {
 	return 0;
@@ -151,6 +154,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
 static inline enum ice_status
 ice_aq_start_lldp(struct ice_hw __always_unused *hw,
+		  bool __always_unused persist,
 		  struct ice_sq_cd __always_unused *cd)
 {
 	return 0;
...
@@ -133,8 +133,10 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
  * ice_pf_dcb_cfg - Apply new DCB configuration
  * @pf: pointer to the PF struct
  * @new_cfg: DCBX config to apply
+ * @locked: is the RTNL held
  */
-static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
+static
+int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 {
 	struct ice_dcbx_cfg *old_cfg, *curr_cfg;
 	struct ice_aqc_port_ets_elem buf = { 0 };
@@ -163,6 +165,7 @@ static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
 	/* avoid race conditions by holding the lock while disabling and
 	 * re-enabling the VSI
 	 */
+	if (!locked)
 		rtnl_lock();
 	ice_pf_dis_all_vsi(pf, true);
@@ -192,6 +195,7 @@ static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
 out:
 	ice_pf_ena_all_vsi(pf, true);
+	if (!locked)
 		rtnl_unlock();
 	devm_kfree(&pf->pdev->dev, old_cfg);
 	return ret;
@@ -271,15 +275,16 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 	prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
 	prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
 	memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
-	ice_pf_dcb_cfg(pf, prev_cfg);
+	ice_pf_dcb_cfg(pf, prev_cfg, false);
 	devm_kfree(&pf->pdev->dev, prev_cfg);
 }
 
 /**
  * ice_dcb_init_cfg - set the initial DCB config in SW
  * @pf: pf to apply config to
+ * @locked: Is the RTNL held
  */
-static int ice_dcb_init_cfg(struct ice_pf *pf)
+static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
 {
 	struct ice_dcbx_cfg *newcfg;
 	struct ice_port_info *pi;
@@ -294,7 +299,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf)
 	memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
 
 	dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
-	if (ice_pf_dcb_cfg(pf, newcfg))
+	if (ice_pf_dcb_cfg(pf, newcfg, locked))
 		ret = -EINVAL;
 
 	devm_kfree(&pf->pdev->dev, newcfg);
@@ -305,8 +310,9 @@ static int ice_dcb_init_cfg(struct ice_pf *pf)
 /**
  * ice_dcb_sw_default_config - Apply a default DCB config
  * @pf: pf to apply config to
+ * @locked: was this function called with RTNL held
  */
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
 {
 	struct ice_aqc_port_ets_elem buf = { 0 };
 	struct ice_dcbx_cfg *dcbcfg;
@@ -338,7 +344,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
 	dcbcfg->app[0].priority = 3;
 	dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
 
-	ret = ice_pf_dcb_cfg(pf, dcbcfg);
+	ret = ice_pf_dcb_cfg(pf, dcbcfg, locked);
 	devm_kfree(&pf->pdev->dev, dcbcfg);
 	if (ret)
 		return ret;
@@ -349,8 +355,9 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
 /**
  * ice_init_pf_dcb - initialize DCB for a PF
  * @pf: pf to initiialize DCB for
+ * @locked: Was function called with RTNL held
  */
-int ice_init_pf_dcb(struct ice_pf *pf)
+int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 {
 	struct device *dev = &pf->pdev->dev;
 	struct ice_port_info *port_info;
@@ -360,33 +367,10 @@ int ice_init_pf_dcb(struct ice_pf *pf)
 	port_info = hw->port_info;
 
-	/* check if device is DCB capable */
-	if (!hw->func_caps.common_cap.dcb) {
-		dev_dbg(dev, "DCB not supported\n");
-		return -EOPNOTSUPP;
-	}
-
-	/* Best effort to put DCBx and LLDP into a good state */
-	port_info->dcbx_status = ice_get_dcbx_status(hw);
-	if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
-	    port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
-		bool dcbx_status;
-
-		/* Attempt to start LLDP engine. Ignore errors
-		 * as this will error if it is already started
-		 */
-		ice_aq_start_lldp(hw, NULL);
-
-		/* Attempt to start DCBX. Ignore errors as this
-		 * will error if it is already started
-		 */
-		ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
-	}
-
 	err = ice_init_dcb(hw);
 	if (err) {
-		/* FW LLDP not in usable state, default to SW DCBx/LLDP */
-		dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n");
+		/* FW LLDP is not active, default to SW DCBx/LLDP */
+		dev_info(&pf->pdev->dev, "FW LLDP is not active\n");
 		hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
 		hw->port_info->is_sw_lldp = true;
 	}
@@ -398,6 +382,9 @@ int ice_init_pf_dcb(struct ice_pf *pf)
 	if (port_info->is_sw_lldp) {
 		sw_default = 1;
 		dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
+		clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
+	} else {
+		set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
 	}
 
 	if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
@@ -406,7 +393,7 @@ int ice_init_pf_dcb(struct ice_pf *pf)
 	}
 
 	if (sw_default) {
-		err = ice_dcb_sw_dflt_cfg(pf);
+		err = ice_dcb_sw_dflt_cfg(pf, locked);
 		if (err) {
 			dev_err(&pf->pdev->dev,
 				"Failed to set local DCB config %d\n", err);
@@ -425,7 +412,7 @@ int ice_init_pf_dcb(struct ice_pf *pf)
 	set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
 
-	err = ice_dcb_init_cfg(pf);
+	err = ice_dcb_init_cfg(pf, locked);
 	if (err)
 		goto dcb_init_err;
@@ -514,6 +501,55 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
 	return 0;
 }
 
+/**
+ * ice_dcb_need_recfg - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ */
+static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+			       struct ice_dcbx_cfg *new_cfg)
+{
+	bool need_reconfig = false;
+
+	/* Check if ETS configuration has changed */
+	if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
+		   sizeof(new_cfg->etscfg))) {
+		/* If Priority Table has changed reconfig is needed */
+		if (memcmp(&new_cfg->etscfg.prio_table,
+			   &old_cfg->etscfg.prio_table,
+			   sizeof(new_cfg->etscfg.prio_table))) {
+			need_reconfig = true;
+			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+		}
+
+		if (memcmp(&new_cfg->etscfg.tcbwtable,
+			   &old_cfg->etscfg.tcbwtable,
+			   sizeof(new_cfg->etscfg.tcbwtable)))
+			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+		if (memcmp(&new_cfg->etscfg.tsatable,
+			   &old_cfg->etscfg.tsatable,
+			   sizeof(new_cfg->etscfg.tsatable)))
+			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+	}
+
+	/* Check if PFC configuration has changed */
+	if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
+		need_reconfig = true;
+		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+	}
+
+	/* Check if APP Table has changed */
+	if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
+		need_reconfig = true;
+		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+	}
+
+	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+	return need_reconfig;
+}
+
 /**
  * ice_dcb_process_lldp_set_mib_change - Process MIB change
  * @pf: ptr to ice_pf
@@ -523,29 +559,95 @@ void
 ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 				    struct ice_rq_event_info *event)
 {
-	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
-		struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
-		int err;
-
-		prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
-		dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
-				      sizeof(*dcbcfg), GFP_KERNEL);
-		if (!dcbcfg)
-			return;
-
-		err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
-		if (!err)
-			ice_pf_dcb_cfg(pf, dcbcfg);
-
-		devm_kfree(&pf->pdev->dev, dcbcfg);
-
-		/* Get updated DCBx data from firmware */
-		err = ice_get_dcb_cfg(pf->hw.port_info);
-		if (err)
-			dev_err(&pf->pdev->dev,
-				"Failed to get DCB config\n");
-	} else {
-		dev_dbg(&pf->pdev->dev,
-			"MIB Change Event in HOST mode\n");
-	}
+	struct ice_aqc_port_ets_elem buf = { 0 };
+	struct ice_aqc_lldp_get_mib *mib;
+	struct ice_dcbx_cfg tmp_dcbx_cfg;
+	bool need_reconfig = false;
+	struct ice_port_info *pi;
+	u8 type;
+	int ret;
+
+	/* Not DCB capable or capability disabled */
+	if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
+		return;
+
+	if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
+		dev_dbg(&pf->pdev->dev,
+			"MIB Change Event in HOST mode\n");
+		return;
+	}
+
+	pi = pf->hw.port_info;
+	mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+	/* Ignore if event is not for Nearest Bridge */
+	type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
+		ICE_AQ_LLDP_BRID_TYPE_M);
+	dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type);
+	if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
+		return;
+
+	/* Check MIB Type and return if event for Remote MIB update */
+	type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+	dev_dbg(&pf->pdev->dev,
+		"LLDP event mib type %s\n", type ? "remote" : "local");
+	if (type == ICE_AQ_LLDP_MIB_REMOTE) {
+		/* Update the remote cached instance and return */
+		ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+					 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
+					 &pi->remote_dcbx_cfg);
+		if (ret) {
+			dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n");
+			return;
+		}
+	}
+
+	/* store the old configuration */
+	tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
+
+	/* Reset the old DCBx configuration data */
+	memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg));
+
+	/* Get updated DCBx data from firmware */
+	ret = ice_get_dcb_cfg(pf->hw.port_info);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Failed to get DCB config\n");
+		return;
+	}
+
+	/* No change detected in DCBX configs */
+	if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+		dev_dbg(&pf->pdev->dev,
+			"No change detected in DCBX configuration.\n");
+		return;
+	}
+
+	need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
+					   &pi->local_dcbx_cfg);
+	if (!need_reconfig)
+		return;
+
+	/* Enable DCB tagging only when more than one TC */
+	if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
+		dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+	} else {
+		dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+	}
+
+	rtnl_lock();
+	ice_pf_dis_all_vsi(pf, true);
+
+	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+		rtnl_unlock();
+		return;
+	}
+
+	/* changes in configuration update VSI */
+	ice_pf_dcb_recfg(pf);
+
+	ice_pf_ena_all_vsi(pf, true);
+	rtnl_unlock();
 }
...
@@ -14,7 +14,7 @@ void ice_dcb_rebuild(struct ice_pf *pf);
 u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
 u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
 void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
-int ice_init_pf_dcb(struct ice_pf *pf);
+int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
 void ice_update_dcb_stats(struct ice_pf *pf);
 int
 ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
@@ -40,7 +40,8 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
 	return 1;
 }
 
-static inline int ice_init_pf_dcb(struct ice_pf *pf)
+static inline int
+ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
 {
 	dev_dbg(&pf->pdev->dev, "DCB not supported\n");
 	return -EOPNOTSUPP;
...
@@ -45,20 +45,20 @@ static int ice_q_stats_len(struct net_device *netdev)
 	 ICE_VSI_STATS_LEN + ice_q_stats_len(n))
 
 static const struct ice_stats ice_gstrings_vsi_stats[] = {
-	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
 	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
-	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
 	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
-	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
 	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
-	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
+	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
 	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
-	ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
-	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
-	ICE_VSI_STAT("tx_linearize", tx_linearize),
+	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
+	ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
 	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
 	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
 	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
+	ICE_VSI_STAT("tx_linearize", tx_linearize),
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
@@ -71,45 +71,45 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
  * is queried on the base PF netdev.
  */
 static const struct ice_stats ice_gstrings_pf_stats[] = {
-	ICE_PF_STAT("port.tx_bytes", stats.eth.tx_bytes),
-	ICE_PF_STAT("port.rx_bytes", stats.eth.rx_bytes),
-	ICE_PF_STAT("port.tx_unicast", stats.eth.tx_unicast),
-	ICE_PF_STAT("port.rx_unicast", stats.eth.rx_unicast),
-	ICE_PF_STAT("port.tx_multicast", stats.eth.tx_multicast),
-	ICE_PF_STAT("port.rx_multicast", stats.eth.rx_multicast),
-	ICE_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
-	ICE_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
-	ICE_PF_STAT("port.tx_errors", stats.eth.tx_errors),
-	ICE_PF_STAT("port.tx_size_64", stats.tx_size_64),
-	ICE_PF_STAT("port.rx_size_64", stats.rx_size_64),
-	ICE_PF_STAT("port.tx_size_127", stats.tx_size_127),
-	ICE_PF_STAT("port.rx_size_127", stats.rx_size_127),
-	ICE_PF_STAT("port.tx_size_255", stats.tx_size_255),
-	ICE_PF_STAT("port.rx_size_255", stats.rx_size_255),
-	ICE_PF_STAT("port.tx_size_511", stats.tx_size_511),
-	ICE_PF_STAT("port.rx_size_511", stats.rx_size_511),
-	ICE_PF_STAT("port.tx_size_1023", stats.tx_size_1023),
-	ICE_PF_STAT("port.rx_size_1023", stats.rx_size_1023),
-	ICE_PF_STAT("port.tx_size_1522", stats.tx_size_1522),
-	ICE_PF_STAT("port.rx_size_1522", stats.rx_size_1522),
-	ICE_PF_STAT("port.tx_size_big", stats.tx_size_big),
-	ICE_PF_STAT("port.rx_size_big", stats.rx_size_big),
-	ICE_PF_STAT("port.link_xon_tx", stats.link_xon_tx),
-	ICE_PF_STAT("port.link_xon_rx", stats.link_xon_rx),
-	ICE_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx),
-	ICE_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx),
-	ICE_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
-	ICE_PF_STAT("port.rx_undersize", stats.rx_undersize),
-	ICE_PF_STAT("port.rx_fragments", stats.rx_fragments),
-	ICE_PF_STAT("port.rx_oversize", stats.rx_oversize),
-	ICE_PF_STAT("port.rx_jabber", stats.rx_jabber),
-	ICE_PF_STAT("port.rx_csum_bad", hw_csum_rx_error),
-	ICE_PF_STAT("port.rx_length_errors", stats.rx_len_errors),
-	ICE_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
-	ICE_PF_STAT("port.rx_crc_errors", stats.crc_errors),
-	ICE_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
-	ICE_PF_STAT("port.mac_local_faults", stats.mac_local_faults),
-	ICE_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults),
+	ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
+	ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
+	ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
+	ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
+	ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
+	ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
+	ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
+	ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
+	ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
+	ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
+	ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
+	ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
+	ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
+	ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
+	ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
+	ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
+	ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
+	ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
+	ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
+	ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
+	ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
+	ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
+	ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
+	ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
+	ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
+	ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
+	ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
+	ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
+	ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
+	ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
+	ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
+	ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
+	ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
+	ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors),
+	ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
+	ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
+	ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
+	ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
+	ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
 };
 
 static const u32 ice_regs_dump_list[] = {
@@ -134,7 +134,7 @@ struct ice_priv_flag {
 static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
 	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
-	ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP),
+	ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP),
 };
 
 #define ICE_PRIV_FLAG_ARRAY_SIZE	ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -295,17 +295,17 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 		ice_for_each_alloc_txq(vsi, i) {
 			snprintf(p, ETH_GSTRING_LEN,
-				 "tx-queue-%u.tx_packets", i);
+				 "tx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
+			snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
 
 		ice_for_each_alloc_rxq(vsi, i) {
 			snprintf(p, ETH_GSTRING_LEN,
-				 "rx-queue-%u.rx_packets", i);
+				 "rx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
-			snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
+			snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
@@ -320,18 +320,18 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
 			snprintf(p, ETH_GSTRING_LEN,
-				 "port.tx-priority-%u-xon", i);
+				 "tx_priority_%u_xon.nic", i);
 			p += ETH_GSTRING_LEN;
 			snprintf(p, ETH_GSTRING_LEN,
-				 "port.tx-priority-%u-xoff", i);
+				 "tx_priority_%u_xoff.nic", i);
 			p += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
 			snprintf(p, ETH_GSTRING_LEN,
-				 "port.rx-priority-%u-xon", i);
+				 "rx_priority_%u_xon.nic", i);
 			p += ETH_GSTRING_LEN;
 			snprintf(p, ETH_GSTRING_LEN,
-				 "port.rx-priority-%u-xoff", i);
+				 "rx_priority_%u_xoff.nic", i);
 			p += ETH_GSTRING_LEN;
 		}
 		break;
@@ -433,8 +433,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 	bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
 
-	if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) {
-		if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) {
+	if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) {
+		if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) {
 			enum ice_status status;
 
 			status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
@@ -450,7 +450,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			/* The AQ call to stop the FW LLDP agent will generate
 			 * an error if the agent is already stopped.
 			 */
-			status = ice_aq_stop_lldp(&pf->hw, true, NULL);
+			status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
 			if (status)
 				dev_warn(&pf->pdev->dev,
 					 "Fail to stop LLDP agent\n");
@@ -458,7 +458,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			 * will likely not need DCB, so failure to init is
 			 * not a concern of ethtool
 			 */
-			status = ice_init_pf_dcb(pf);
+			status = ice_init_pf_dcb(pf, true);
 			if (status)
 				dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
 		} else {
@@ -468,7 +468,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			/* AQ command to start FW LLDP agent will return an
 			 * error if the agent is already started
 			 */
-			status = ice_aq_start_lldp(&pf->hw, NULL);
+			status = ice_aq_start_lldp(&pf->hw, true, NULL);
 			if (status)
 				dev_warn(&pf->pdev->dev,
 					 "Fail to start LLDP Agent\n");
@@ -497,7 +497,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 				dev_dbg(&pf->pdev->dev,
 					"Fail to reg for MIB change\n");
 
-			status = ice_init_pf_dcb(pf);
+			status = ice_init_pf_dcb(pf, true);
 			if (status)
 				dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
 		}
...@@ -628,7 +628,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -628,7 +628,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) { phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full); 100baseT_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
100baseT_Full); 100baseT_Full);
} }
...@@ -636,14 +637,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -636,14 +637,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) { phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseT_Full); 1000baseT_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full); 1000baseT_Full);
} }
if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) { if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseKX_Full); 1000baseKX_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseKX_Full); 1000baseKX_Full);
} }
...@@ -651,14 +654,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -651,14 +654,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) { phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseX_Full); 1000baseX_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseX_Full); 1000baseX_Full);
} }
if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) { if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseT_Full); 2500baseT_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
2500baseT_Full); 2500baseT_Full);
} }
...@@ -666,7 +671,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -666,7 +671,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) { phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
2500baseX_Full); 2500baseX_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
2500baseX_Full); 2500baseX_Full);
} }
...@@ -674,7 +680,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -674,7 +680,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) { phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
5000baseT_Full); 5000baseT_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
5000baseT_Full); 5000baseT_Full);
} }
...@@ -684,28 +691,32 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -684,28 +691,32 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) { phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full); 10000baseT_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full); 10000baseT_Full);
} }
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) { if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseKR_Full); 10000baseKR_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseKR_Full); 10000baseKR_Full);
} }
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) { if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full); 10000baseSR_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseSR_Full); 10000baseSR_Full);
} }
if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) { if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseLR_Full); 10000baseLR_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseLR_Full); 10000baseLR_Full);
} }
...@@ -717,7 +728,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -717,7 +728,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) { phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full); 25000baseCR_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full); 25000baseCR_Full);
} }
...@@ -725,7 +737,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -725,7 +737,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) { phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full); 25000baseSR_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full); 25000baseSR_Full);
} }
...@@ -734,14 +747,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -734,14 +747,16 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) { phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full); 25000baseKR_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full); 25000baseKR_Full);
} }
if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseKR4_Full); 40000baseKR4_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseKR4_Full); 40000baseKR4_Full);
} }
...@@ -750,21 +765,24 @@ ice_phy_type_to_ethtool(struct net_device *netdev, ...@@ -750,21 +765,24 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) { phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
ethtool_link_ksettings_add_link_mode(ks, supported, ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseCR4_Full); 40000baseCR4_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) if (!hw_link_info->req_speeds ||
hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseCR4_Full); 40000baseCR4_Full);
} }
if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) { if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     40000baseSR4_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
 			ethtool_link_ksettings_add_link_mode(ks, advertising,
 							     40000baseSR4_Full);
 	}
 	if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     40000baseLR4_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
 			ethtool_link_ksettings_add_link_mode(ks, advertising,
 							     40000baseLR4_Full);
 	}
@@ -779,7 +797,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 	    phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     50000baseCR2_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
 			ethtool_link_ksettings_add_link_mode(ks, advertising,
 							     50000baseCR2_Full);
 	}
@@ -787,7 +806,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     50000baseKR2_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
 			ethtool_link_ksettings_add_link_mode(ks, advertising,
 							     50000baseKR2_Full);
 	}
@@ -797,7 +817,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 	    phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     50000baseSR2_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
 			ethtool_link_ksettings_add_link_mode(ks, advertising,
 							     50000baseSR2_Full);
 	}
@@ -814,7 +835,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 	    phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     100000baseCR4_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
 			need_add_adv_mode = true;
 	}
 	if (need_add_adv_mode) {
@@ -826,7 +848,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     100000baseSR4_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
 			need_add_adv_mode = true;
 	}
 	if (need_add_adv_mode) {
@@ -838,7 +861,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 	    phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     100000baseLR4_ER4_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
 			need_add_adv_mode = true;
 	}
 	if (need_add_adv_mode) {
@@ -851,7 +875,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
 	    phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
 		ethtool_link_ksettings_add_link_mode(ks, supported,
 						     100000baseKR4_Full);
-		if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
+		if (!hw_link_info->req_speeds ||
+		    hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
 			need_add_adv_mode = true;
 	}
 	if (need_add_adv_mode)
...
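Side note on the pattern above: when req_speeds is zero (the user requested no specific speed), the driver now advertises every mode it marked supported. A minimal userspace sketch of that decision, with hypothetical names standing in for the driver's bitmaps:

#include <stdio.h>

/* hypothetical stand-ins for the driver's speed bits */
#define LINK_SPEED_40GB 0x1u
#define LINK_SPEED_50GB 0x2u

/* No requested speeds (req == 0) means "advertise everything supported";
 * otherwise advertise only the supported modes the user asked for.
 */
static unsigned int modes_to_advertise(unsigned int supported, unsigned int req)
{
	return req ? (supported & req) : supported;
}

int main(void)
{
	unsigned int sup = LINK_SPEED_40GB | LINK_SPEED_50GB;

	printf("0x%x\n", modes_to_advertise(sup, 0));               /* 0x3 */
	printf("0x%x\n", modes_to_advertise(sup, LINK_SPEED_40GB)); /* 0x1 */
	return 0;
}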
@@ -2754,19 +2754,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
 	if (vsi->type == ICE_VSI_VF)
 		vf = &pf->vf[vsi->vf_id];
-	/* do not unregister and free netdevs while driver is in the reset
-	 * recovery pending state. Since reset/rebuild happens through PF
-	 * service task workqueue, its not a good idea to unregister netdev
-	 * that is associated to the PF that is running the work queue items
-	 * currently. This is done to avoid check_flush_dependency() warning
-	 * on this wq
+	/* do not unregister while driver is in the reset recovery pending
+	 * state. Since reset/rebuild happens through PF service task workqueue,
+	 * it's not a good idea to unregister netdev that is associated to the
+	 * PF that is running the work queue items currently. This is done to
+	 * avoid check_flush_dependency() warning on this wq
 	 */
-	if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
-		ice_napi_del(vsi);
+	if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
 		unregister_netdev(vsi->netdev);
-		free_netdev(vsi->netdev);
-		vsi->netdev = NULL;
-	}
 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 		ice_rss_clean(vsi);
@@ -2799,6 +2794,13 @@ int ice_vsi_release(struct ice_vsi *vsi)
 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
 	ice_vsi_delete(vsi);
 	ice_vsi_free_q_vectors(vsi);
+
+	/* make sure unregister_netdev() was called by checking __ICE_DOWN */
+	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+	}
+
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_put_qs(vsi);
...
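The hunk above splits teardown in two: unregister_netdev() runs only outside reset recovery, and free_netdev() is deferred until __ICE_DOWN confirms the unregister actually happened. A toy model of that two-phase teardown, with illustrative types (not the driver's):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_netdev { int dummy; };

struct toy_vsi {
	struct toy_netdev *netdev;
	bool down;	/* stands in for __ICE_DOWN */
};

static void toy_release(struct toy_vsi *vsi, bool reset_in_progress)
{
	/* phase 1: unregister, but never from under a running reset */
	if (vsi->netdev && !reset_in_progress)
		vsi->down = true;	/* models unregister_netdev() */

	/* ... queue and vector teardown would happen here ... */

	/* phase 2: free memory only once DOWN proves unregistration ran */
	if (vsi->netdev && vsi->down) {
		free(vsi->netdev);	/* models free_netdev() */
		vsi->netdev = NULL;
	}
}

int main(void)
{
	struct toy_vsi vsi = { malloc(sizeof(struct toy_netdev)), false };

	toy_release(&vsi, true);	/* reset in flight: netdev kept */
	printf("%s\n", vsi.netdev ? "kept" : "freed");
	toy_release(&vsi, false);	/* normal release: freed */
	printf("%s\n", vsi.netdev ? "kept" : "freed");
	return 0;
}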
@@ -1667,7 +1667,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
  * ice_napi_del - Remove NAPI handler for the VSI
  * @vsi: VSI for which NAPI handler is to be removed
  */
-void ice_napi_del(struct ice_vsi *vsi)
+static void ice_napi_del(struct ice_vsi *vsi)
 {
 	int v_idx;
@@ -2302,7 +2302,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 	ice_init_pf(pf);
-	err = ice_init_pf_dcb(pf);
+	err = ice_init_pf_dcb(pf, false);
 	if (err) {
 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
...
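The new boolean argument to ice_init_pf_dcb() follows a common idiom for avoiding recursive lock acquisition: the caller states whether it already holds the lock (here, RTNL). A generic pthread-based sketch of the idiom, with invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* A helper that needs cfg_lock takes a "locked" flag so a caller that
 * already holds the lock does not deadlock by acquiring it a second time.
 */
static int apply_dcb_config(bool locked)
{
	if (!locked)
		pthread_mutex_lock(&cfg_lock);

	puts("applying DCB config");	/* work done under the lock */

	if (!locked)
		pthread_mutex_unlock(&cfg_lock);
	return 0;
}

int main(void)
{
	apply_dcb_config(false);	/* probe-style path: take the lock here */

	pthread_mutex_lock(&cfg_lock);	/* caller already holds the lock */
	apply_dcb_config(true);
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}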
@@ -119,7 +119,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
 	status = ice_read_sr_aq(hw, offset, 1, data, true);
 	if (!status)
-		*data = le16_to_cpu(*(__le16 *)data);
+		*data = le16_to_cpu(*(__force __le16 *)data);
 	return status;
 }
@@ -174,7 +174,7 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
 	} while (words_read < *words);
 	for (i = 0; i < *words; i++)
-		data[i] = le16_to_cpu(((__le16 *)data)[i]);
+		data[i] = le16_to_cpu(((__force __le16 *)data)[i]);
 read_nvm_buf_aq_exit:
 	*words = words_read;
...
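The __force casts above only tell sparse the type-punning is intentional; the data in the u16 buffer really is little-endian until le16_to_cpu() swaps it on big-endian hosts. A userspace illustration of the byte-order handling, in plain C with no kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Read a 16-bit little-endian word from a raw byte buffer, independent of
 * host endianness - the portable equivalent of le16_to_cpu() applied to a
 * buffer that arrived in LE byte order.
 */
static uint16_t read_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	const uint8_t nvm[4] = { 0x12, 0x34, 0x56, 0x78 }; /* two LE words */

	printf("0x%04x 0x%04x\n", read_le16(nvm), read_le16(nvm + 2));
	/* prints 0x3412 0x7856 on any host */
	return 0;
}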
@@ -799,7 +799,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
 		daddr = f_info->l_data.ethertype_mac.mac_addr;
 		/* fall-through */
 	case ICE_SW_LKUP_ETHERTYPE:
-		off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
+		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
 		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
 		break;
 	case ICE_SW_LKUP_MAC_VLAN:
@@ -829,7 +829,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
 	ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
-		off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
+		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
 		*off = cpu_to_be16(vlan_id);
 	}
...
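Same sparse story in the other direction: the rule builder writes CPU values into a raw Ethernet header in network (big-endian) order. A sketch of that store using the standard Ethernet ethertype offset (the ICE_ETH_* macros are the driver's own):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ETHTYPE_OFFSET 12	/* after destination + source MAC */

/* Store a 16-bit value into a raw packet buffer in network byte order. */
static void put_be16(uint8_t *buf, size_t off, uint16_t val)
{
	buf[off] = (uint8_t)(val >> 8);		/* high byte first */
	buf[off + 1] = (uint8_t)(val & 0xff);
}

int main(void)
{
	uint8_t hdr[14] = { 0 };

	put_be16(hdr, ETH_ETHTYPE_OFFSET, 0x8100);	/* 802.1Q ethertype */
	printf("%02x %02x\n", hdr[12], hdr[13]);	/* 81 00 */
	return 0;
}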
@@ -58,19 +58,19 @@ struct ice_tx_buf {
 	unsigned int bytecount;
 	unsigned short gso_segs;
 	u32 tx_flags;
-	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
+	DEFINE_DMA_UNMAP_ADDR(dma);
 };
 struct ice_tx_offload_params {
-	u8 header_len;
+	u64 cd_qw1;
+	struct ice_ring *tx_ring;
 	u32 td_cmd;
 	u32 td_offset;
 	u32 td_l2tag1;
-	u16 cd_l2tag2;
 	u32 cd_tunnel_params;
-	u64 cd_qw1;
-	struct ice_ring *tx_ring;
+	u16 cd_l2tag2;
+	u8 header_len;
 };
 struct ice_rx_buf {
@@ -150,6 +150,7 @@ enum ice_rx_dtype {
 /* descriptor ring, associated with a VSI */
 struct ice_ring {
+	/* CL1 - 1st cacheline starts here */
 	struct ice_ring *next;	/* pointer to next ring in q_vector */
 	void *desc;		/* Descriptor ring memory */
 	struct device *dev;	/* Used for DMA mapping */
@@ -161,11 +162,11 @@ struct ice_ring {
 		struct ice_tx_buf *tx_buf;
 		struct ice_rx_buf *rx_buf;
 	};
+	/* CL2 - 2nd cacheline starts here */
 	u16 q_index;		/* Queue number of ring */
-	u32 txq_teid;		/* Added Tx queue TEID */
-#ifdef CONFIG_DCB
-	u8 dcb_tc;		/* Traffic class of ring */
-#endif /* CONFIG_DCB */
+	u16 q_handle;		/* Queue handle per TC */
+	u8 ring_active:1;	/* is ring online or not */
 	u16 count;		/* Number of descriptors */
 	u16 reg_idx;		/* HW register index of the ring */
@@ -173,8 +174,7 @@ struct ice_ring {
 	/* used in interrupt processing */
 	u16 next_to_use;
 	u16 next_to_clean;
-	u8 ring_active;		/* is ring online or not */
+	u16 next_to_alloc;
 	/* stats structs */
 	struct ice_q_stats stats;
@@ -184,10 +184,17 @@ struct ice_ring {
 		struct ice_rxq_stats rx_stats;
 	};
-	unsigned int size;	/* length of descriptor ring in bytes */
-	dma_addr_t dma;		/* physical address of ring */
 	struct rcu_head rcu;	/* to avoid race on free */
-	u16 next_to_alloc;
+	/* CLX - the below items are only accessed infrequently and should be
+	 * in their own cache line if possible
+	 */
+	dma_addr_t dma;		/* physical address of ring */
+	unsigned int size;	/* length of descriptor ring in bytes */
+	u32 txq_teid;		/* Added Tx queue TEID */
+	u16 rx_buf_len;
+#ifdef CONFIG_DCB
+	u8 dcb_tc;		/* Traffic class of ring */
+#endif /* CONFIG_DCB */
 } ____cacheline_internodealigned_in_smp;
 struct ice_ring_container {
...
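The CL1/CL2 comments above encode an intent that can be checked mechanically: hot fields in the first cache lines, cold setup-only fields after them. A generic compile-time check of such a hot/cold split (toy struct and sizes, not ice_ring):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64

struct toy_ring {
	/* hot: touched on every descriptor */
	void *desc;
	uint16_t next_to_use;
	uint16_t next_to_clean;
	char pad[CACHE_LINE - sizeof(void *) - 2 * sizeof(uint16_t)];
	/* cold: setup/teardown only */
	unsigned long long dma;
	unsigned int size;
};

static_assert(offsetof(struct toy_ring, dma) >= CACHE_LINE,
	      "cold fields must not share the hot cache line");

int main(void)
{
	printf("dma offset: %zu\n", offsetof(struct toy_ring, dma));
	return 0;
}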
@@ -1134,7 +1134,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
 			   GFP_KERNEL);
 	if (!vfs) {
 		ret = -ENOMEM;
-		goto err_unroll_sriov;
+		goto err_pci_disable_sriov;
 	}
 	pf->vf = vfs;
@@ -1154,12 +1154,19 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
 	pf->num_alloc_vfs = num_alloc_vfs;
 	/* VF resources get allocated during reset */
-	if (!ice_reset_all_vfs(pf, true))
+	if (!ice_reset_all_vfs(pf, true)) {
+		ret = -EIO;
 		goto err_unroll_sriov;
+	}
 	goto err_unroll_intr;
 err_unroll_sriov:
+	pf->vf = NULL;
+	devm_kfree(&pf->pdev->dev, vfs);
+	vfs = NULL;
+	pf->num_alloc_vfs = 0;
+err_pci_disable_sriov:
 	pci_disable_sriov(pf->pdev);
 err_unroll_intr:
 	/* rearm interrupts here */
@@ -1807,16 +1814,16 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
 {
 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-	struct virtchnl_irq_map_info *irqmap_info =
-		(struct virtchnl_irq_map_info *)msg;
+	struct virtchnl_irq_map_info *irqmap_info;
 	u16 vsi_id, vsi_q_id, vector_id;
 	struct virtchnl_vector_map *map;
-	struct ice_vsi *vsi = NULL;
 	struct ice_pf *pf = vf->pf;
+	struct ice_vsi *vsi;
 	unsigned long qmap;
 	u16 num_q_vectors;
 	int i;
+	irqmap_info = (struct virtchnl_irq_map_info *)msg;
 	num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF;
 	vsi = pf->vsi[vf->lan_vsi_idx];
@@ -1903,9 +1910,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 	}
 	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!vsi) {
+	if (!vsi)
 		goto error_param;
-	}
 	if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
 		dev_err(&pf->pdev->dev,
...
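The ice_alloc_vfs() fix above follows the usual kernel unwind idiom: one label per teardown step, and each failure jumps to the label that undoes exactly what has been set up so far (a failed table allocation must not try to free the table). A compact standalone illustration with invented names:

#include <stdio.h>
#include <stdlib.h>

static int toy_alloc_vfs(int fail_at)
{
	char *vfs = NULL;
	int ret = 0;

	if (fail_at == 1) {		/* e.g. enabling SR-IOV failed */
		ret = -1;
		goto err_disable;	/* nothing allocated yet to free */
	}

	vfs = malloc(64);		/* the VF table */
	if (!vfs) {
		ret = -1;
		goto err_disable;
	}

	if (fail_at == 2) {		/* e.g. VF reset failed */
		ret = -1;
		goto err_unroll;	/* must free the table first */
	}

	printf("allocated OK\n");
	free(vfs);			/* demo only; real code keeps it */
	return 0;

err_unroll:
	free(vfs);
	vfs = NULL;
err_disable:
	printf("unwound, ret=%d\n", ret);
	return ret;
}

int main(void)
{
	toy_alloc_vfs(0);	/* success path */
	toy_alloc_vfs(2);	/* late failure: both labels run in order */
	return 0;
}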
@@ -49,29 +49,34 @@ struct ice_vf {
 	struct ice_pf *pf;
 	s16 vf_id;			/* VF ID in the PF space */
-	u32 driver_caps;		/* reported by VF driver */
+	u16 lan_vsi_idx;		/* index into PF struct */
 	int first_vector_idx;		/* first vector index of this VF */
 	struct ice_sw *vf_sw_id;	/* switch ID the VF VSIs connect to */
 	struct virtchnl_version_info vf_ver;
+	u32 driver_caps;		/* reported by VF driver */
 	struct virtchnl_ether_addr dflt_lan_addr;
 	u16 port_vlan_id;
-	u8 pf_set_mac;			/* VF MAC address set by VMM admin */
-	u8 trusted;
-	u16 lan_vsi_idx;		/* index into PF struct */
-	/* VSI indices - actual VSI pointers are maintained in the PF structure
-	 * When assigned, these will be non-zero, because VSI 0 is always
-	 * the main LAN VSI for the PF.
-	 */
+	u8 pf_set_mac:1;		/* VF MAC address set by VMM admin */
+	u8 trusted:1;
+	u8 spoofchk:1;
+	u8 link_forced:1;
+	u8 link_up:1;			/* only valid if VF link is forced */
 	u16 lan_vsi_num;		/* ID as used by firmware */
+	unsigned int tx_rate;		/* Tx bandwidth limit in Mbps */
+	DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS);	/* VF runtime states */
 	u64 num_mdd_events;		/* number of MDD events detected */
 	u64 num_inval_msgs;		/* number of continuous invalid msgs */
 	u64 num_valid_msgs;		/* number of valid msgs detected */
 	unsigned long vf_caps;		/* VF's adv. capabilities */
-	DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS);	/* VF runtime states */
-	unsigned int tx_rate;		/* Tx bandwidth limit in Mbps */
-	u8 link_forced;
-	u8 link_up;			/* only valid if VF link is forced */
-	u8 spoofchk;
+	u8 num_req_qs;			/* num of queue pairs requested by VF */
 	u16 num_mac;
 	u16 num_vlan;
 	u16 num_vf_qs;			/* num of queue configured per VF */
-	u8 num_req_qs;			/* num of queue pairs requested by VF */
 };
 #ifdef CONFIG_PCI_IOV
...
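The struct ice_vf reorder above shrinks the per-VF footprint in two ways: single-flag u8 members become one-bit bitfields sharing a byte, and members are grouped roughly by size so the compiler inserts less padding. A toy before/after comparison showing the effect (structs are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* flags scattered between larger members: padding after each u8 */
struct flags_loose {
	uint64_t a;
	uint8_t f1;
	uint64_t b;
	uint8_t f2;
	uint8_t f3;
};

/* large members first, flags packed into one byte as bitfields */
struct flags_packed {
	uint64_t a;
	uint64_t b;
	uint8_t f1:1;
	uint8_t f2:1;
	uint8_t f3:1;
};

int main(void)
{
	printf("loose: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct flags_loose), sizeof(struct flags_packed));
	return 0;
}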