Commit 9dde6da5 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-01-15

This series contains updates to the ice driver only.

Bruce fixes an unused variable build warning, which was introduced by
commit 2fd527b7 ("net: ndo_bridge_setlink: Add extack").  Added ethtool
support for the get_eeprom and get_eeprom_len operations.  Added support
for optionally bringing down the PHY link when the interface is
administratively downed.

Anirudh refactors the transmit scheduler functions to reduce code
duplication, adding a helper function that all the scheduler functions
call instead.  Added an LED blinking handler to ethtool.  Reworked the
queue management code to allow for reuse in future XDP feature support.
Updated the driver to preserve the aggregator list across reset by
moving it out of port_info and into ice_hw.  Added the ability to
offload SCTP checksum calculation to the hardware.  Added support for
new PHY types, which support higher link speeds.

Md Fahad makes sure that the RSS lookup table and hash key get
configured during the rebuild path after a reset.

Brett updates the driver to set the physical link state according to the
netdev state (up/down).  Added support for adaptive/dynamic interrupt
moderation in the ice driver, along with the ethtool operations needed.

Tony adds software timestamping support by using
ethtool_op_get_ts_info().
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7939f8be d671e3e0
@@ -26,6 +26,7 @@
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#include <linux/avf/virtchnl.h>
@@ -110,6 +111,9 @@ extern const char ice_drv_ver[];
#define ice_for_each_alloc_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -129,6 +133,17 @@ struct ice_res_tracker {
u16 list[1];
};
struct ice_qs_cfg {
struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
unsigned long *pf_map;
unsigned long pf_map_size;
unsigned int q_count;
unsigned int scatter_count;
u16 *vsi_map;
u16 vsi_map_offset;
u8 mapping_mode;
};
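The struct above gathers every parameter a shared queue-assignment helper needs, so Tx, Rx, and later XDP paths can reuse one implementation. Below is a minimal standalone model of that pattern (illustrative only; assign_qs and its simplified fields are stand-ins, not driver code):

/* model of a config struct driving one reusable queue-assignment routine */
#include <stdio.h>

struct qs_cfg_model {
	unsigned long *pf_map;     /* PF-wide bitmap of in-use queues */
	unsigned long pf_map_size; /* number of bits in pf_map */
	unsigned int q_count;      /* queues this VSI wants */
	unsigned short *vsi_map;   /* per-VSI queue index table */
	unsigned short vsi_map_offset;
};

/* hypothetical helper: claim the first q_count free queues */
static int assign_qs(struct qs_cfg_model *cfg)
{
	unsigned int assigned = 0, bit;

	for (bit = 0; bit < cfg->pf_map_size && assigned < cfg->q_count; bit++) {
		unsigned long mask = 1UL << (bit % (8 * sizeof(long)));
		unsigned long *word = &cfg->pf_map[bit / (8 * sizeof(long))];

		if (!(*word & mask)) {
			*word |= mask; /* mark queue as taken PF-wide */
			cfg->vsi_map[cfg->vsi_map_offset + assigned++] = bit;
		}
	}
	return assigned == cfg->q_count ? 0 : -1;
}

int main(void)
{
	unsigned long pf_map[1] = { 0x5 }; /* queues 0 and 2 already taken */
	unsigned short vsi_map[4] = { 0 };
	struct qs_cfg_model cfg = {
		.pf_map = pf_map, .pf_map_size = 16,
		.q_count = 3, .vsi_map = vsi_map, .vsi_map_offset = 0,
	};

	if (!assign_qs(&cfg))
		printf("VSI queues: %u %u %u\n",
		       vsi_map[0], vsi_map[1], vsi_map[2]); /* 1 3 4 */
	return 0;
}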
struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
@@ -270,6 +285,7 @@ enum ice_pf_flags {
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_PF_FLAGS_NBITS /* must be last */
};
......
@@ -657,8 +657,13 @@ struct ice_aqc_get_topo {
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
* Delete TSE (indirect 0x040F)
* Move TSE (indirect 0x0408)
* Suspend Nodes (indirect 0x0409)
* Resume Nodes (indirect 0x040A)
*/
-struct ice_aqc_get_cfg_elem {
struct ice_aqc_sched_elem_cmd {
__le16 num_elem_req; /* Used by commands */
__le16 num_elem_resp; /* Used by responses */
__le32 reserved;
@@ -674,18 +679,6 @@ struct ice_aqc_suspend_resume_elem {
__le32 teid[1];
};
-/* Add TSE (indirect 0x0401)
- * Delete TSE (indirect 0x040F)
- * Move TSE (indirect 0x0408)
- */
-struct ice_aqc_add_move_delete_elem {
-__le16 num_grps_req;
-__le16 num_grps_updated;
-__le32 reserved;
-__le32 addr_high;
-__le32 addr_low;
-};
struct ice_aqc_elem_info_bw {
__le16 bw_profile_idx;
__le16 bw_alloc;
@@ -854,11 +847,46 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33)
#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34)
#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35)
#define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36)
#define ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37)
#define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38)
#define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39)
#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40)
#define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41)
#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42)
#define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43)
#define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44)
#define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45)
#define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46)
#define ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47)
#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48)
#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49)
#define ICE_PHY_TYPE_LOW_50G_AUI1 BIT_ULL(50)
#define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51)
#define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52)
#define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53)
#define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54)
#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55)
#define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56)
#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57)
#define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58)
#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59)
#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60)
#define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61)
#define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62)
#define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63)
#define ICE_PHY_TYPE_LOW_MAX_INDEX 63
/* The second set of defines is for phy_type_high. */
#define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0)
#define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1)
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
-__le64 reserved;
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0)
#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1)
@@ -923,7 +951,7 @@ struct ice_aqc_set_phy_cfg {
/* Set PHY config command data structure */
struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
-__le64 rsvd0;
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
@@ -1032,10 +1060,12 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_SPEED_20GB BIT(6)
#define ICE_AQ_LINK_SPEED_25GB BIT(7)
#define ICE_AQ_LINK_SPEED_40GB BIT(8)
#define ICE_AQ_LINK_SPEED_50GB BIT(9)
#define ICE_AQ_LINK_SPEED_100GB BIT(10)
#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
__le32 reserved3; /* Aligns next field to 8-byte boundary */
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
-__le64 reserved4;
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
};
/* Set event mask command (direct 0x0613) */
@@ -1055,6 +1085,16 @@ struct ice_aqc_set_event_mask {
u8 reserved1[6];
};
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
u8 lport_num_valid;
u8 ident_mode;
#define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0)
#define ICE_AQC_PORT_IDENT_LED_ORIG 0
u8 rsvd[13];
};
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1341,12 +1381,12 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
struct ice_aqc_get_topo get_topo;
-struct ice_aqc_get_cfg_elem get_update_elem;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
-struct ice_aqc_add_move_delete_elem add_move_delete_elem;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
@@ -1442,6 +1482,7 @@ enum ice_adminq_opc {
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_port_id_led = 0x06E9,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
......
@@ -165,8 +165,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

-if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
}
return status;
}

@@ -183,6 +185,9 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
return ICE_MEDIA_UNKNOWN;

hw_link_info = &pi->phy.link_info;
if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
/* If more than one media type is selected, report unknown */
return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) {
switch (hw_link_info->phy_type_low) {
@@ -196,6 +201,15 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
case ICE_PHY_TYPE_LOW_40GBASE_LR4:
case ICE_PHY_TYPE_LOW_50GBASE_SR2:
case ICE_PHY_TYPE_LOW_50GBASE_LR2:
case ICE_PHY_TYPE_LOW_50GBASE_SR:
case ICE_PHY_TYPE_LOW_50GBASE_FR:
case ICE_PHY_TYPE_LOW_50GBASE_LR:
case ICE_PHY_TYPE_LOW_100GBASE_SR4:
case ICE_PHY_TYPE_LOW_100GBASE_LR4:
case ICE_PHY_TYPE_LOW_100GBASE_SR2:
case ICE_PHY_TYPE_LOW_100GBASE_DR:
return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_100BASE_TX:
case ICE_PHY_TYPE_LOW_1000BASE_T:
@@ -209,6 +223,11 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
case ICE_PHY_TYPE_LOW_25GBASE_CR1:
case ICE_PHY_TYPE_LOW_40GBASE_CR4:
case ICE_PHY_TYPE_LOW_50GBASE_CR2:
case ICE_PHY_TYPE_LOW_50GBASE_CP:
case ICE_PHY_TYPE_LOW_100GBASE_CR4:
case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
case ICE_PHY_TYPE_LOW_100GBASE_CP2:
return ICE_MEDIA_DA;
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
@@ -219,10 +238,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25GBASE_KR1:
case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
case ICE_PHY_TYPE_LOW_40GBASE_KR4:
case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
case ICE_PHY_TYPE_LOW_50GBASE_KR2:
case ICE_PHY_TYPE_LOW_100GBASE_KR4:
case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
return ICE_MEDIA_BACKPLANE;
}
} else {
switch (hw_link_info->phy_type_high) {
case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
}
}

return ICE_MEDIA_UNKNOWN;
}
@@ -274,6 +301,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* update current link status information */
hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
hw_link_info->link_info = link_data.link_info;
hw_link_info->an_info = link_data.an_info;
@@ -750,6 +778,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
status = ICE_ERR_CFG;
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
status = ice_init_fltr_mgmt_struct(hw);
if (status)
@@ -800,6 +829,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_cleanup_fltr_mgmt_struct(hw);
ice_sched_cleanup_all(hw);
ice_sched_clear_agg(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -1655,7 +1685,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
* This function is used to write MAC address to the NVM (0x0108).
*/
enum ice_status
-ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_write *cmd;
@@ -1667,8 +1697,8 @@ ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
cmd->flags = flags;

/* Prep values for flags, sah, sal */
-cmd->sah = htons(*((u16 *)mac_addr));
cmd->sah = htons(*((const u16 *)mac_addr));
-cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -1705,16 +1735,20 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
*
-* This helper function will convert a phy_type_low to its corresponding link
* This helper function will convert an entry in phy type structure
* [phy_type_low, phy_type_high] to its corresponding link speed.
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one phy type to its
* speed.
-* Note: In the structure of phy_type_low, there should be one bit set, as
-* this function will convert one phy type to its speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
*/
-static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

switch (phy_type_low) {
@@ -1768,41 +1802,110 @@ static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
case ICE_PHY_TYPE_LOW_40G_XLAUI:
speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
break;
case ICE_PHY_TYPE_LOW_50GBASE_CR2:
case ICE_PHY_TYPE_LOW_50GBASE_SR2:
case ICE_PHY_TYPE_LOW_50GBASE_LR2:
case ICE_PHY_TYPE_LOW_50GBASE_KR2:
case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_LAUI2:
case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_AUI2:
case ICE_PHY_TYPE_LOW_50GBASE_CP:
case ICE_PHY_TYPE_LOW_50GBASE_SR:
case ICE_PHY_TYPE_LOW_50GBASE_FR:
case ICE_PHY_TYPE_LOW_50GBASE_LR:
case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_AUI1:
speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
break;
case ICE_PHY_TYPE_LOW_100GBASE_CR4:
case ICE_PHY_TYPE_LOW_100GBASE_SR4:
case ICE_PHY_TYPE_LOW_100GBASE_LR4:
case ICE_PHY_TYPE_LOW_100GBASE_KR4:
case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
case ICE_PHY_TYPE_LOW_100G_CAUI4:
case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
case ICE_PHY_TYPE_LOW_100G_AUI4:
case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
case ICE_PHY_TYPE_LOW_100GBASE_CP2:
case ICE_PHY_TYPE_LOW_100GBASE_SR2:
case ICE_PHY_TYPE_LOW_100GBASE_DR:
speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
break;
default:
speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
}

-return speed_phy_type_low;
switch (phy_type_high) {
case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
case ICE_PHY_TYPE_HIGH_100G_CAUI2:
case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
case ICE_PHY_TYPE_HIGH_100G_AUI2:
speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
break;
default:
speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
}
if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
return ICE_AQ_LINK_SPEED_UNKNOWN;
else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
return ICE_AQ_LINK_SPEED_UNKNOWN;
else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
return speed_phy_type_low;
else
return speed_phy_type_high;
}
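The contract described in the comment above is easiest to see standalone. A simplified stand-in (not the driver function) showing that exactly one bit set across the two 64-bit words resolves to a speed, and anything else resolves to unknown:

#include <stdint.h>
#include <stdio.h>

static unsigned int resolve_speed(uint64_t low, uint64_t high)
{
	/* exactly one bit set across the [low, high] pair */
	if (__builtin_popcountll(low) + __builtin_popcountll(high) != 1)
		return 0; /* stand-in for ICE_AQ_LINK_SPEED_UNKNOWN */
	/* demo table: bit 51 of low is 100GBASE-CR4, bit 0 of high is
	 * 100GBASE-KR2-PAM4 (values from the defines earlier in the diff) */
	if (low & (1ULL << 51))
		return 100000;
	if (high & 1ULL)
		return 100000;
	return 0;
}

int main(void)
{
	printf("%u\n", resolve_speed(1ULL << 51, 0));    /* 100000 */
	printf("%u\n", resolve_speed(1ULL << 51, 1ULL)); /* 0: two bits set */
	printf("%u\n", resolve_speed(0, 0));             /* 0: no bit set */
	return 0;
}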
/**
* ice_update_phy_type
* @phy_type_low: pointer to the lower part of phy_type
* @phy_type_high: pointer to the higher part of phy_type
* @link_speeds_bitmap: targeted link speeds bitmap
*
* Note: For the link_speeds_bitmap structure, you can check it at
* [ice_aqc_get_link_status->link_speed]. Caller can pass in
* link_speeds_bitmap include multiple speeds.
*
-* The value of phy_type_low will present a certain link speed. This helper
-* function will turn on bits in the phy_type_low based on the value of
* Each entry in this [phy_type_low, phy_type_high] structure will
* present a certain link speed. This helper function will turn on bits
* in [phy_type_low, phy_type_high] structure based on the value of
* link_speeds_bitmap input parameter.
*/
-void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap)
{
u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
u64 pt_high;
u64 pt_low;
int index;

/* We first check with low part of phy_type */
for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
pt_low = BIT_ULL(index);
-speed = ice_get_link_speed_based_on_phy_type(pt_low);
speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
if (link_speeds_bitmap & speed)
*phy_type_low |= BIT_ULL(index);
}
/* We then check with high part of phy_type */
for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
pt_high = BIT_ULL(index);
speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
if (link_speeds_bitmap & speed)
*phy_type_high |= BIT_ULL(index);
}
}
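A hedged usage sketch (a fragment mirroring the call made later in ice_set_link_ksettings, not new driver code): both masks start at zero and the helper ORs in every PHY type whose speed appears in the requested bitmap.

u64 phy_type_low = 0, phy_type_high = 0;

ice_update_phy_type(&phy_type_low, &phy_type_high,
                    ICE_AQ_LINK_SPEED_50GB | ICE_AQ_LINK_SPEED_100GB);
/* phy_type_low now carries every 50G/100G low-word type,
 * phy_type_high every 100G high-word type */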
/**
@@ -1934,6 +2037,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
if (ena_auto_link_update)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

/* Copy over all the old settings */
cfg.phy_type_high = pcaps->phy_type_high;
cfg.phy_type_low = pcaps->phy_type_low;
cfg.low_power_ctrl = pcaps->low_power_ctrl;
cfg.eee_cap = pcaps->eee_cap;
@@ -2031,6 +2135,34 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
/**
* ice_aq_set_port_id_led
* @pi: pointer to the port information
* @is_orig_mode: is this LED set to original mode (by the net-list)
* @cd: pointer to command details structure or NULL
*
* Set LED value for the given port (0x06e9)
*/
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_port_id_led *cmd;
struct ice_hw *hw = pi->hw;
struct ice_aq_desc desc;
cmd = &desc.params.set_port_id_led;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
if (is_orig_mode)
cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
else
cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
......
@@ -28,6 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -70,9 +72,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
void
-ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap);
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
enum ice_status
-ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
@@ -86,6 +89,10 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cmd_details);
......
@@ -114,6 +114,22 @@ static const u32 ice_regs_dump_list[] = {
QRX_ITR(0),
};
struct ice_priv_flag {
char name[ETH_GSTRING_LEN];
u32 bitno; /* bit position in pf->flags */
};
#define ICE_PRIV_FLAG(_name, _bitno) { \
.name = _name, \
.bitno = _bitno, \
}
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
/**
* ice_nvm_version_str - format the NVM version strings
* @hw: ptr to the hardware info
@@ -152,6 +168,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}

static int ice_get_regs_len(struct net_device __always_unused *netdev)
@@ -203,6 +220,55 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data)
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
static int ice_get_eeprom_len(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
return (int)(pf->hw.nvm.sr_words * sizeof(u16));
}
static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
u8 *bytes)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
u16 first_word, last_word, nwords;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
struct device *dev;
int ret = 0;
u16 *buf;
dev = &pf->pdev->dev;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
nwords = last_word - first_word + 1;
buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL);
if (!buf)
return -ENOMEM;
status = ice_read_sr_buf(hw, first_word, &nwords, buf);
if (status) {
dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
eeprom->len = sizeof(u16) * nwords;
ret = -EIO;
goto out;
}
memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len);
out:
devm_kfree(dev, buf);
return ret;
}
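The byte-to-word conversion above is easiest to follow with numbers. A small standalone program (illustrative only) reproducing the arithmetic for an unaligned request of 5 bytes starting at byte offset 3:

#include <stdio.h>

int main(void)
{
	unsigned int offset = 3, len = 5;                  /* byte-addressed request */
	unsigned int first_word = offset >> 1;             /* 1: word holding byte 3 */
	unsigned int last_word = (offset + len - 1) >> 1;  /* 3: word holding byte 7 */
	unsigned int nwords = last_word - first_word + 1;  /* 3 words = 6 bytes read */

	printf("read words %u..%u (%u words); copy to user starts %u byte(s)\n"
	       "into the word buffer to honor the odd offset\n",
	       first_word, last_word, nwords, offset & 1);
	return 0;
}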
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -243,12 +309,100 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
snprintf(p, ETH_GSTRING_LEN, "%s",
ice_gstrings_priv_flags[i].name);
p += ETH_GSTRING_LEN;
}
break;
default:
break;
}
}
static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
bool led_active;
switch (state) {
case ETHTOOL_ID_ACTIVE:
led_active = true;
break;
case ETHTOOL_ID_INACTIVE:
led_active = false;
break;
default:
return -EINVAL;
}
if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
return -EIO;
return 0;
}
/**
* ice_get_priv_flags - report device private flags
* @netdev: network interface device structure
*
* The get string set count and the string set should be matched for each
* flag returned. Add new strings for each flag to the ice_gstrings_priv_flags
* array.
*
* Returns a u32 bitmap of flags.
*/
static u32 ice_get_priv_flags(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u32 i, ret_flags = 0;
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
const struct ice_priv_flag *priv_flag;
priv_flag = &ice_gstrings_priv_flags[i];
if (test_bit(priv_flag->bitno, pf->flags))
ret_flags |= BIT(i);
}
return ret_flags;
}
/**
* ice_set_priv_flags - set private flags
* @netdev: network interface device structure
* @flags: bit flags to be set
*/
static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u32 i;
if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
return -EINVAL;
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
const struct ice_priv_flag *priv_flag;
priv_flag = &ice_gstrings_priv_flags[i];
if (flags & BIT(i))
set_bit(priv_flag->bitno, pf->flags);
else
clear_bit(priv_flag->bitno, pf->flags);
}
return 0;
}
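A standalone illustration (not driver code) of the translation these two handlers perform between the packed u32 that ethtool exchanges and the wider driver bitmap; the bit positions and names below are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define DEMO_FLAG_LINK_DOWN_ON_CLOSE 3 /* arbitrary bit in the driver bitmap */

static const unsigned int demo_flag_bits[] = { DEMO_FLAG_LINK_DOWN_ON_CLOSE };
#define DEMO_NFLAGS (sizeof(demo_flag_bits) / sizeof(demo_flag_bits[0]))

int main(void)
{
	uint64_t pf_flags = 0;
	uint32_t user = 1u << 0; /* user sets flag #0 ("link-down-on-close") */
	uint32_t out = 0;
	unsigned int i;

	/* set path: unpack the u32 into the driver bitmap */
	for (i = 0; i < DEMO_NFLAGS; i++)
		if (user & (1u << i))
			pf_flags |= 1ULL << demo_flag_bits[i];

	/* get path: repack the bitmap into a u32, string-table order */
	for (i = 0; i < DEMO_NFLAGS; i++)
		if (pf_flags & (1ULL << demo_flag_bits[i]))
			out |= 1u << i;

	printf("reported flags: 0x%x\n", out); /* 0x1 */
	return 0;
}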
static int ice_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
@@ -272,6 +426,8 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* not safe.
*/
return ICE_ALL_STATS_LEN(netdev);
case ETH_SS_PRIV_FLAGS:
return ICE_PRIV_FLAG_ARRAY_SIZE;
default:
return -EOPNOTSUPP;
}
@@ -337,16 +493,20 @@ ice_get_ethtool_stats(struct net_device *netdev,
* @netdev: network interface device structure
* @ks: ethtool link ksettings struct to fill out
*/
-static void ice_phy_type_to_ethtool(struct net_device *netdev,
-struct ethtool_link_ksettings *ks)
static void
ice_phy_type_to_ethtool(struct net_device *netdev,
struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
bool need_add_adv_mode = false;
struct ice_vsi *vsi = np->vsi;
u64 phy_types_high;
u64 phy_types_low;

hw_link_info = &vsi->port_info->phy.link_info;
phy_types_low = vsi->port_info->phy.phy_type_low;
phy_types_high = vsi->port_info->phy.phy_type_high;

ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
@@ -495,6 +655,95 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseLR4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC ||
phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 ||
phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC ||
phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR ||
phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC ||
phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseCR2_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseCR2_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseKR2_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseKR2_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseSR2_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseSR2_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC ||
phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC ||
phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 ||
phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC ||
phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 ||
phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC ||
phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
need_add_adv_mode = false;
ethtool_link_ksettings_add_link_mode(ks, advertising,
100000baseCR4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
need_add_adv_mode = false;
ethtool_link_ksettings_add_link_mode(ks, advertising,
100000baseSR4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode) {
need_add_adv_mode = false;
ethtool_link_ksettings_add_link_mode(ks, advertising,
100000baseLR4_ER4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
need_add_adv_mode = true;
}
if (need_add_adv_mode)
ethtool_link_ksettings_add_link_mode(ks, advertising,
100000baseKR4_Full);
/* Autoneg PHY types */
if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
@@ -520,6 +769,24 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, advertising,
Autoneg);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Autoneg);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Autoneg);
}
}

#define TEST_SET_BITS_TIMEOUT 50
@@ -531,13 +798,15 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev,
* @ks: ethtool ksettings to fill in
* @netdev: network interface device structure
*/
-static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
-struct net_device *netdev)
static void
ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
bool unrecog_phy_high = false;
bool unrecog_phy_low = false;

link_info = &vsi->port_info->phy.link_info;
@@ -699,14 +968,116 @@ static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ethtool_link_ksettings_add_link_mode(ks, advertising,
40000baseKR4_Full);
break;
case ICE_PHY_TYPE_LOW_50GBASE_CR2:
case ICE_PHY_TYPE_LOW_50GBASE_CP:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseCR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseCR2_Full);
break;
case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_LAUI2:
case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_AUI2:
case ICE_PHY_TYPE_LOW_50GBASE_SR:
case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
case ICE_PHY_TYPE_LOW_50G_AUI1:
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseCR2_Full);
break;
case ICE_PHY_TYPE_LOW_50GBASE_KR2:
case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
50000baseKR2_Full);
break;
case ICE_PHY_TYPE_LOW_50GBASE_SR2:
case ICE_PHY_TYPE_LOW_50GBASE_LR2:
case ICE_PHY_TYPE_LOW_50GBASE_FR:
case ICE_PHY_TYPE_LOW_50GBASE_LR:
ethtool_link_ksettings_add_link_mode(ks, supported,
50000baseSR2_Full);
break;
case ICE_PHY_TYPE_LOW_100GBASE_CR4:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
100000baseCR4_Full);
break;
case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
case ICE_PHY_TYPE_LOW_100G_CAUI4:
case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
case ICE_PHY_TYPE_LOW_100G_AUI4:
case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
break;
case ICE_PHY_TYPE_LOW_100GBASE_CP2:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
100000baseCR4_Full);
break;
case ICE_PHY_TYPE_LOW_100GBASE_SR4:
case ICE_PHY_TYPE_LOW_100GBASE_SR2:
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
break;
case ICE_PHY_TYPE_LOW_100GBASE_LR4:
case ICE_PHY_TYPE_LOW_100GBASE_DR:
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
break;
case ICE_PHY_TYPE_LOW_100GBASE_KR4:
case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
100000baseKR4_Full);
break;
default:
unrecog_phy_low = true;
}

-if (unrecog_phy_low) {
switch (link_info->phy_type_high) {
case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
100000baseKR4_Full);
break;
case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
case ICE_PHY_TYPE_HIGH_100G_CAUI2:
case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
case ICE_PHY_TYPE_HIGH_100G_AUI2:
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR4_Full);
break;
default:
unrecog_phy_high = true;
}
if (unrecog_phy_low && unrecog_phy_high) {
/* if we got here and link is up something bad is afoot */
-netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n",
netdev_info(netdev,
"WARNING: Unrecognized PHY_Low (0x%llx).\n",
(u64)link_info->phy_type_low);
netdev_info(netdev,
"WARNING: Unrecognized PHY_High (0x%llx).\n",
(u64)link_info->phy_type_high);
}

/* Now that we've worked out everything that could be supported by the
@@ -718,6 +1089,12 @@ static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ethtool_intersect_link_masks(ks, &cap_ksettings);

switch (link_info->link_speed) {
case ICE_AQ_LINK_SPEED_100GB:
ks->base.speed = SPEED_100000;
break;
case ICE_AQ_LINK_SPEED_50GB:
ks->base.speed = SPEED_50000;
break;
case ICE_AQ_LINK_SPEED_40GB:
ks->base.speed = SPEED_40000;
break;
@@ -911,6 +1288,23 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
ethtool_link_ksettings_test_link_mode(ks, advertising,
40000baseKR4_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseCR2_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseKR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseSR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseCR4_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseSR4_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseLR4_ER4_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseKR4_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
return adv_link_speed;
}
@@ -981,8 +1375,9 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
*
* Set speed/duplex per media_types advertised/forced
*/
-static int ice_set_link_ksettings(struct net_device *netdev,
-const struct ethtool_link_ksettings *ks)
static int
ice_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *ks)
{
u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -994,6 +1389,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
struct ice_port_info *p;
u8 autoneg_changed = 0;
enum ice_status status;
u64 phy_type_high;
u64 phy_type_low;
int err = 0;
bool linkup;
@@ -1109,7 +1505,7 @@ static int ice_set_link_ksettings(struct net_device *netdev,
adv_link_speed = curr_link_speed;

/* Convert the advertise link speeds to their corresponded PHY_TYPE */
-ice_update_phy_type(&phy_type_low, adv_link_speed);
ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);

if (!autoneg_changed && adv_link_speed == curr_link_speed) {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -1128,7 +1524,9 @@ static int ice_set_link_ksettings(struct net_device *netdev,
/* set link and auto negotiation so changes take effect */
config.caps |= ICE_AQ_PHY_ENA_LINK;

-if (phy_type_low) {
if (phy_type_low || phy_type_high) {
config.phy_type_high = cpu_to_le64(phy_type_high) &
abilities->phy_type_high;
config.phy_type_low = cpu_to_le64(phy_type_low) &
abilities->phy_type_low;
} else {
@@ -1667,6 +2065,258 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
}
enum ice_container_type {
ICE_RX_CONTAINER,
ICE_TX_CONTAINER,
};
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
* @c_type: container type, RX or TX
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
* done per ice_ring_container because each q_vector can have 1 or more rings
* and all of said ring(s) will have the same ITR values.
*
* Returns 0 on success, negative otherwise.
*/
static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
struct ice_ring_container *rc)
{
struct ice_pf *pf = rc->ring->vsi->back;
switch (c_type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
break;
case ICE_TX_CONTAINER:
ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
break;
default:
dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type);
return -EINVAL;
}
return 0;
}
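A standalone illustration of the encoding the function above decodes: the adaptive state is a flag bit ORed into the stored ITR value, masked off when reporting microseconds. DEMO_ITR_DYNAMIC is a stand-in for the driver's ICE_ITR_DYNAMIC:

#include <stdint.h>
#include <stdio.h>

#define DEMO_ITR_DYNAMIC 0x8000 /* stand-in flag bit */

int main(void)
{
	uint16_t itr_setting = 50 | DEMO_ITR_DYNAMIC; /* 50 usecs, adaptive on */
	int adaptive = !!(itr_setting & DEMO_ITR_DYNAMIC);
	uint16_t usecs = itr_setting & (uint16_t)~DEMO_ITR_DYNAMIC;

	printf("use_adaptive_rx_coalesce=%d rx_coalesce_usecs=%u\n",
	       adaptive, usecs); /* 1, 50 */
	return 0;
}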
/**
* __ice_get_coalesce - get ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure to fill with driver's coalesce settings
* @q_num: queue number to get the coalesce settings for
*/
static int
__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
int tx = -EINVAL, rx = -EINVAL;
struct ice_vsi *vsi = np->vsi;
if (q_num < 0) {
rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
&vsi->rx_rings[0]->q_vector->rx);
tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
&vsi->tx_rings[0]->q_vector->tx);
goto update_coalesced_frames;
}
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
&vsi->rx_rings[q_num]->q_vector->rx);
tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
&vsi->tx_rings[q_num]->q_vector->tx);
} else if (q_num < vsi->num_rxq) {
rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
&vsi->rx_rings[q_num]->q_vector->rx);
} else if (q_num < vsi->num_txq) {
tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
&vsi->tx_rings[q_num]->q_vector->tx);
} else {
/* q_num is invalid for both Rx and Tx queues */
return -EINVAL;
}
update_coalesced_frames:
/* either q_num is invalid for both Rx and Tx queues or getting coalesce
* failed completely
*/
if (tx && rx)
return -EINVAL;
if (q_num < vsi->num_txq)
ec->tx_max_coalesced_frames_irq = vsi->work_lmt;
if (q_num < vsi->num_rxq)
ec->rx_max_coalesced_frames_irq = vsi->work_lmt;
return 0;
}
static int
ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
return __ice_get_coalesce(netdev, ec, -1);
}
static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
return __ice_get_coalesce(netdev, ec, q_num);
}
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
* @c_type: container type, RX or TX
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
*
* Set specific ITR values. This is done per ice_ring_container because each
* q_vector can have 1 or more rings and all of said ring(s) will have the same
* ITR values.
*
* Returns 0 on success, negative otherwise.
*/
static int
ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
struct ice_ring_container *rc, struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
u16 itr_setting;
if (!rc->ring)
return -EINVAL;
itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
switch (c_type) {
case ICE_RX_CONTAINER:
if (ec->rx_coalesce_usecs != itr_setting &&
ec->use_adaptive_rx_coalesce) {
netdev_info(vsi->netdev,
"Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
return -EINVAL;
}
if (ec->rx_coalesce_usecs > ICE_ITR_MAX) {
netdev_info(vsi->netdev,
"Invalid value, rx-usecs range is 0-%d\n",
ICE_ITR_MAX);
return -EINVAL;
}
if (ec->use_adaptive_rx_coalesce) {
rc->itr_setting |= ICE_ITR_DYNAMIC;
} else {
rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
rc->target_itr = ITR_TO_REG(rc->itr_setting);
}
break;
case ICE_TX_CONTAINER:
if (ec->tx_coalesce_usecs != itr_setting &&
ec->use_adaptive_tx_coalesce) {
netdev_info(vsi->netdev,
"Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
return -EINVAL;
}
if (ec->tx_coalesce_usecs > ICE_ITR_MAX) {
netdev_info(vsi->netdev,
"Invalid value, tx-usecs range is 0-%d\n",
ICE_ITR_MAX);
return -EINVAL;
}
if (ec->use_adaptive_tx_coalesce) {
rc->itr_setting |= ICE_ITR_DYNAMIC;
} else {
rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
rc->target_itr = ITR_TO_REG(rc->itr_setting);
}
break;
default:
dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type);
return -EINVAL;
}
return 0;
}
static int
__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
int rx = -EINVAL, tx = -EINVAL;
struct ice_vsi *vsi = np->vsi;
if (q_num < 0) {
int i;
ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
&q_vector->rx, vsi) ||
ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
&q_vector->tx, vsi))
return -EINVAL;
}
goto set_work_lmt;
}
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi);
tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi);
} else if (q_num < vsi->num_rxq) {
rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi);
} else if (q_num < vsi->num_txq) {
tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi);
}
/* either q_num is invalid for both Rx and Tx queues or setting coalesce
* failed completely
*/
if (rx && tx)
return -EINVAL;
set_work_lmt:
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq,
ec->rx_max_coalesced_frames_irq);
return 0;
}
static int
ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
return __ice_set_coalesce(netdev, ec, -1);
}
static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
return __ice_set_coalesce(netdev, ec, q_num);
}
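A standalone sketch of the dispatch convention used by the wrappers above: q_num < 0 is the "all queues" sentinel used by the global get/set handlers, while the per-queue ethtool ops pass a specific index (names below are illustrative):

#include <stdio.h>

static int apply_coalesce(int q_num, int num_q)
{
	if (q_num < 0) { /* all queues, as ice_set_coalesce does with -1 */
		for (int i = 0; i < num_q; i++)
			printf("queue %d updated\n", i);
		return 0;
	}
	if (q_num >= num_q) /* invalid for both Rx and Tx */
		return -1;
	printf("queue %d updated\n", q_num);
	return 0;
}

int main(void)
{
	apply_coalesce(-1, 2); /* global path */
	apply_coalesce(1, 2);  /* per-queue path */
	return 0;
}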
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
@@ -1676,8 +2326,15 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_coalesce = ice_get_coalesce,
.set_coalesce = ice_set_coalesce,
.get_strings = ice_get_strings, .get_strings = ice_get_strings,
.set_phys_id = ice_set_phys_id,
.get_ethtool_stats = ice_get_ethtool_stats, .get_ethtool_stats = ice_get_ethtool_stats,
.get_priv_flags = ice_get_priv_flags,
.set_priv_flags = ice_set_priv_flags,
.get_sset_count = ice_get_sset_count, .get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc, .get_rxnfc = ice_get_rxnfc,
.get_ringparam = ice_get_ringparam, .get_ringparam = ice_get_ringparam,
...@@ -1689,6 +2346,9 @@ static const struct ethtool_ops ice_ethtool_ops = { ...@@ -1689,6 +2346,9 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size, .get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh, .get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh, .set_rxfh = ice_set_rxfh,
.get_ts_info = ethtool_op_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce,
}; };
/** /**
......
...@@ -110,6 +110,7 @@ ...@@ -110,6 +110,7 @@
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1) #define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) #define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
#define GLINT_DYN_CTL_ITR_INDX_S 3 #define GLINT_DYN_CTL_ITR_INDX_S 3
#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) #define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) #define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
......
...@@ -346,6 +346,7 @@ enum ice_tx_desc_cmd_bits { ...@@ -346,6 +346,7 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
}; };
...@@ -488,5 +489,7 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) ...@@ -488,5 +489,7 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
#define ICE_LINK_SPEED_20000MBPS 20000 #define ICE_LINK_SPEED_20000MBPS 20000
#define ICE_LINK_SPEED_25000MBPS 25000 #define ICE_LINK_SPEED_25000MBPS 25000
#define ICE_LINK_SPEED_40000MBPS 40000 #define ICE_LINK_SPEED_40000MBPS 40000
#define ICE_LINK_SPEED_50000MBPS 50000
#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */ #endif /* _ICE_LAN_TX_RX_H_ */
...@@ -514,109 +514,88 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) ...@@ -514,109 +514,88 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
} }
/** /**
* ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @vsi: the VSI getting queues * @qs_cfg: gathered variables needed for PF->VSI queues assignment
* *
* Return 0 on success and a negative value on error * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
*/ */
static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{ {
struct ice_pf *pf = vsi->back; int offset, i;
int offset, ret = 0;
mutex_lock(&pf->avail_q_mutex);
/* look for contiguous block of queues for Tx */
offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
0, vsi->alloc_txq, 0);
if (offset < ICE_MAX_TXQS) {
int i;
bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); mutex_lock(qs_cfg->qs_mutex);
for (i = 0; i < vsi->alloc_txq; i++) offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
vsi->txq_map[i] = i + offset; 0, qs_cfg->q_count, 0);
} else { if (offset >= qs_cfg->pf_map_size) {
ret = -ENOMEM; mutex_unlock(qs_cfg->qs_mutex);
vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; return -ENOMEM;
} }
/* look for contiguous block of queues for Rx */ bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, for (i = 0; i < qs_cfg->q_count; i++)
0, vsi->alloc_rxq, 0); qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
if (offset < ICE_MAX_RXQS) { mutex_unlock(qs_cfg->qs_mutex);
int i;
bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
for (i = 0; i < vsi->alloc_rxq; i++)
vsi->rxq_map[i] = i + offset;
} else {
ret = -ENOMEM;
vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
}
mutex_unlock(&pf->avail_q_mutex);
return ret; return 0;
} }
/** /**
* ice_vsi_get_qs_scatter - Assign a scattered queues to VSI * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
* @vsi: the VSI getting queues * @qs_cfg: gathered variables needed for PF->VSI queues assignment
* *
* Return 0 on success and a negative value on error * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
*/ */
static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{ {
struct ice_pf *pf = vsi->back;
int i, index = 0; int i, index = 0;
mutex_lock(&pf->avail_q_mutex); mutex_lock(qs_cfg->qs_mutex);
for (i = 0; i < qs_cfg->q_count; i++) {
if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { index = find_next_zero_bit(qs_cfg->pf_map,
for (i = 0; i < vsi->alloc_txq; i++) { qs_cfg->pf_map_size, index);
index = find_next_zero_bit(pf->avail_txqs, if (index >= qs_cfg->pf_map_size)
ICE_MAX_TXQS, index); goto err_scatter;
if (index < ICE_MAX_TXQS) { set_bit(index, qs_cfg->pf_map);
set_bit(index, pf->avail_txqs); qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
vsi->txq_map[i] = index;
} else {
goto err_scatter_tx;
}
}
} }
mutex_unlock(qs_cfg->qs_mutex);
if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
for (i = 0; i < vsi->alloc_rxq; i++) {
index = find_next_zero_bit(pf->avail_rxqs,
ICE_MAX_RXQS, index);
if (index < ICE_MAX_RXQS) {
set_bit(index, pf->avail_rxqs);
vsi->rxq_map[i] = index;
} else {
goto err_scatter_rx;
}
}
}
mutex_unlock(&pf->avail_q_mutex);
return 0; return 0;
err_scatter:
err_scatter_rx:
/* unflag any queues we have grabbed (i is failed position) */
for (index = 0; index < i; index++) {
clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
vsi->rxq_map[index] = 0;
}
i = vsi->alloc_txq;
err_scatter_tx:
/* i is either position of failed attempt or vsi->alloc_txq */
for (index = 0; index < i; index++) { for (index = 0; index < i; index++) {
clear_bit(vsi->txq_map[index], pf->avail_txqs); clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
vsi->txq_map[index] = 0; qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
} }
mutex_unlock(qs_cfg->qs_mutex);
mutex_unlock(&pf->avail_q_mutex);
return -ENOMEM; return -ENOMEM;
} }
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
*
* This is an internal function for assigning queues from the PF to VSI. It
* initially tries to find contiguous space; if that fails, it falls back to
* the scatter approach.
*
* Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
*/
static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
int ret = 0;
ret = __ice_vsi_get_qs_contig(qs_cfg);
if (ret) {
/* contig failed, so try with scatter approach */
qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
qs_cfg->scatter_count);
ret = __ice_vsi_get_qs_sc(qs_cfg);
}
return ret;
}
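
To make the contig-then-scatter fallback concrete, here is a userspace analog operating on a plain bitmask instead of the kernel bitmap API; get_qs() is a hypothetical helper, illustrative only:

#include <stdio.h>

/* Try to find q_count adjacent zero bits; if none, grab scattered bits. */
static int get_qs(unsigned int *map, int map_size, int q_count, int *out)
{
	int i, run = 0, start = 0;

	for (i = 0; i < map_size; i++) {          /* contiguous pass */
		if (*map & (1u << i)) {
			run = 0;
			start = i + 1;
		} else if (++run == q_count) {
			for (i = 0; i < q_count; i++) {
				out[i] = start + i;
				*map |= 1u << out[i];
			}
			return 0;
		}
	}
	run = 0;                                  /* scatter pass */
	for (i = 0; i < map_size && run < q_count; i++)
		if (!(*map & (1u << i))) {
			out[run++] = i;
			*map |= 1u << i;
		}
	return run == q_count ? 0 : -1;
}

int main(void)
{
	unsigned int map = 0x5; /* queues 0 and 2 already taken */
	int q[2];

	if (!get_qs(&map, 8, 2, q))
		printf("got queues %d and %d\n", q[0], q[1]); /* 3 and 4 */
	return 0;
}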
/** /**
* ice_vsi_get_qs - Assign queues from PF to VSI * ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to * @vsi: the VSI to assign queues to
...@@ -625,25 +604,35 @@ static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) ...@@ -625,25 +604,35 @@ static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
*/ */
static int ice_vsi_get_qs(struct ice_vsi *vsi) static int ice_vsi_get_qs(struct ice_vsi *vsi)
{ {
struct ice_pf *pf = vsi->back;
struct ice_qs_cfg tx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
.pf_map = pf->avail_txqs,
.pf_map_size = ICE_MAX_TXQS,
.q_count = vsi->alloc_txq,
.scatter_count = ICE_MAX_SCATTER_TXQS,
.vsi_map = vsi->txq_map,
.vsi_map_offset = 0,
.mapping_mode = vsi->tx_mapping_mode
};
struct ice_qs_cfg rx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
.pf_map = pf->avail_rxqs,
.pf_map_size = ICE_MAX_RXQS,
.q_count = vsi->alloc_rxq,
.scatter_count = ICE_MAX_SCATTER_RXQS,
.vsi_map = vsi->rxq_map,
.vsi_map_offset = 0,
.mapping_mode = vsi->rx_mapping_mode
};
int ret = 0; int ret = 0;
vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
/* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping ret = __ice_vsi_get_qs(&tx_qs_cfg);
* modes individually to scatter if assigning contiguous queues if (!ret)
* to Rx or Tx fails ret = __ice_vsi_get_qs(&rx_qs_cfg);
*/
ret = ice_vsi_get_qs_contig(vsi);
if (ret < 0) {
if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
ICE_MAX_SCATTER_TXQS);
if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
ICE_MAX_SCATTER_RXQS);
ret = ice_vsi_get_qs_scatter(vsi);
}
return ret; return ret;
} }
...@@ -1614,11 +1603,14 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) ...@@ -1614,11 +1603,14 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
/** /**
* ice_vsi_cfg_txqs - Configure the VSI for Tx * ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured * @vsi: the VSI being configured
* @rings: Tx ring array to be configured
* @offset: offset within vsi->txq_map
* *
* Return 0 on success and a negative value on error * Return 0 on success and a negative value on error
* Configure the Tx VSI for operation. * Configure the Tx VSI for operation.
*/ */
int ice_vsi_cfg_txqs(struct ice_vsi *vsi) static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{ {
struct ice_aqc_add_tx_qgrp *qg_buf; struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq; struct ice_aqc_add_txqs_perq *txq;
...@@ -1626,7 +1618,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) ...@@ -1626,7 +1618,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
u8 num_q_grps, q_idx = 0; u8 num_q_grps, q_idx = 0;
enum ice_status status; enum ice_status status;
u16 buf_len, i, pf_q; u16 buf_len, i, pf_q;
int err = 0, tc = 0; int err = 0, tc;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp); buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
...@@ -1644,9 +1636,8 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) ...@@ -1644,9 +1636,8 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
struct ice_tlan_ctx tlan_ctx = { 0 }; struct ice_tlan_ctx tlan_ctx = { 0 };
pf_q = vsi->txq_map[q_idx]; pf_q = vsi->txq_map[q_idx + offset];
ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx, ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
pf_q);
/* copy context contents into the qg_buf */ /* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
...@@ -1655,7 +1646,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) ...@@ -1655,7 +1646,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
/* init queue specific tail reg. It is referred as /* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell. * transmit comm scheduler queue doorbell.
*/ */
vsi->tx_rings[q_idx]->tail = rings[q_idx]->tail =
pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
num_q_grps, qg_buf, buf_len, num_q_grps, qg_buf, buf_len,
...@@ -1674,7 +1665,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) ...@@ -1674,7 +1665,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
*/ */
txq = &qg_buf->txqs[0]; txq = &qg_buf->txqs[0];
if (pf_q == le16_to_cpu(txq->txq_id)) if (pf_q == le16_to_cpu(txq->txq_id))
vsi->tx_rings[q_idx]->txq_teid = rings[q_idx]->txq_teid =
le32_to_cpu(txq->q_teid); le32_to_cpu(txq->q_teid);
q_idx++; q_idx++;
...@@ -1685,6 +1676,18 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) ...@@ -1685,6 +1676,18 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
return err; return err;
} }
/**
* ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
}
/** /**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs * @intrl: interrupt rate limit in usecs
...@@ -1714,22 +1717,34 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) ...@@ -1714,22 +1717,34 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
static void static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
{ {
u8 itr_gran = hw->itr_gran;
if (q_vector->num_ring_rx) { if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx; struct ice_ring_container *rc = &q_vector->rx;
rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran); /* if this value is set then don't overwrite with default */
if (!rc->itr_setting)
rc->itr_setting = ICE_DFLT_RX_ITR;
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
rc->latency_range = ICE_LOW_LATENCY; rc->latency_range = ICE_LOW_LATENCY;
wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); wr32(hw, GLINT_ITR(rc->itr_idx, vector),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
} }
if (q_vector->num_ring_tx) { if (q_vector->num_ring_tx) {
struct ice_ring_container *rc = &q_vector->tx; struct ice_ring_container *rc = &q_vector->tx;
rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran); /* if this value is set then don't overwrite with default */
if (!rc->itr_setting)
rc->itr_setting = ICE_DFLT_TX_ITR;
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
rc->latency_range = ICE_LOW_LATENCY; rc->latency_range = ICE_LOW_LATENCY;
wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); wr32(hw, GLINT_ITR(rc->itr_idx, vector),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
} }
} }
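
Worked example of the register math used above: the usec setting is rounded up to the 2-usec ITR granularity by ITR_REG_ALIGN() and then shifted down to register units. The macro values mirror the ice_txrx.h hunk later in this diff; __ALIGN_MASK follows the kernel's definition.

#include <stdio.h>

#define ICE_ITR_GRAN_S 1      /* assume 2-usec ITR granularity */
#define ICE_ITR_MASK 0x1FFE   /* ITR register value alignment mask */
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)

int main(void)
{
	int usecs[] = { 50, 51, 124 };

	for (int i = 0; i < 3; i++)
		printf("%3d usecs -> aligned %3d -> reg 0x%02x\n", usecs[i],
		       ITR_REG_ALIGN(usecs[i]),
		       ITR_REG_ALIGN(usecs[i]) >> ICE_ITR_GRAN_S);
	/* 50 -> 50 -> 0x19; 51 -> 52 -> 0x1a; 124 -> 124 -> 0x3e */
	return 0;
}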
...@@ -1897,9 +1912,12 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) ...@@ -1897,9 +1912,12 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
* @vsi: the VSI being configured * @vsi: the VSI being configured
* @rst_src: reset source * @rst_src: reset source
* @rel_vmvf_num: Relative id of VF/VM * @rel_vmvf_num: Relative id of VF/VM
* @rings: Tx ring array to be stopped
* @offset: offset within vsi->txq_map
*/ */
int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, static int
u16 rel_vmvf_num) ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_ring **rings, int offset)
{ {
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
...@@ -1927,19 +1945,18 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, ...@@ -1927,19 +1945,18 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
ice_for_each_txq(vsi, i) { ice_for_each_txq(vsi, i) {
u16 v_idx; u16 v_idx;
if (!vsi->tx_rings || !vsi->tx_rings[i] || if (!rings || !rings[i] || !rings[i]->q_vector) {
!vsi->tx_rings[i]->q_vector) {
err = -EINVAL; err = -EINVAL;
goto err_out; goto err_out;
} }
q_ids[i] = vsi->txq_map[i]; q_ids[i] = vsi->txq_map[i + offset];
q_teids[i] = vsi->tx_rings[i]->txq_teid; q_teids[i] = rings[i]->txq_teid;
/* clear cause_ena bit for disabled queues */ /* clear cause_ena bit for disabled queues */
val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
val &= ~QINT_TQCTL_CAUSE_ENA_M; val &= ~QINT_TQCTL_CAUSE_ENA_M;
wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
/* software is expected to wait for 100 ns */ /* software is expected to wait for 100 ns */
ndelay(100); ndelay(100);
...@@ -1947,7 +1964,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, ...@@ -1947,7 +1964,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
/* trigger a software interrupt for the vector associated to /* trigger a software interrupt for the vector associated to
* the queue to schedule NAPI handler * the queue to schedule NAPI handler
*/ */
v_idx = vsi->tx_rings[i]->q_vector->v_idx; v_idx = rings[i]->q_vector->v_idx;
wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
} }
...@@ -1976,6 +1993,19 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, ...@@ -1976,6 +1993,19 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
return err; return err;
} }
/**
* ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
* @rel_vmvf_num: Relative id of VF/VM
*/
int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
{
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
0);
}
/** /**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on * @vsi: VSI to enable or disable VLAN pruning on
...@@ -2581,6 +2611,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ...@@ -2581,6 +2611,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
goto err_vectors; goto err_vectors;
ice_vsi_map_rings_to_vectors(vsi); ice_vsi_map_rings_to_vectors(vsi);
/* Do not exit if configuring RSS had an issue; we can still
* receive traffic on the first queue, hence there is no need
* to capture the return value
*/
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags))
ice_vsi_cfg_rss_lut_key(vsi);
break; break;
case ICE_VSI_VF: case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi); ret = ice_vsi_alloc_q_vectors(vsi);
......
...@@ -15,7 +15,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi); ...@@ -15,7 +15,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int ice_vsi_cfg_txqs(struct ice_vsi *vsi); int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi); void ice_vsi_cfg_msix(struct ice_vsi *vsi);
...@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi); ...@@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num); u16 rel_vmvf_num);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena); int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
......
...@@ -1389,7 +1389,6 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) ...@@ -1389,7 +1389,6 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
{ {
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
int oicr_idx, err = 0; int oicr_idx, err = 0;
u8 itr_gran;
u32 val; u32 val;
if (!pf->int_name[0]) if (!pf->int_name[0])
...@@ -1453,10 +1452,8 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) ...@@ -1453,10 +1452,8 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
PFINT_MBX_CTL_CAUSE_ENA_M); PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val); wr32(hw, PFINT_MBX_CTL, val);
itr_gran = hw->itr_gran;
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
ITR_TO_REG(ICE_ITR_8K, itr_gran)); ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
ice_flush(hw); ice_flush(hw);
ice_irq_dynamic_ena(hw, NULL, NULL); ice_irq_dynamic_ena(hw, NULL, NULL);
...@@ -1531,6 +1528,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) ...@@ -1531,6 +1528,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
csumo_features = NETIF_F_RXCSUM | csumo_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IP_CSUM |
NETIF_F_SCTP_CRC |
NETIF_F_IPV6_CSUM; NETIF_F_IPV6_CSUM;
vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
...@@ -1997,6 +1995,23 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) ...@@ -1997,6 +1995,23 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return 0; return 0;
} }
/**
* ice_verify_itr_gran - verify driver's assumption of ITR granularity
* @pf: pointer to the PF structure
*
* There is no error returned here because the driver will be able to handle a
* different ITR granularity, but interrupt moderation will not be accurate if
* the driver's assumptions are not verified. This assumption is made so we can
* use constants in the hot path instead of accessing structure members.
*/
static void ice_verify_itr_gran(struct ice_pf *pf)
{
if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
dev_warn(&pf->pdev->dev,
"%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
(ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
}
/** /**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure * @pf: pointer to the PF structure
...@@ -2163,6 +2178,7 @@ static int ice_probe(struct pci_dev *pdev, ...@@ -2163,6 +2178,7 @@ static int ice_probe(struct pci_dev *pdev,
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
ice_verify_cacheline_size(pf); ice_verify_cacheline_size(pf);
ice_verify_itr_gran(pf);
return 0; return 0;
...@@ -2546,7 +2562,8 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) ...@@ -2546,7 +2562,8 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err) if (err)
return err; return err;
} }
err = ice_vsi_cfg_txqs(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
if (!err) if (!err)
err = ice_vsi_cfg_rxqs(vsi); err = ice_vsi_cfg_rxqs(vsi);
...@@ -2944,13 +2961,92 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) ...@@ -2944,13 +2961,92 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
} }
} }
/**
* ice_force_phys_link_state - Force the physical link state
* @vsi: VSI to force the physical link state to up/down
* @link_up: true/false indicates to set the physical link to up/down
*
* Force the physical link state by getting the current PHY capabilities from
* hardware and setting the PHY config based on the determined capabilities. If
* the link changes, a link event will be triggered because both the Enable
* Automatic Link Update and LESM Enable bits are set when setting the PHY
* capabilities.
*
* Returns 0 on success, negative on failure
*/
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_aqc_set_phy_cfg_data *cfg;
struct ice_port_info *pi;
struct device *dev;
int retcode;
if (!vsi || !vsi->port_info || !vsi->back)
return -EINVAL;
if (vsi->type != ICE_VSI_PF)
return 0;
dev = &vsi->back->pdev->dev;
pi = vsi->port_info;
pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (retcode) {
dev_err(dev,
"Failed to get phy capabilities, VSI %d error %d\n",
vsi->vsi_num, retcode);
retcode = -EIO;
goto out;
}
/* No change in link */
if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
goto out;
cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
retcode = -ENOMEM;
goto out;
}
cfg->phy_type_low = pcaps->phy_type_low;
cfg->phy_type_high = pcaps->phy_type_high;
cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
cfg->low_power_ctrl = pcaps->low_power_ctrl;
cfg->eee_cap = pcaps->eee_cap;
cfg->eeer_value = pcaps->eeer_value;
cfg->link_fec_opt = pcaps->link_fec_options;
if (link_up)
cfg->caps |= ICE_AQ_PHY_ENA_LINK;
else
cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
if (retcode) {
dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
vsi->vsi_num, retcode);
retcode = -EIO;
}
devm_kfree(dev, cfg);
out:
devm_kfree(dev, pcaps);
return retcode;
}
/** /**
* ice_down - Shutdown the connection * ice_down - Shutdown the connection
* @vsi: The VSI being stopped * @vsi: The VSI being stopped
*/ */
int ice_down(struct ice_vsi *vsi) int ice_down(struct ice_vsi *vsi)
{ {
int i, tx_err, rx_err; int i, tx_err, rx_err, link_err = 0;
/* Caller of this function is expected to set the /* Caller of this function is expected to set the
* vsi->state __ICE_DOWN bit * vsi->state __ICE_DOWN bit
...@@ -2961,7 +3057,8 @@ int ice_down(struct ice_vsi *vsi) ...@@ -2961,7 +3057,8 @@ int ice_down(struct ice_vsi *vsi)
} }
ice_vsi_dis_irq(vsi); ice_vsi_dis_irq(vsi);
tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err) if (tx_err)
netdev_err(vsi->netdev, netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n", "Failed stop Tx rings, VSI %d error %d\n",
...@@ -2975,13 +3072,21 @@ int ice_down(struct ice_vsi *vsi) ...@@ -2975,13 +3072,21 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi); ice_napi_disable_all(vsi);
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
link_err = ice_force_phys_link_state(vsi, false);
if (link_err)
netdev_err(vsi->netdev,
"Failed to set physical link down, VSI %d error %d\n",
vsi->vsi_num, link_err);
}
ice_for_each_txq(vsi, i) ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]); ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i) ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]); ice_clean_rx_ring(vsi->rx_rings[i]);
if (tx_err || rx_err) { if (tx_err || rx_err || link_err) {
netdev_err(vsi->netdev, netdev_err(vsi->netdev,
"Failed to close VSI 0x%04X on switch 0x%04X\n", "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id); vsi->vsi_num, vsi->vsw->sw_id);
...@@ -3641,7 +3746,8 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) ...@@ -3641,7 +3746,8 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
*/ */
static int static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
u16 __always_unused flags, struct netlink_ext_ack *extack) u16 __always_unused flags,
struct netlink_ext_ack __always_unused *extack)
{ {
struct ice_netdev_priv *np = netdev_priv(dev); struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_pf *pf = np->vsi->back; struct ice_pf *pf = np->vsi->back;
...@@ -3814,8 +3920,14 @@ static int ice_open(struct net_device *netdev) ...@@ -3814,8 +3920,14 @@ static int ice_open(struct net_device *netdev)
netif_carrier_off(netdev); netif_carrier_off(netdev);
err = ice_vsi_open(vsi); err = ice_force_phys_link_state(vsi, true);
if (err) {
netdev_err(netdev,
"Failed to set physical link up, error %d\n", err);
return err;
}
err = ice_vsi_open(vsi);
if (err) if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id); vsi->vsi_num, vsi->vsw->sw_id);
......
...@@ -124,6 +124,62 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) ...@@ -124,6 +124,62 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
return status; return status;
} }
/**
* ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq
* method. Ownership of the NVM is taken before reading the buffer and later
* released.
*/
static enum ice_status
ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
enum ice_status status;
bool last_cmd = false;
u16 words_read = 0;
u16 i = 0;
do {
u16 read_size, off_w;
/* Calculate number of words we should read in this step.
* It's not allowed to read more than one page at a time or
* to cross page boundaries.
*/
off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;
read_size = off_w ?
min(*words,
(u16)(ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) :
min((*words - words_read), ICE_SR_SECTOR_SIZE_IN_WORDS);
/* Check if this is last command, if so set proper flag */
if ((words_read + read_size) >= *words)
last_cmd = true;
status = ice_read_sr_aq(hw, offset, read_size,
data + words_read, last_cmd);
if (status)
goto read_nvm_buf_aq_exit;
/* Increment counter for words already read and move offset to
* new read location
*/
words_read += read_size;
offset += read_size;
} while (words_read < *words);
for (i = 0; i < *words; i++)
data[i] = le16_to_cpu(((__le16 *)data)[i]);
read_nvm_buf_aq_exit:
*words = words_read;
return status;
}
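
Worked example of the chunking logic above: reads never cross a Shadow RAM sector boundary, so an unaligned request splits into a head chunk up to the boundary, whole sectors, then a tail. The 0x800-word sector size is an assumption for illustration; read_size() here mirrors the driver's calculation.

#include <stdio.h>

#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 /* assumed sector size */

static unsigned short read_size(unsigned short offset, unsigned short words,
				unsigned short words_read)
{
	unsigned short off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;

	if (off_w)	/* first chunk: stop at the end of the current sector */
		return words < ICE_SR_SECTOR_SIZE_IN_WORDS - off_w ?
		       words : ICE_SR_SECTOR_SIZE_IN_WORDS - off_w;
	/* sector-aligned: at most one full sector per command */
	return (words - words_read) < ICE_SR_SECTOR_SIZE_IN_WORDS ?
	       (words - words_read) : ICE_SR_SECTOR_SIZE_IN_WORDS;
}

int main(void)
{
	/* 0x900 words starting at 0x7F0 -> chunks of 0x10, 0x800, 0xF0 */
	unsigned short offset = 0x7F0, words = 0x900, done = 0;

	while (done < words) {
		unsigned short n = read_size(offset, words, done);

		printf("read 0x%x words at 0x%x\n", n, offset);
		done += n;
		offset += n;
	}
	return 0;
}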
/** /**
* ice_acquire_nvm - Generic request for acquiring the NVM ownership * ice_acquire_nvm - Generic request for acquiring the NVM ownership
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
...@@ -234,3 +290,28 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) ...@@ -234,3 +290,28 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status; return status;
} }
/**
* ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buf) from the SR using the ice_read_sr_buf_aq
* method. The buffer read is preceded by taking NVM ownership and followed
* by its release.
*/
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
enum ice_status status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
status = ice_read_sr_buf_aq(hw, offset, words, data);
ice_release_nvm(hw);
}
return status;
}
...@@ -85,36 +85,58 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) ...@@ -85,36 +85,58 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
} }
/** /**
* ice_aq_query_sched_elems - query scheduler elements * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the hw struct * @hw: pointer to the hw struct
* @elems_req: number of elements to query * @cmd_opc: cmd opcode
* @elems_req: number of elements to request
* @buf: pointer to buffer * @buf: pointer to buffer
* @buf_size: buffer size in bytes * @buf_size: buffer size in bytes
* @elems_ret: returns total number of elements returned * @elems_resp: returns total number of elements in the response
* @cd: pointer to command details structure or NULL * @cd: pointer to command details structure or NULL
* *
* Query scheduling elements (0x0404) * This function sends a scheduling elements cmd (cmd_opc)
*/ */
static enum ice_status static enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
struct ice_aqc_get_elem *buf, u16 buf_size, u16 elems_req, void *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd) u16 *elems_resp, struct ice_sq_cd *cd)
{ {
struct ice_aqc_get_cfg_elem *cmd; struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc; struct ice_aq_desc desc;
enum ice_status status; enum ice_status status;
cmd = &desc.params.get_update_elem; cmd = &desc.params.sched_elem_cmd;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems); ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
cmd->num_elem_req = cpu_to_le16(elems_req); cmd->num_elem_req = cpu_to_le16(elems_req);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && elems_ret) if (!status && elems_resp)
*elems_ret = le16_to_cpu(cmd->num_elem_resp); *elems_resp = le16_to_cpu(cmd->num_elem_resp);
return status; return status;
} }
/**
* ice_aq_query_sched_elems - query scheduler elements
* @hw: pointer to the hw struct
* @elems_req: number of elements to query
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @elems_ret: returns total number of elements returned
* @cd: pointer to command details structure or NULL
*
* Query scheduling elements (0x0404)
*/
static enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
elems_req, (void *)buf, buf_size,
elems_ret, cd);
}
/** /**
* ice_sched_query_elem - query element information from hw * ice_sched_query_elem - query element information from hw
* @hw: pointer to the hw struct * @hw: pointer to the hw struct
...@@ -218,20 +240,9 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, ...@@ -218,20 +240,9 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size, struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd) u16 *grps_del, struct ice_sq_cd *cd)
{ {
struct ice_aqc_add_move_delete_elem *cmd; return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
struct ice_aq_desc desc; grps_req, (void *)buf, buf_size,
enum ice_status status; grps_del, cd);
cmd = &desc.params.add_move_delete_elem;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->num_grps_req = cpu_to_le16(grps_req);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && grps_del)
*grps_del = le16_to_cpu(cmd->num_grps_updated);
return status;
} }
/** /**
...@@ -442,52 +453,9 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, ...@@ -442,52 +453,9 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size, struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd) u16 *grps_added, struct ice_sq_cd *cd)
{ {
struct ice_aqc_add_move_delete_elem *cmd; return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
struct ice_aq_desc desc; grps_req, (void *)buf, buf_size,
enum ice_status status; grps_added, cd);
cmd = &desc.params.add_move_delete_elem;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->num_grps_req = cpu_to_le16(grps_req);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && grps_added)
*grps_added = le16_to_cpu(cmd->num_grps_updated);
return status;
}
/**
* ice_suspend_resume_elems - suspend/resume scheduler elements
* @hw: pointer to the hw struct
* @elems_req: number of elements to suspend
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @elems_ret: returns total number of elements suspended
* @cd: pointer to command details structure or NULL
* @cmd_code: command code for suspend or resume
*
* suspend/resume scheduler elements
*/
static enum ice_status
ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd,
enum ice_adminq_opc cmd_code)
{
struct ice_aqc_get_cfg_elem *cmd;
struct ice_aq_desc desc;
enum ice_status status;
cmd = &desc.params.get_update_elem;
ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
cmd->num_elem_req = cpu_to_le16(elems_req);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && elems_ret)
*elems_ret = le16_to_cpu(cmd->num_elem_resp);
return status;
} }
/** /**
...@@ -506,8 +474,9 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, ...@@ -506,8 +474,9 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf, struct ice_aqc_suspend_resume_elem *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{ {
return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret, return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
cd, ice_aqc_opc_suspend_sched_elems); elems_req, (void *)buf, buf_size,
elems_ret, cd);
} }
/** /**
...@@ -526,8 +495,9 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, ...@@ -526,8 +495,9 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf, struct ice_aqc_suspend_resume_elem *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{ {
return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret, return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
cd, ice_aqc_opc_resume_sched_elems); elems_req, (void *)buf, buf_size,
elems_ret, cd);
} }
/** /**
...@@ -591,23 +561,18 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, ...@@ -591,23 +561,18 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
} }
/** /**
* ice_sched_clear_tx_topo - clears the schduler tree nodes * ice_sched_clear_agg - clears the agg related information
* @pi: port information structure * @hw: pointer to the hardware structure
* *
* This function removes all the nodes from HW as well as from SW DB. * This function removes the agg list and frees up aggregator-related
* memory previously allocated.
*/ */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi) void ice_sched_clear_agg(struct ice_hw *hw)
{ {
struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp; struct ice_sched_agg_info *atmp;
struct ice_hw *hw;
if (!pi)
return;
hw = pi->hw;
list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) { list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *vtmp; struct ice_sched_agg_vsi_info *vtmp;
...@@ -616,8 +581,21 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi) ...@@ -616,8 +581,21 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
list_del(&agg_vsi_info->list_entry); list_del(&agg_vsi_info->list_entry);
devm_kfree(ice_hw_to_dev(hw), agg_vsi_info); devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
} }
list_del(&agg_info->list_entry);
devm_kfree(ice_hw_to_dev(hw), agg_info);
} }
}
/**
* ice_sched_clear_tx_topo - clears the scheduler tree nodes
* @pi: port information structure
*
* This function removes all the nodes from HW as well as from SW DB.
*/
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
if (!pi)
return;
if (pi->root) { if (pi->root) {
ice_free_sched_node(pi, pi->root); ice_free_sched_node(pi, pi->root);
pi->root = NULL; pi->root = NULL;
...@@ -1035,7 +1013,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) ...@@ -1035,7 +1013,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
/* initialize the port for handling the scheduler tree */ /* initialize the port for handling the scheduler tree */
pi->port_state = ICE_SCHED_PORT_STATE_READY; pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock); mutex_init(&pi->sched_lock);
INIT_LIST_HEAD(&pi->agg_list);
err_init_port: err_init_port:
if (status && pi->root) { if (status && pi->root) {
...@@ -1618,7 +1595,8 @@ ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) ...@@ -1618,7 +1595,8 @@ ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp; struct ice_sched_agg_info *atmp;
list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) { list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
list_entry) {
struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *vtmp; struct ice_sched_agg_vsi_info *vtmp;
......
...@@ -28,6 +28,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi); ...@@ -28,6 +28,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi); void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw); void ice_sched_cleanup_all(struct ice_hw *hw);
void ice_sched_clear_agg(struct ice_hw *hw);
struct ice_sched_node * struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
enum ice_status enum ice_status
......
...@@ -85,6 +85,12 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) ...@@ -85,6 +85,12 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
case ICE_AQ_LINK_SPEED_40GB: case ICE_AQ_LINK_SPEED_40GB:
speed = ICE_LINK_SPEED_40000MBPS; speed = ICE_LINK_SPEED_40000MBPS;
break; break;
case ICE_AQ_LINK_SPEED_50GB:
speed = ICE_LINK_SPEED_50000MBPS;
break;
case ICE_AQ_LINK_SPEED_100GB:
speed = ICE_LINK_SPEED_100000MBPS;
break;
default: default:
speed = ICE_LINK_SPEED_UNKNOWN; speed = ICE_LINK_SPEED_UNKNOWN;
break; break;
...@@ -116,6 +122,9 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) ...@@ -116,6 +122,9 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
break; break;
case ICE_AQ_LINK_SPEED_40GB: case ICE_AQ_LINK_SPEED_40GB:
/* fall through */ /* fall through */
case ICE_AQ_LINK_SPEED_50GB:
/* fall through */
case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB; speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break; break;
default: default:
......
...@@ -1052,6 +1052,69 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1052,6 +1052,69 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
return failure ? budget : (int)total_rx_pkts; return failure ? budget : (int)total_rx_pkts;
} }
/**
* ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
* @itr_idx: interrupt throttling index
* @reg_itr: interrupt throttling value adjusted based on ITR granularity
*/
static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
{
return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
(reg_itr << GLINT_DYN_CTL_INTERVAL_S);
}
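
For reference, composing a GLINT_DYN_CTL write for ITR index 0 and a 25-unit (50-usec) interval looks like this. The INTENA bit position is taken to be bit 0, an assumption, since only the CLEARPBA, ITR_INDX and INTERVAL positions appear in this diff:

#include <stdio.h>

#define GLINT_DYN_CTL_INTENA_M    (1u << 0)  /* assumed position */
#define GLINT_DYN_CTL_CLEARPBA_M  (1u << 1)
#define GLINT_DYN_CTL_ITR_INDX_S  3
#define GLINT_DYN_CTL_INTERVAL_S  5

static unsigned int buildreg_itr(int itr_idx, unsigned short reg_itr)
{
	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	       (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
	       (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
}

int main(void)
{
	/* Rx ITR index 0, 50 usecs -> reg_itr 25 (2-usec granularity) */
	printf("0x%08x\n", buildreg_itr(0, 25)); /* 0x00000323 */
	return 0;
}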
/**
* ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
* @vsi: the VSI associated with the q_vector
* @q_vector: q_vector for which ITR is being updated and interrupt enabled
*/
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_ring_container *rc;
u32 itr_val;
/* This block of logic allows us to get away with only updating
* one ITR value with each interrupt. The idea is to perform a
* pseudo-lazy update with the following criteria.
*
* 1. Rx is given higher priority than Tx if both are in same state
* 2. If we must reduce an ITR, that reduction is given highest priority.
* 3. We then give priority to increasing ITR based on amount.
*/
if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
rc = &q_vector->rx;
/* Rx ITR needs to be reduced, this is highest priority */
itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
rc->current_itr = rc->target_itr;
} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
((q_vector->rx.target_itr - q_vector->rx.current_itr) <
(q_vector->tx.target_itr - q_vector->tx.current_itr))) {
rc = &q_vector->tx;
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
rc->current_itr = rc->target_itr;
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
rc = &q_vector->rx;
/* Rx ITR needs to be increased, third priority */
itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
rc->current_itr = rc->target_itr;
} else {
/* Still have to re-enable the interrupts */
itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
}
if (!test_bit(__ICE_DOWN, vsi->state)) {
int vector = vsi->hw_base_vector + q_vector->v_idx;
wr32(hw, GLINT_DYN_CTL(vector), itr_val);
}
}
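
A small sketch of the branch selection above, replicating the pseudo-lazy priority rules for a few (target, current) pairs; pick() is a hypothetical helper for illustration:

#include <stdio.h>

static const char *pick(int rx_t, int rx_c, int tx_t, int tx_c)
{
	if (rx_t < rx_c)
		return "Rx decrease";			/* highest priority */
	if (tx_t < tx_c || (rx_t - rx_c) < (tx_t - tx_c))
		return "Tx update";			/* decrease, or bigger increase */
	if (rx_c != rx_t)
		return "Rx increase";
	return "no ITR change, just re-enable";
}

int main(void)
{
	printf("%s\n", pick(20, 50, 50, 50)); /* Rx decrease */
	printf("%s\n", pick(50, 50, 20, 50)); /* Tx update */
	printf("%s\n", pick(70, 50, 90, 50)); /* Tx update (larger delta) */
	printf("%s\n", pick(70, 50, 50, 50)); /* Rx increase */
	return 0;
}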
/** /**
* ice_napi_poll - NAPI polling Rx/Tx cleanup routine * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it * @napi: napi struct with our devices info in it
...@@ -1108,7 +1171,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) ...@@ -1108,7 +1171,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*/ */
if (likely(napi_complete_done(napi, work_done))) if (likely(napi_complete_done(napi, work_done)))
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector); ice_update_ena_itr(vsi, q_vector);
return min(work_done, budget - 1); return min(work_done, budget - 1);
} }
...@@ -1402,6 +1465,12 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) ...@@ -1402,6 +1465,12 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
break; break;
case IPPROTO_SCTP: case IPPROTO_SCTP:
/* enable SCTP checksum offload */
cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
l4_len = sizeof(struct sctphdr) >> 2;
offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
break;
default: default:
if (first->tx_flags & ICE_TX_FLAGS_TSO) if (first->tx_flags & ICE_TX_FLAGS_TSO)
return -1; return -1;
......
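The l4_len written above is expressed in 32-bit words, so the 12-byte SCTP common header becomes 3. A standalone sketch of that arithmetic; the L4_LEN shift position of 14 is an assumption, and the local sctphdr simply mirrors the 12-byte layout:

#include <stdio.h>

/* 12-byte SCTP common header: ports, verification tag, checksum */
struct sctphdr { unsigned short src, dst; unsigned int vtag, checksum; };

#define ICE_TX_DESC_LEN_L4_LEN_S 14 /* assumed bit position */

int main(void)
{
	unsigned int l4_len = sizeof(struct sctphdr) >> 2; /* 12 bytes -> 3 words */

	printf("l4_len=%u offset bits=0x%x\n", l4_len,
	       l4_len << ICE_TX_DESC_LEN_L4_LEN_S);
	/* prints: l4_len=3 offset bits=0xc000 */
	return 0;
}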
...@@ -116,16 +116,17 @@ enum ice_rx_dtype { ...@@ -116,16 +116,17 @@ enum ice_rx_dtype {
/* indices into GLINT_ITR registers */ /* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1 #define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define ICE_ITR_8K 124
#define ICE_ITR_8K 125
#define ICE_ITR_20K 50 #define ICE_ITR_20K 50
#define ICE_DFLT_TX_ITR ICE_ITR_20K #define ICE_ITR_MAX 8160
#define ICE_DFLT_RX_ITR ICE_ITR_20K #define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
/* apply ITR granularity translation to program the register. itr_gran is either #define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
* 2 or 4 usecs so we need to divide by 2 first then shift by that value #define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
*/ #define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \ #define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
((itr_gran) / 2)) #define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
#define ICE_DFLT_INTRL 0 #define ICE_DFLT_INTRL 0
...@@ -180,13 +181,20 @@ enum ice_latency_range { ...@@ -180,13 +181,20 @@ enum ice_latency_range {
}; };
struct ice_ring_container { struct ice_ring_container {
/* array of pointers to rings */ /* head of linked-list of rings */
struct ice_ring *ring; struct ice_ring *ring;
unsigned long next_update; /* jiffies value of next queue update */
unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */ unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range; enum ice_latency_range latency_range;
int itr_idx; /* index in the interrupt vector */ int itr_idx; /* index in the interrupt vector */
u16 itr; u16 target_itr; /* value in usecs divided by the hw->itr_gran */
u16 current_itr; /* value in usecs divided by the hw->itr_gran */
/* high bit set means dynamic ITR, rest is used to store user
* readable ITR value in usecs and must be converted before programming
* to a register.
*/
u16 itr_setting;
}; };
/* iterator for handling rings in ring container */ /* iterator for handling rings in ring container */
......
...@@ -90,6 +90,7 @@ enum ice_vsi_type { ...@@ -90,6 +90,7 @@ enum ice_vsi_type {
struct ice_link_status { struct ice_link_status {
/* Refer to ice_aq_phy_type for bits definition */ /* Refer to ice_aq_phy_type for bits definition */
u64 phy_type_low; u64 phy_type_low;
u64 phy_type_high;
u16 max_frame_size; u16 max_frame_size;
u16 link_speed; u16 link_speed;
u16 req_speeds; u16 req_speeds;
...@@ -118,6 +119,7 @@ struct ice_phy_info { ...@@ -118,6 +119,7 @@ struct ice_phy_info {
struct ice_link_status link_info; struct ice_link_status link_info;
struct ice_link_status link_info_old; struct ice_link_status link_info_old;
u64 phy_type_low; u64 phy_type_low;
u64 phy_type_high;
enum ice_media_type media_type; enum ice_media_type media_type;
u8 get_link_info; u8 get_link_info;
}; };
...@@ -272,7 +274,6 @@ struct ice_port_info { ...@@ -272,7 +274,6 @@ struct ice_port_info {
struct ice_mac_info mac; struct ice_mac_info mac;
struct ice_phy_info phy; struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */ struct mutex sched_lock; /* protect access to TXSched tree */
struct list_head agg_list; /* lists all aggregator */
u8 lport; u8 lport;
#define ICE_LPORT_MASK 0xff #define ICE_LPORT_MASK 0xff
u8 is_vf; u8 is_vf;
...@@ -326,6 +327,7 @@ struct ice_hw { ...@@ -326,6 +327,7 @@ struct ice_hw {
u8 max_cgds; u8 max_cgds;
u8 sw_entry_point_layer; u8 sw_entry_point_layer;
u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM]; u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct list_head agg_list; /* lists all aggregators */
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI]; struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */ u8 evb_veb; /* true for VEB, false for VEPA */
......
...@@ -224,13 +224,15 @@ void ice_free_vfs(struct ice_pf *pf) ...@@ -224,13 +224,15 @@ void ice_free_vfs(struct ice_pf *pf)
/* Avoid wait time by stopping all VFs at the same time */ /* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) { for (i = 0; i < pf->num_alloc_vfs; i++) {
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
continue; continue;
vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
/* stop rings without wait time */ /* stop rings without wait time */
ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx], ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
ICE_NO_RESET, i); ice_vsi_stop_rx_rings(vsi);
ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
} }
...@@ -831,6 +833,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ...@@ -831,6 +833,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{ {
struct ice_pf *pf = vf->pf; struct ice_pf *pf = vf->pf;
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
bool rsd = false; bool rsd = false;
u32 reg; u32 reg;
int i; int i;
...@@ -843,17 +846,18 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ...@@ -843,17 +846,18 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_trigger_vf_reset(vf, is_vflr); ice_trigger_vf_reset(vf, is_vflr);
vsi = pf->vsi[vf->lan_vsi_idx];
if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET, ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
vf->vf_id); ice_vsi_stop_rx_rings(vsi);
ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
clear_bit(ICE_VF_STATE_ENA, vf->vf_states); clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
} else { } else {
/* Call Disable LAN Tx queue AQ call even when queues are not /* Call Disable LAN Tx queue AQ call even when queues are not
* enabled. This is needed for successful completion of VFR * enabled. This is needed for successful completion of VFR
*/ */
ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0, ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL); vf->vf_id, NULL);
} }
/* poll VPGEN_VFRSTAT reg to make sure /* poll VPGEN_VFRSTAT reg to make sure
...@@ -1614,7 +1618,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) ...@@ -1614,7 +1618,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param; goto error_param;
} }
if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
dev_err(&vsi->back->pdev->dev, dev_err(&vsi->back->pdev->dev,
"Failed to stop tx rings on VSI %d\n", "Failed to stop tx rings on VSI %d\n",
vsi->vsi_num); vsi->vsi_num);
...@@ -1784,7 +1788,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) ...@@ -1784,7 +1788,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->num_txq = qci->num_queue_pairs; vsi->num_txq = qci->num_queue_pairs;
vsi->num_rxq = qci->num_queue_pairs; vsi->num_rxq = qci->num_queue_pairs;
if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi)) if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
aq_ret = 0; aq_ret = 0;
else else
aq_ret = ICE_ERR_PARAM; aq_ret = ICE_ERR_PARAM;
......