Commit 6cd476d2 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-09-12

This series contains updates to the ice driver to implement and support
loading a Dynamic Device Personalization (DDP) package from lib/firmware
onto the device.

Paul updates the way the driver version is stored in the driver so that
it can be passed to the firmware.  Passing the driver version to the
firmware is needed when loading a DDP package, to ensure the driver has
the appropriate support for the features in the package.

Lukasz fixes how the firmware version is stored to align with how the
firmware stores its own version.  He also extends the log message to
display additional useful information, such as the NVM version, API patch
information, and firmware build hash.

Tony adds the needed driver support to check, load and store the DDP
package.  He also adds support for loading DDP packages intended for
specific hardware devices, as well as handling for when loading of the
DDP package fails.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 069841ef 2de12566
@@ -15,6 +15,7 @@ ice-y := ice_main.o \
ice_sched.o \
ice_lib.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
@@ -8,6 +8,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
@@ -29,6 +30,7 @@
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
@@ -52,7 +54,6 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MBXRQ_LEN 512
@@ -307,6 +308,7 @@ enum ice_pf_flags {
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
@@ -404,6 +406,17 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
wr32(hw, GLINT_DYN_CTL(vector), val);
}
/**
* ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
* @netdev: pointer to the netdev struct
*/
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
return np->vsi->back;
}
/**
* ice_get_main_vsi - Get the PF VSI
* @pf: PF instance
@@ -421,6 +434,7 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
void ice_update_vsi_stats(struct ice_vsi *vsi);
...
@@ -33,6 +33,17 @@ struct ice_aqc_get_ver {
u8 api_patch;
};
/* Send driver version (indirect 0x0002) */
struct ice_aqc_driver_ver {
u8 major_ver;
u8 minor_ver;
u8 build_ver;
u8 subbuild_ver;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
u8 driver_unloading;
@@ -1519,6 +1530,56 @@ struct ice_aqc_get_clear_fw_log {
__le32 addr_low;
};
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C42) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
u8 reserved[3];
__le32 reserved1;
__le32 addr_high;
__le32 addr_low;
};
struct ice_aqc_download_pkg_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
__le32 addr_low;
};
/* Get Package Info List (indirect 0x0C43) */
struct ice_aqc_get_pkg_info_list {
__le32 reserved1;
__le32 reserved2;
__le32 addr_high;
__le32 addr_low;
};
/* Version format for packages */
struct ice_pkg_ver {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
#define ICE_PKG_NAME_SIZE 32
struct ice_aqc_get_pkg_info {
struct ice_pkg_ver ver;
char name[ICE_PKG_NAME_SIZE];
u8 is_in_nvm;
u8 is_active;
u8 is_active_at_boot;
u8 is_modified;
};
/* Get Package Info List response buffer format (0x0C43) */
struct ice_aqc_get_pkg_info_resp {
__le32 count;
struct ice_aqc_get_pkg_info pkg_info[1];
};
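
Note that pkg_info[1] is the usual one-element trailing-array idiom for a
variable-length AQ response: a caller that wants room for up to N entries
sizes the buffer with N - 1 extra elements, as ice_get_pkg_info() does
further down (a sketch; ICE_PKG_CNT is defined outside this hunk):

	u16 size = sizeof(struct ice_aqc_get_pkg_info_resp) +
		   (ICE_PKG_CNT - 1) * sizeof(struct ice_aqc_get_pkg_info);
	struct ice_aqc_get_pkg_info_resp *resp = kzalloc(size, GFP_KERNEL);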
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1547,6 +1608,7 @@ struct ice_aq_desc {
u8 raw[16];
struct ice_aqc_generic generic;
struct ice_aqc_get_ver get_ver;
struct ice_aqc_driver_ver driver_ver;
struct ice_aqc_q_shutdown q_shutdown;
struct ice_aqc_req_res res_owner;
struct ice_aqc_manage_mac_read mac_read;
@@ -1580,6 +1642,7 @@ struct ice_aq_desc {
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
@@ -1612,12 +1675,18 @@ enum ice_aq_err {
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
};
/* Admin Queue command opcodes */
enum ice_adminq_opc {
/* AQ commands */
ice_aqc_opc_get_ver = 0x0001,
ice_aqc_opc_driver_ver = 0x0002,
ice_aqc_opc_q_shutdown = 0x0003,
/* resource ownership */
@@ -1699,6 +1768,10 @@ enum ice_adminq_opc {
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
ice_aqc_opc_fw_logging_info = 0xFF10,
...
@@ -728,6 +728,29 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
}
/**
* ice_get_nvm_version - get cached NVM version data
* @hw: pointer to the hardware structure
* @oem_ver: 8 bit NVM version
* @oem_build: 16 bit NVM build number
* @oem_patch: 8 bit NVM patch number
* @ver_hi: high 16 bits of the NVM version
* @ver_lo: low 16 bits of the NVM version
*/
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
{
struct ice_nvm_info *nvm = &hw->nvm;
*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
ICE_OEM_VER_BUILD_SHIFT);
*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
}
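
For reference, these cached fields are the same ones formatted by the
ethtool helper removed later in this series; a minimal sketch of building a
combined version string from them (the buffer size here is an illustrative
choice, not the driver's definitive log format):

	u8 oem_ver, oem_patch, ver_hi, ver_lo;
	u16 oem_build;
	char buf[32];

	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
			    &ver_hi, &ver_lo);
	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);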
/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
@@ -859,7 +882,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
return 0;
err_unroll_fltr_mgmt_struct:
@@ -887,6 +912,8 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_sched_cleanup_all(hw);
ice_sched_clear_agg(hw);
ice_free_seg(hw);
ice_free_hw_tbls(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -1207,6 +1234,12 @@ ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
* in firmware is acquired by the software to prevent most (but not all) types
* of AQ commands from being sent to FW
*/
DEFINE_MUTEX(ice_global_cfg_lock_sw);
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
* @hw: pointer to the HW struct
@@ -1221,7 +1254,38 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
struct ice_aqc_req_res *cmd = &desc->params.res_owner;
bool lock_acquired = false;
enum ice_status status;
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
* Package, Get Version, Get Package Info List and Release Resource
* (with resource ID set to Global Config Lock) AdminQ commands are
* allowed; all others must block until the package download completes
* and the Global Config Lock is released. See also
* ice_acquire_global_cfg_lock().
*/
switch (le16_to_cpu(desc->opcode)) {
case ice_aqc_opc_download_pkg:
case ice_aqc_opc_get_pkg_info_list:
case ice_aqc_opc_get_ver:
break;
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
break;
/* fall-through */
default:
mutex_lock(&ice_global_cfg_lock_sw);
lock_acquired = true;
break;
}
status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
if (lock_acquired)
mutex_unlock(&ice_global_cfg_lock_sw);
return status;
}
/**
@@ -1258,6 +1322,43 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
return status;
}
/**
* ice_aq_send_driver_ver
* @hw: pointer to the HW struct
* @dv: driver's major, minor version
* @cd: pointer to command details structure or NULL
*
* Send the driver version (0x0002) to the firmware
*/
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
struct ice_aqc_driver_ver *cmd;
struct ice_aq_desc desc;
u16 len;
cmd = &desc.params.driver_ver;
if (!dv)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->major_ver = dv->major_ver;
cmd->minor_ver = dv->minor_ver;
cmd->build_ver = dv->build_ver;
cmd->subbuild_ver = dv->subbuild_ver;
len = 0;
while (len < sizeof(dv->driver_string) &&
isascii(dv->driver_string[len]) && dv->driver_string[len])
len++;
return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
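
A caller-side sketch, assuming struct ice_driver_ver (defined outside this
diff) carries the four version bytes plus a NUL-terminated driver_string
array; the DRV_VERSION_* macros are hypothetical stand-ins for the driver's
version constants:

	struct ice_driver_ver dv;

	dv.major_ver = DRV_VERSION_MAJOR;	/* hypothetical macros */
	dv.minor_ver = DRV_VERSION_MINOR;
	dv.build_ver = DRV_VERSION_BUILD;
	dv.subbuild_ver = 0;
	strscpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	ice_aq_send_driver_ver(&pf->hw, &dv, NULL);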
/**
* ice_aq_q_shutdown
* @hw: pointer to the HW struct
@@ -1745,6 +1846,75 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
return status;
}
/**
* ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
* @hw: pointer to the hardware structure
*/
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
struct ice_hw_func_caps *func_caps = &hw->func_caps;
struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
u32 valid_func, rxq_first_id, txq_first_id;
u32 msix_vector_first_id, max_mtu;
u32 num_func = 0;
u8 i;
/* cache some func_caps values that should be restored after memset */
valid_func = func_caps->common_cap.valid_functions;
txq_first_id = func_caps->common_cap.txq_first_id;
rxq_first_id = func_caps->common_cap.rxq_first_id;
msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
max_mtu = func_caps->common_cap.max_mtu;
/* unset func capabilities */
memset(func_caps, 0, sizeof(*func_caps));
/* restore cached values */
func_caps->common_cap.valid_functions = valid_func;
func_caps->common_cap.txq_first_id = txq_first_id;
func_caps->common_cap.rxq_first_id = rxq_first_id;
func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
func_caps->common_cap.max_mtu = max_mtu;
/* one Tx and one Rx queue in safe mode */
func_caps->common_cap.num_rxq = 1;
func_caps->common_cap.num_txq = 1;
/* two MSIX vectors, one for traffic and one for misc causes */
func_caps->common_cap.num_msix_vectors = 2;
func_caps->guar_num_vsi = 1;
/* cache some dev_caps values that should be restored after memset */
valid_func = dev_caps->common_cap.valid_functions;
txq_first_id = dev_caps->common_cap.txq_first_id;
rxq_first_id = dev_caps->common_cap.rxq_first_id;
msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
max_mtu = dev_caps->common_cap.max_mtu;
/* unset dev capabilities */
memset(dev_caps, 0, sizeof(*dev_caps));
/* restore cached values */
dev_caps->common_cap.valid_functions = valid_func;
dev_caps->common_cap.txq_first_id = txq_first_id;
dev_caps->common_cap.rxq_first_id = rxq_first_id;
dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
dev_caps->common_cap.max_mtu = max_mtu;
/* valid_func is a bitmap. get number of functions */
#define ICE_MAX_FUNCS 8
for (i = 0; i < ICE_MAX_FUNCS; i++)
if (valid_func & BIT(i))
num_func++;
/* one Tx and one Rx queue per function in safe mode */
dev_caps->common_cap.num_rxq = num_func;
dev_caps->common_cap.num_txq = num_func;
/* two MSIX vectors per function */
dev_caps->common_cap.num_msix_vectors = 2 * num_func;
}
/**
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
...
@@ -6,6 +6,7 @@
#include "ice.h"
#include "ice_type.h"
#include "ice_flex_pipe.h"
#include "ice_switch.h" #include "ice_switch.h"
#include <linux/avf/virtchnl.h> #include <linux/avf/virtchnl.h>
...@@ -41,6 +42,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, ...@@ -41,6 +42,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
void ice_clear_pxe_mode(struct ice_hw *hw); void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw); enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
void ice_dev_onetime_setup(struct ice_hw *hw);
enum ice_status
@@ -66,11 +69,17 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
@@ -130,6 +139,9 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
u8 *oem_patch, u8 *ver_hi, u8 *ver_lo);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
...
@@ -3,6 +3,48 @@
#include "ice_dcb_lib.h"
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
* @vsi: the VSI being configured
* @ena_tc: TC map to be enabled
*/
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = vsi->back;
struct ice_dcbx_cfg *dcbcfg;
u8 netdev_tc;
int i;
if (!netdev)
return;
if (!ena_tc) {
netdev_reset_tc(netdev);
return;
}
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
netdev_set_tc_queue(netdev,
vsi->tc_cfg.tc_info[i].netdev_tc,
vsi->tc_cfg.tc_info[i].qcount_tx,
vsi->tc_cfg.tc_info[i].qoffset);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
u8 ets_tc = dcbcfg->etscfg.prio_table[i];
/* Get the mapped netdev TC# for the UP */
netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
netdev_set_prio_tc_map(netdev, i, netdev_tc);
}
}
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
...
@@ -22,6 +22,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
@@ -58,5 +59,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
#define ice_vsi_cfg_netdev_tc(vsi, ena_tc) do {} while (0)
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
@@ -160,31 +160,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
/**
* ice_nvm_version_str - format the NVM version strings
* @hw: ptr to the hardware info
*/
static char *ice_nvm_version_str(struct ice_hw *hw)
{
static char buf[ICE_ETHTOOL_FWVER_LEN];
u8 ver, patch;
u32 full_ver;
u16 build;
full_ver = hw->nvm.oem_ver;
ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
ICE_OEM_VER_BUILD_SHIFT);
patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);
snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
(hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
(hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
hw->nvm.eetrack, ver, build, patch);
return buf;
}
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
@@ -3460,6 +3435,33 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_fecparam = ice_set_fecparam,
};
static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_strings = ice_get_strings,
.get_ethtool_stats = ice_get_ethtool_stats,
.get_sset_count = ice_get_sset_count,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
};
/**
* ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
* @netdev: network interface device structure
*/
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
}
/**
* ice_set_ethtool_ops - setup netdev ethtool ops
* @netdev: network interface device structure
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
#include "ice_flex_pipe.h"
/**
* ice_pkg_val_buf
* @buf: pointer to the ice buffer
*
* This helper function validates a buffer's header.
*/
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
struct ice_buf_hdr *hdr;
u16 section_count;
u16 data_end;
hdr = (struct ice_buf_hdr *)buf->buf;
/* verify data */
section_count = le16_to_cpu(hdr->section_count);
if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
return NULL;
data_end = le16_to_cpu(hdr->data_end);
if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
return NULL;
return hdr;
}
/**
* ice_find_buf_table
* @ice_seg: pointer to the ice segment
*
* Returns the address of the buffer table within the ice segment.
*/
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
struct ice_nvm_table *nvms;
nvms = (struct ice_nvm_table *)
(ice_seg->device_table +
le32_to_cpu(ice_seg->device_table_count));
return (__force struct ice_buf_table *)
(nvms->vers + le32_to_cpu(nvms->table_count));
}
/**
* ice_pkg_enum_buf
* @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
* @state: pointer to the enum state
*
* This function will enumerate all the buffers in the ice segment. The first
* call is made with the ice_seg parameter non-NULL; on subsequent calls,
* ice_seg is set to NULL which continues the enumeration. When the function
* returns a NULL pointer, then the end of the buffers has been reached, or an
* unexpected value has been detected (for example an invalid section count or
* an invalid buffer end value).
*/
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
if (ice_seg) {
state->buf_table = ice_find_buf_table(ice_seg);
if (!state->buf_table)
return NULL;
state->buf_idx = 0;
return ice_pkg_val_buf(state->buf_table->buf_array);
}
if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
return ice_pkg_val_buf(state->buf_table->buf_array +
state->buf_idx);
else
return NULL;
}
/**
* ice_pkg_advance_sect
* @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
* @state: pointer to the enum state
*
* This helper function will advance the section within the ice segment,
* also advancing the buffer if needed.
*/
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
if (!ice_seg && !state->buf)
return false;
if (!ice_seg && state->buf)
if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
return true;
state->buf = ice_pkg_enum_buf(ice_seg, state);
if (!state->buf)
return false;
/* start of new buffer, reset section index */
state->sect_idx = 0;
return true;
}
/**
* ice_pkg_enum_section
* @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
* @state: pointer to the enum state
* @sect_type: section type to enumerate
*
* This function will enumerate all the sections of a particular type in the
* ice segment. The first call is made with the ice_seg parameter non-NULL;
* on subsequent calls, ice_seg is set to NULL which continues the enumeration.
* When the function returns a NULL pointer, then the end of the matching
* sections has been reached.
*/
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type)
{
u16 offset, size;
if (ice_seg)
state->type = sect_type;
if (!ice_pkg_advance_sect(ice_seg, state))
return NULL;
/* scan for next matching section */
while (state->buf->section_entry[state->sect_idx].type !=
cpu_to_le32(state->type))
if (!ice_pkg_advance_sect(NULL, state))
return NULL;
/* validate section */
offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
return NULL;
size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
return NULL;
/* make sure the section fits in the buffer */
if (offset + size > ICE_PKG_BUF_SIZE)
return NULL;
state->sect_type =
le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
/* calc pointer to this section */
state->sect = ((u8 *)state->buf) +
le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
return state->sect;
}
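
The first-call/NULL-continuation protocol described above gives callers the
following loop shape (the section ID is an example; ice_fill_tbl() below
walks package sections the same way):

	struct ice_pkg_enum state;
	void *sect;

	memset(&state, 0, sizeof(state));
	sect = ice_pkg_enum_section(hw->seg, &state, ICE_SID_XLT1_SW);
	while (sect) {
		/* ... consume this section ... */
		sect = ice_pkg_enum_section(NULL, &state, ICE_SID_XLT1_SW);
	}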
/**
* ice_acquire_global_cfg_lock
* @hw: pointer to the HW structure
* @access: access type (read or write)
*
* This function will request ownership of the global config lock for reading
* or writing of the package. When attempting to obtain write access, the
* caller must check for the following two return values:
*
* ICE_SUCCESS - Means the caller has acquired the global config lock
* and can perform writing of the package.
* ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
* package or has found that no update was necessary; in
* this case, the caller can just skip performing any
* update of the package.
*/
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access)
{
enum ice_status status;
status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (!status)
mutex_lock(&ice_global_cfg_lock_sw);
else if (status == ICE_ERR_AQ_NO_WORK)
ice_debug(hw, ICE_DBG_PKG,
"Global config lock: No work to do\n");
return status;
}
/**
* ice_release_global_cfg_lock
* @hw: pointer to the HW structure
*
* This function will release the global config lock.
*/
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
mutex_unlock(&ice_global_cfg_lock_sw);
ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}
/**
* ice_aq_download_pkg
* @hw: pointer to the hardware structure
* @pkg_buf: the package buffer to transfer
* @buf_size: the size of the package buffer
* @last_buf: last buffer indicator
* @error_offset: returns error offset
* @error_info: returns error information
* @cd: pointer to command details structure or NULL
*
* Download Package (0x0C40)
*/
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, bool last_buf, u32 *error_offset,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
enum ice_status status;
if (error_offset)
*error_offset = 0;
if (error_info)
*error_info = 0;
cmd = &desc.params.download_pkg;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
if (status == ICE_ERR_AQ_ERROR) {
/* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp;
resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
*error_info = le32_to_cpu(resp->error_info);
}
return status;
}
/**
* ice_find_seg_in_pkg
* @hw: pointer to the hardware structure
* @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
* @pkg_hdr: pointer to the package header to be searched
*
* This function searches a package file for a particular segment type. On
* success it returns a pointer to the segment header, otherwise it will
* return NULL.
*/
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
struct ice_pkg_hdr *pkg_hdr)
{
u32 i;
ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
/* Search all package segments for the requested segment type */
for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
struct ice_generic_seg_hdr *seg;
seg = (struct ice_generic_seg_hdr *)
((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
if (le32_to_cpu(seg->seg_type) == seg_type)
return seg;
}
return NULL;
}
/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
*
* Obtains global config lock and downloads the package configuration buffers
* to the firmware. Metadata buffers are skipped, and the first metadata buffer
* found indicates that the rest of the buffers are all metadata buffers.
*/
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
enum ice_status status;
struct ice_buf_hdr *bh;
u32 offset, info, i;
if (!bufs || !count)
return ICE_ERR_PARAM;
/* If the first buffer's first section has its metadata bit set
* then there are no buffers to be downloaded, and the operation is
* considered a success.
*/
bh = (struct ice_buf_hdr *)bufs;
if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
return 0;
/* reset pkg_dwnld_status in case this function is called in the
* reset/rebuild flow
*/
hw->pkg_dwnld_status = ICE_AQ_RC_OK;
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
if (status) {
if (status == ICE_ERR_AQ_NO_WORK)
hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
else
hw->pkg_dwnld_status = hw->adminq.sq_last_status;
return status;
}
for (i = 0; i < count; i++) {
bool last = ((i + 1) == count);
if (!last) {
/* check next buffer for metadata flag */
bh = (struct ice_buf_hdr *)(bufs + i + 1);
/* A set metadata flag in the next buffer will signal
* that the current buffer will be the last buffer
* downloaded
*/
if (le16_to_cpu(bh->section_count))
if (le32_to_cpu(bh->section_entry[0].type) &
ICE_METADATA_BUF)
last = true;
}
bh = (struct ice_buf_hdr *)(bufs + i);
status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
&offset, &info, NULL);
/* Save AQ status from download package */
hw->pkg_dwnld_status = hw->adminq.sq_last_status;
if (status) {
ice_debug(hw, ICE_DBG_PKG,
"Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
break;
}
if (last)
break;
}
ice_release_global_cfg_lock(hw);
return status;
}
/**
* ice_aq_get_pkg_info_list
* @hw: pointer to the hardware structure
* @pkg_info: the buffer which will receive the information list
* @buf_size: the size of the pkg_info information buffer
* @cd: pointer to command details structure or NULL
*
* Get Package Info List (0x0C43)
*/
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}
/**
* ice_download_pkg
* @hw: pointer to the hardware structure
* @ice_seg: pointer to the segment of the package to be downloaded
*
* Handles the download of a complete package.
*/
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
le32_to_cpu(ice_seg->hdr.seg_type),
le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
ice_buf_tbl = ice_find_buf_table(ice_seg);
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
le32_to_cpu(ice_buf_tbl->buf_count));
return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
le32_to_cpu(ice_buf_tbl->buf_count));
}
/**
* ice_init_pkg_info
* @hw: pointer to the hardware structure
* @pkg_hdr: pointer to the driver's package hdr
*
* Saves off the package details into the HW structure.
*/
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
struct ice_global_metadata_seg *meta_seg;
struct ice_generic_seg_hdr *seg_hdr;
if (!pkg_hdr)
return ICE_ERR_PARAM;
meta_seg = (struct ice_global_metadata_seg *)
ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
if (meta_seg) {
hw->pkg_ver = meta_seg->pkg_ver;
memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));
ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
meta_seg->pkg_name);
} else {
ice_debug(hw, ICE_DBG_INIT,
"Did not find metadata segment in driver package\n");
return ICE_ERR_CFG;
}
seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
hw->ice_pkg_ver = seg_hdr->seg_ver;
memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
sizeof(hw->ice_pkg_name));
ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
seg_hdr->seg_name);
} else {
ice_debug(hw, ICE_DBG_INIT,
"Did not find ice segment in driver package\n");
return ICE_ERR_CFG;
}
return 0;
}
/**
* ice_get_pkg_info
* @hw: pointer to the hardware structure
*
* Store details of the package currently loaded in HW into the HW structure.
*/
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
struct ice_aqc_get_pkg_info_resp *pkg_info;
enum ice_status status;
u16 size;
u32 i;
size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
(ICE_PKG_CNT - 1));
pkg_info = kzalloc(size, GFP_KERNEL);
if (!pkg_info)
return ICE_ERR_NO_MEMORY;
status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
if (status)
goto init_pkg_free_alloc;
for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
u8 place = 0;
if (pkg_info->pkg_info[i].is_active) {
flags[place++] = 'A';
hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
memcpy(hw->active_pkg_name,
pkg_info->pkg_info[i].name,
sizeof(hw->active_pkg_name));
hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
}
if (pkg_info->pkg_info[i].is_active_at_boot)
flags[place++] = 'B';
if (pkg_info->pkg_info[i].is_modified)
flags[place++] = 'M';
if (pkg_info->pkg_info[i].is_in_nvm)
flags[place++] = 'N';
ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
i, pkg_info->pkg_info[i].ver.major,
pkg_info->pkg_info[i].ver.minor,
pkg_info->pkg_info[i].ver.update,
pkg_info->pkg_info[i].ver.draft,
pkg_info->pkg_info[i].name, flags);
}
init_pkg_free_alloc:
kfree(pkg_info);
return status;
}
/**
* ice_verify_pkg - verify package
* @pkg: pointer to the package buffer
* @len: size of the package buffer
*
* Verifies various attributes of the package file, including length, format
* version, and the requirement of at least one segment.
*/
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
u32 seg_count;
u32 i;
if (len < sizeof(*pkg))
return ICE_ERR_BUF_TOO_SHORT;
if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
return ICE_ERR_CFG;
/* pkg must have at least one segment */
seg_count = le32_to_cpu(pkg->seg_count);
if (seg_count < 1)
return ICE_ERR_CFG;
/* make sure segment array fits in package length */
if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
return ICE_ERR_BUF_TOO_SHORT;
/* all segments must fit within length */
for (i = 0; i < seg_count; i++) {
u32 off = le32_to_cpu(pkg->seg_offset[i]);
struct ice_generic_seg_hdr *seg;
/* segment header must fit */
if (len < off + sizeof(*seg))
return ICE_ERR_BUF_TOO_SHORT;
seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
/* segment body must fit */
if (len < off + le32_to_cpu(seg->seg_size))
return ICE_ERR_BUF_TOO_SHORT;
}
return 0;
}
/**
* ice_free_seg - free package segment pointer
* @hw: pointer to the hardware structure
*
* Frees the package segment pointer in the proper manner, depending on whether
* the segment was allocated or just the passed-in pointer was stored.
*/
void ice_free_seg(struct ice_hw *hw)
{
if (hw->pkg_copy) {
devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
hw->pkg_copy = NULL;
hw->pkg_size = 0;
}
hw->seg = NULL;
}
/**
* ice_init_pkg_regs - initialize additional package registers
* @hw: pointer to the hardware structure
*/
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX 0
/* setup Switch block input mask, which is 48-bits in two parts */
wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}
/**
* ice_chk_pkg_version - check package version for compatibility with driver
* @pkg_ver: pointer to a version structure to check
*
* Check to make sure that the package about to be downloaded is compatible with
* the driver. To be compatible, the major and minor components of the package
* version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
* definitions.
*/
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
return ICE_ERR_NOT_SUPPORTED;
return 0;
}
/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
* @len: size of the package buffer
*
* This function initializes a package. The package contains HW tables
* required to do packet processing. First, the function extracts package
* information such as version. Then it finds the ice configuration segment
* within the package; this function then saves a copy of the segment pointer
* within the supplied package buffer. Next, the function will cache any hints
* from the package, followed by downloading the package itself. Note that if
* a previous PF driver has already downloaded the package successfully, then
* the current driver will not have to download the package again.
*
* The local package contents will be used to query default behavior and to
* update specific sections of the HW's version of the package (e.g. to update
* the parse graph to understand new protocols).
*
* This function stores a pointer to the package buffer memory, and it is
* expected that the supplied buffer will not be freed immediately. If the
* package buffer needs to be freed, such as when read from a file, use
* ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
* case.
*/
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
struct ice_pkg_hdr *pkg;
enum ice_status status;
struct ice_seg *seg;
if (!buf || !len)
return ICE_ERR_PARAM;
pkg = (struct ice_pkg_hdr *)buf;
status = ice_verify_pkg(pkg, len);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
status);
return status;
}
/* initialize package info */
status = ice_init_pkg_info(hw, pkg);
if (status)
return status;
/* before downloading the package, check package version for
* compatibility with driver
*/
status = ice_chk_pkg_version(&hw->pkg_ver);
if (status)
return status;
/* find segment in given package */
seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
if (!seg) {
ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
return ICE_ERR_CFG;
}
/* download package */
status = ice_download_pkg(hw, seg);
if (status == ICE_ERR_AQ_NO_WORK) {
ice_debug(hw, ICE_DBG_INIT,
"package previously loaded - no work.\n");
status = 0;
}
/* Get information on the package currently loaded in HW, then make sure
* the driver is compatible with this version.
*/
if (!status) {
status = ice_get_pkg_info(hw);
if (!status)
status = ice_chk_pkg_version(&hw->active_pkg_ver);
}
if (!status) {
hw->seg = seg;
/* on successful package download update other required
* registers to support the package and fill HW tables
* with package content.
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
status);
}
return status;
}
/**
* ice_copy_and_init_pkg - initialize/download a copy of the package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
* @len: size of the package buffer
*
* This function copies the package buffer, and then calls ice_init_pkg() to
* initialize the copied package contents.
*
* The copying is necessary if the package buffer supplied is constant, or if
* the memory may disappear shortly after calling this function.
*
* If the package buffer resides in the data segment and can be modified, the
* caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
*
* However, if the package buffer needs to be copied first, such as when being
* read from a file, the caller should use ice_copy_and_init_pkg().
*
* This function will first copy the package buffer, before calling
* ice_init_pkg(). The caller is free to immediately destroy the original
* package buffer, as the new copy will be managed by this function and
* related routines.
*/
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
enum ice_status status;
u8 *buf_copy;
if (!buf || !len)
return ICE_ERR_PARAM;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
status = ice_init_pkg(hw, buf_copy, len);
if (status) {
/* Free the copy, since we failed to initialize the package */
devm_kfree(ice_hw_to_dev(hw), buf_copy);
} else {
/* Track the copied pkg so we can free it later */
hw->pkg_copy = buf_copy;
hw->pkg_size = len;
}
return status;
}
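
Since the buffer returned by request_firmware() is const and is released
after use, this copying path is the natural fit when loading the DDP file
from lib/firmware; a minimal sketch, with the firmware file name as an
illustrative placeholder:

	const struct firmware *firmware;
	enum ice_status status = ICE_ERR_PARAM;

	/* dev: the PCI function's struct device (assumed to be in scope) */
	if (!request_firmware(&firmware, "intel/ice/ddp/ice.pkg", dev)) {
		status = ice_copy_and_init_pkg(&pf->hw, firmware->data,
					       firmware->size);
		release_firmware(firmware);
	}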
/* PTG Management */
/**
* ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
* @hw: pointer to the hardware structure
* @blk: HW block
* @ptype: the ptype to search for
* @ptg: pointer to variable that receives the PTG
*
* This function will search the PTGs for a particular ptype, returning the
* PTG ID that contains it through the PTG parameter, with the value of
* ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
*/
static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
if (ptype >= ICE_XLT1_CNT || !ptg)
return ICE_ERR_PARAM;
*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
return 0;
}
/**
* ice_ptg_alloc_val - Allocates a new packet type group ID by value
* @hw: pointer to the hardware structure
* @blk: HW block
* @ptg: the PTG to allocate
*
* This function allocates a given packet type group ID specified by the PTG
* parameter.
*/
static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}
/**
* ice_ptg_remove_ptype - Removes ptype from a particular packet type group
* @hw: pointer to the hardware structure
* @blk: HW block
* @ptype: the ptype to remove
* @ptg: the PTG to remove the ptype from
*
* This function will remove the ptype from the specific PTG, and move it to
* the default PTG (ICE_DEFAULT_PTG).
*/
static enum ice_status
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
struct ice_ptg_ptype **ch;
struct ice_ptg_ptype *p;
if (ptype > ICE_XLT1_CNT - 1)
return ICE_ERR_PARAM;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
return ICE_ERR_DOES_NOT_EXIST;
/* Should not happen if .in_use is set, bad config */
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
return ICE_ERR_CFG;
/* find the ptype within this PTG, and bypass the link over it */
p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
while (p) {
if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
*ch = p->next_ptype;
break;
}
ch = &p->next_ptype;
p = p->next_ptype;
}
hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
return 0;
}
/**
* ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
* @hw: pointer to the hardware structure
* @blk: HW block
* @ptype: the ptype to add or move
* @ptg: the PTG to add or move the ptype to
*
* This function will either add or move a ptype to a particular PTG depending
* on whether the ptype is already part of another group. Note that using
* a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
static enum ice_status
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
enum ice_status status;
u8 original_ptg;
if (ptype > ICE_XLT1_CNT - 1)
return ICE_ERR_PARAM;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
return ICE_ERR_DOES_NOT_EXIST;
status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
if (status)
return status;
/* Is ptype already in the correct PTG? */
if (original_ptg == ptg)
return 0;
/* Remove from original PTG and move back to the default PTG */
if (original_ptg != ICE_DEFAULT_PTG)
ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
/* Moving to default PTG? Then we're done with this request */
if (ptg == ICE_DEFAULT_PTG)
return 0;
/* Add ptype to PTG at beginning of list */
hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
&hw->blk[blk].xlt1.ptypes[ptype];
hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
hw->blk[blk].xlt1.t[ptype] = ptg;
return 0;
}
/* Block / table size info */
struct ice_blk_size_details {
u16 xlt1; /* # XLT1 entries */
u16 xlt2; /* # XLT2 entries */
u16 prof_tcam; /* # profile ID TCAM entries */
u16 prof_id; /* # profile IDs */
u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
u16 prof_redir; /* # profile redirection entries */
u16 es; /* # extraction sequence entries */
u16 fvw; /* # field vector words */
u8 overwrite; /* overwrite existing entries allowed */
u8 reverse; /* reverse FV order */
};
static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
/**
* Table Definitions
* XLT1 - Number of entries in XLT1 table
* XLT2 - Number of entries in XLT2 table
* TCAM - Number of entries Profile ID TCAM table
* CDID - Control Domain ID of the hardware block
* PRED - Number of entries in the Profile Redirection Table
* FV - Number of entries in the Field Vector
* FVW - Width (in WORDs) of the Field Vector
* OVR - Overwrite existing table entries
* REV - Reverse FV
*/
/* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
/* Overwrite , Reverse FV */
/* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
false, false },
/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
false, false },
/* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
false, true },
/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
true, true },
/* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
false, false },
};
enum ice_sid_all {
ICE_SID_XLT1_OFF = 0,
ICE_SID_XLT2_OFF,
ICE_SID_PR_OFF,
ICE_SID_PR_REDIR_OFF,
ICE_SID_ES_OFF,
ICE_SID_OFF_COUNT,
};
/* VSIG Management */
/**
* ice_vsig_find_vsi - find a VSIG that contains a specified VSI
* @hw: pointer to the hardware structure
* @blk: HW block
* @vsi: VSI of interest
* @vsig: pointer to receive the VSI group
*
* This function will look up the VSI entry in the XLT2 list and return
* the VSI group it's associated with.
*/
static enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
if (!vsig || vsi >= ICE_MAX_VSI)
return ICE_ERR_PARAM;
/* As long as there's a default or valid VSIG associated with the input
* VSI, the function returns success. Any handling of VSIG will be
* done by the following add, update or remove functions.
*/
*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
return 0;
}
/**
* ice_vsig_alloc_val - allocate a new VSIG by value
* @hw: pointer to the hardware structure
* @blk: HW block
* @vsig: the VSIG to allocate
*
* This function will allocate a given VSIG specified by the VSIG parameter.
*/
static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
}
return ICE_VSIG_VALUE(idx, hw->pf_id);
}
/**
* ice_vsig_remove_vsi - remove VSI from VSIG
* @hw: pointer to the hardware structure
* @blk: HW block
* @vsi: VSI to remove
* @vsig: VSI group to remove from
*
* The function will remove the input VSI from its VSI group and move it
* to the DEFAULT_VSIG.
*/
static enum ice_status
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
u16 idx;
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
return ICE_ERR_PARAM;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
return ICE_ERR_DOES_NOT_EXIST;
/* entry already in default VSIG, don't have to remove */
if (idx == ICE_DEFAULT_VSIG)
return 0;
vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
if (!(*vsi_head))
return ICE_ERR_CFG;
vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
vsi_cur = (*vsi_head);
/* iterate the VSI list, skip over the entry to be removed */
while (vsi_cur) {
if (vsi_tgt == vsi_cur) {
(*vsi_head) = vsi_cur->next_vsi;
break;
}
vsi_head = &vsi_cur->next_vsi;
vsi_cur = vsi_cur->next_vsi;
}
/* verify if VSI was removed from group list */
if (!vsi_cur)
return ICE_ERR_DOES_NOT_EXIST;
vsi_cur->vsig = ICE_DEFAULT_VSIG;
vsi_cur->changed = 1;
vsi_cur->next_vsi = NULL;
return 0;
}
/**
* ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
* @hw: pointer to the hardware structure
* @blk: HW block
* @vsi: VSI to move
* @vsig: destination VSI group
*
* This function will move or add the input VSI to the target VSIG.
* The function will find the original VSIG the VSI belongs to and
* move the entry to the DEFAULT_VSIG, update the original VSIG and
* then move entry to the new VSIG.
*/
static enum ice_status
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi *tmp;
enum ice_status status;
u16 orig_vsig, idx;
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
return ICE_ERR_PARAM;
/* if VSIG not in use and VSIG is not default type this VSIG
* doesn't exist.
*/
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
vsig != ICE_DEFAULT_VSIG)
return ICE_ERR_DOES_NOT_EXIST;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (status)
return status;
/* no update required if vsigs match */
if (orig_vsig == vsig)
return 0;
if (orig_vsig != ICE_DEFAULT_VSIG) {
/* remove entry from orig_vsig and add to default VSIG */
status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
if (status)
return status;
}
if (idx == ICE_DEFAULT_VSIG)
return 0;
/* Create VSI entry and add VSIG and prop_mask values */
hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
hw->blk[blk].xlt2.vsis[vsi].changed = 1;
/* Add new entry to the head of the VSIG list */
tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
&hw->blk[blk].xlt2.vsis[vsi];
hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
hw->blk[blk].xlt2.t[vsi] = vsig;
return 0;
}
/* Block / table section IDs */
static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
/* SWITCH */
{ ICE_SID_XLT1_SW,
ICE_SID_XLT2_SW,
ICE_SID_PROFID_TCAM_SW,
ICE_SID_PROFID_REDIR_SW,
ICE_SID_FLD_VEC_SW
},
/* ACL */
{ ICE_SID_XLT1_ACL,
ICE_SID_XLT2_ACL,
ICE_SID_PROFID_TCAM_ACL,
ICE_SID_PROFID_REDIR_ACL,
ICE_SID_FLD_VEC_ACL
},
/* FD */
{ ICE_SID_XLT1_FD,
ICE_SID_XLT2_FD,
ICE_SID_PROFID_TCAM_FD,
ICE_SID_PROFID_REDIR_FD,
ICE_SID_FLD_VEC_FD
},
/* RSS */
{ ICE_SID_XLT1_RSS,
ICE_SID_XLT2_RSS,
ICE_SID_PROFID_TCAM_RSS,
ICE_SID_PROFID_REDIR_RSS,
ICE_SID_FLD_VEC_RSS
},
/* PE */
{ ICE_SID_XLT1_PE,
ICE_SID_XLT2_PE,
ICE_SID_PROFID_TCAM_PE,
ICE_SID_PROFID_REDIR_PE,
ICE_SID_FLD_VEC_PE
}
};
/**
* ice_init_sw_xlt1_db - init software XLT1 database from HW tables
* @hw: pointer to the hardware structure
* @blk: the HW block to initialize
*/
static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
{
u16 pt;
for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
u8 ptg;
ptg = hw->blk[blk].xlt1.t[pt];
if (ptg != ICE_DEFAULT_PTG) {
ice_ptg_alloc_val(hw, blk, ptg);
ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
}
}
}
/**
* ice_init_sw_xlt2_db - init software XLT2 database from HW tables
* @hw: pointer to the hardware structure
* @blk: the HW block to initialize
*/
static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
{
u16 vsi;
for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
u16 vsig;
vsig = hw->blk[blk].xlt2.t[vsi];
if (vsig) {
ice_vsig_alloc_val(hw, blk, vsig);
ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
/* no changes at this time, since this has been
* initialized from the original package
*/
hw->blk[blk].xlt2.vsis[vsi].changed = 0;
}
}
}
/**
* ice_init_sw_db - init software database from HW tables
* @hw: pointer to the hardware structure
*/
static void ice_init_sw_db(struct ice_hw *hw)
{
u16 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
ice_init_sw_xlt1_db(hw, (enum ice_block)i);
ice_init_sw_xlt2_db(hw, (enum ice_block)i);
}
}
/**
* ice_fill_tbl - Reads content of a single table type into database
* @hw: pointer to the hardware structure
* @block_id: Block ID of the table to copy
* @sid: Section ID of the table to copy
*
* Will attempt to read the entire content of a given table of a single block
* into the driver database. We assume that the buffer will always
* be as large or larger than the data contained in the package. If
* this condition is not met, there is most likely an error in the package
* contents.
*/
static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
{
u32 dst_len, sect_len, offset = 0;
struct ice_prof_redir_section *pr;
struct ice_prof_id_section *pid;
struct ice_xlt1_section *xlt1;
struct ice_xlt2_section *xlt2;
struct ice_sw_fv_section *es;
struct ice_pkg_enum state;
u8 *src, *dst;
void *sect;
/* if the HW segment pointer is null then the first iteration of
 * ice_pkg_enum_section() will fail. In this case the HW tables will
 * not be filled and the function returns without error.
 */
if (!hw->seg) {
ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
return;
}
memset(&state, 0, sizeof(state));
sect = ice_pkg_enum_section(hw->seg, &state, sid);
while (sect) {
switch (sid) {
case ICE_SID_XLT1_SW:
case ICE_SID_XLT1_FD:
case ICE_SID_XLT1_RSS:
case ICE_SID_XLT1_ACL:
case ICE_SID_XLT1_PE:
xlt1 = (struct ice_xlt1_section *)sect;
src = xlt1->value;
sect_len = le16_to_cpu(xlt1->count) *
sizeof(*hw->blk[block_id].xlt1.t);
dst = hw->blk[block_id].xlt1.t;
dst_len = hw->blk[block_id].xlt1.count *
sizeof(*hw->blk[block_id].xlt1.t);
break;
case ICE_SID_XLT2_SW:
case ICE_SID_XLT2_FD:
case ICE_SID_XLT2_RSS:
case ICE_SID_XLT2_ACL:
case ICE_SID_XLT2_PE:
xlt2 = (struct ice_xlt2_section *)sect;
src = (__force u8 *)xlt2->value;
sect_len = le16_to_cpu(xlt2->count) *
sizeof(*hw->blk[block_id].xlt2.t);
dst = (u8 *)hw->blk[block_id].xlt2.t;
dst_len = hw->blk[block_id].xlt2.count *
sizeof(*hw->blk[block_id].xlt2.t);
break;
case ICE_SID_PROFID_TCAM_SW:
case ICE_SID_PROFID_TCAM_FD:
case ICE_SID_PROFID_TCAM_RSS:
case ICE_SID_PROFID_TCAM_ACL:
case ICE_SID_PROFID_TCAM_PE:
pid = (struct ice_prof_id_section *)sect;
src = (u8 *)pid->entry;
sect_len = le16_to_cpu(pid->count) *
sizeof(*hw->blk[block_id].prof.t);
dst = (u8 *)hw->blk[block_id].prof.t;
dst_len = hw->blk[block_id].prof.count *
sizeof(*hw->blk[block_id].prof.t);
break;
case ICE_SID_PROFID_REDIR_SW:
case ICE_SID_PROFID_REDIR_FD:
case ICE_SID_PROFID_REDIR_RSS:
case ICE_SID_PROFID_REDIR_ACL:
case ICE_SID_PROFID_REDIR_PE:
pr = (struct ice_prof_redir_section *)sect;
src = pr->redir_value;
sect_len = le16_to_cpu(pr->count) *
sizeof(*hw->blk[block_id].prof_redir.t);
dst = hw->blk[block_id].prof_redir.t;
dst_len = hw->blk[block_id].prof_redir.count *
sizeof(*hw->blk[block_id].prof_redir.t);
break;
case ICE_SID_FLD_VEC_SW:
case ICE_SID_FLD_VEC_FD:
case ICE_SID_FLD_VEC_RSS:
case ICE_SID_FLD_VEC_ACL:
case ICE_SID_FLD_VEC_PE:
es = (struct ice_sw_fv_section *)sect;
src = (u8 *)es->fv;
sect_len = (u32)(le16_to_cpu(es->count) *
hw->blk[block_id].es.fvw) *
sizeof(*hw->blk[block_id].es.t);
dst = (u8 *)hw->blk[block_id].es.t;
dst_len = (u32)(hw->blk[block_id].es.count *
hw->blk[block_id].es.fvw) *
sizeof(*hw->blk[block_id].es.t);
break;
default:
return;
}
/* if the section offset exceeds destination length, terminate
* table fill.
*/
if (offset > dst_len)
return;
/* if the sum of section size and offset exceeds the destination size
 * then we are out of bounds of the HW table size for that PF.
 * Clamp the section length to fill only the remaining table space
 * of that PF.
 */
if ((offset + sect_len) > dst_len)
sect_len = dst_len - offset;
memcpy(dst + offset, src, sect_len);
offset += sect_len;
sect = ice_pkg_enum_section(NULL, &state, sid);
}
}
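/* A minimal userspace sketch (not driver code) of the clamped-copy rule
 * ice_fill_tbl() applies to every section: never write past the PF's
 * table, truncating the final section if needed. Names are hypothetical.
 */
#include <stdint.h>
#include <string.h>

static uint32_t copy_section(uint8_t *dst, uint32_t dst_len, uint32_t offset,
			     const uint8_t *src, uint32_t sect_len)
{
	if (offset > dst_len)			/* already past the table end */
		return offset;
	if (offset + sect_len > dst_len)	/* clamp to the space left */
		sect_len = dst_len - offset;
	memcpy(dst + offset, src, sect_len);
	return offset + sect_len;		/* where the next section lands */
}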
/**
* ice_fill_blk_tbls - Read package context for tables
* @hw: pointer to the hardware structure
*
* Reads the current package contents and populates the driver
* database with the data iteratively for all advanced feature
* blocks. Assume that the HW tables have been allocated.
*/
void ice_fill_blk_tbls(struct ice_hw *hw)
{
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
enum ice_block blk_id = (enum ice_block)i;
ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
}
ice_init_sw_db(hw);
}
/**
* ice_free_hw_tbls - free hardware table memory
* @hw: pointer to the hardware structure
*/
void ice_free_hw_tbls(struct ice_hw *hw)
{
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
hw->blk[i].is_list_init = false;
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
}
memset(hw->blk, 0, sizeof(hw->blk));
}
/**
* ice_clear_hw_tbls - clear HW tables and flow profiles
* @hw: pointer to the hardware structure
*/
void ice_clear_hw_tbls(struct ice_hw *hw)
{
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
struct ice_es *es = &hw->blk[i].es;
memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
memset(xlt1->ptg_tbl, 0,
ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
memset(xlt2->vsig_tbl, 0,
xlt2->count * sizeof(*xlt2->vsig_tbl));
memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
memset(prof->t, 0, prof->count * sizeof(*prof->t));
memset(prof_redir->t, 0,
prof_redir->count * sizeof(*prof_redir->t));
memset(es->t, 0, es->count * sizeof(*es->t));
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->written, 0, es->count * sizeof(*es->written));
}
}
/**
* ice_init_hw_tbls - init hardware table memory
* @hw: pointer to the hardware structure
*/
enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
struct ice_es *es = &hw->blk[i].es;
u16 j;
if (hw->blk[i].is_list_init)
continue;
hw->blk[i].is_list_init = true;
hw->blk[i].overwrite = blk_sizes[i].overwrite;
es->reverse = blk_sizes[i].reverse;
xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
xlt1->count = blk_sizes[i].xlt1;
xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
sizeof(*xlt1->ptypes), GFP_KERNEL);
if (!xlt1->ptypes)
goto err;
xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
sizeof(*xlt1->ptg_tbl),
GFP_KERNEL);
if (!xlt1->ptg_tbl)
goto err;
xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
sizeof(*xlt1->t), GFP_KERNEL);
if (!xlt1->t)
goto err;
xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
xlt2->count = blk_sizes[i].xlt2;
xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
sizeof(*xlt2->vsis), GFP_KERNEL);
if (!xlt2->vsis)
goto err;
xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
sizeof(*xlt2->vsig_tbl),
GFP_KERNEL);
if (!xlt2->vsig_tbl)
goto err;
for (j = 0; j < xlt2->count; j++)
INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
sizeof(*xlt2->t), GFP_KERNEL);
if (!xlt2->t)
goto err;
prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
prof->count = blk_sizes[i].prof_tcam;
prof->max_prof_id = blk_sizes[i].prof_id;
prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
sizeof(*prof->t), GFP_KERNEL);
if (!prof->t)
goto err;
prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
prof_redir->count = blk_sizes[i].prof_redir;
prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
prof_redir->count,
sizeof(*prof_redir->t),
GFP_KERNEL);
if (!prof_redir->t)
goto err;
es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
es->count = blk_sizes[i].es;
es->fvw = blk_sizes[i].fvw;
es->t = devm_kcalloc(ice_hw_to_dev(hw),
(u32)(es->count * es->fvw),
sizeof(*es->t), GFP_KERNEL);
if (!es->t)
goto err;
es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->ref_count),
GFP_KERNEL);
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
if (!es->ref_count)
goto err;
}
return 0;
err:
ice_free_hw_tbls(hw);
return ICE_ERR_NO_MEMORY;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_FLEX_PIPE_H_
#define _ICE_FLEX_PIPE_H_
#include "ice_type.h"
/* Minimum supported package version */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
/* Package format version */
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
#endif /* _ICE_FLEX_PIPE_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
u16 off; /* Offset within the protocol header */
u8 resvrd;
} __packed;
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver format_ver;
__le32 seg_count;
__le32 seg_offset[1];
};
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
__le32 seg_type;
struct ice_pkg_ver seg_ver;
__le32 seg_size;
char seg_name[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[1];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[1];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[1];
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 track_id;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[1];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
/* ice package section IDs */
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
/* package labels */
struct ice_label {
__le16 value;
#define ICE_PKG_LABEL_SIZE 64
char name[ICE_PKG_LABEL_SIZE];
};
struct ice_label_section {
__le16 count;
struct ice_label label[1];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
sizeof(struct ice_label_section) - sizeof(struct ice_label), \
sizeof(struct ice_label))
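/* Worked example (illustrative, assuming the packed on-wire sizes):
 * sizeof(struct ice_buf_hdr) = 2 + 2 + 8 = 12 bytes,
 * sizeof(struct ice_label) = 2 + 64 = 66 bytes, and the header overhead
 * sizeof(struct ice_label_section) - sizeof(struct ice_label) = 2 bytes,
 * so ICE_MAX_LABELS_IN_BUF = (4096 - 12 - 2) / 66 = 61 labels per 4 KB
 * package buffer.
 */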
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
struct ice_fv fv[1];
};
/* The BOOST TCAM stores the match packet header in reverse order, meaning
* the fields are reversed; in addition, this means that the normally big endian
* fields of the packet are now little endian.
*/
struct ice_boost_key_value {
#define ICE_BOOST_REMAINING_HV_KEY 15
u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
__le16 hv_dst_port_key;
__le16 hv_src_port_key;
u8 tcam_search_key;
} __packed;
struct ice_boost_key {
struct ice_boost_key_value key;
struct ice_boost_key_value key2;
};
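/* Size check (illustrative): each ice_boost_key_value packs to
 * 15 + 2 + 2 + 1 = 20 bytes, so key plus key2 form the 40 bytes of
 * TCAM key referenced in ice_boost_tcam_entry below.
 */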
/* package Boost TCAM entry */
struct ice_boost_tcam_entry {
__le16 addr;
__le16 reserved;
/* break up the 40 bytes of key into different fields */
struct ice_boost_key key;
u8 boost_hit_index_group;
/* The following contains bitfields which are not on byte boundaries.
* These fields are currently unused by driver software.
*/
#define ICE_BOOST_BIT_FIELDS 43
u8 bit_fields[ICE_BOOST_BIT_FIELDS];
};
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
struct ice_boost_tcam_entry tcam[1];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
sizeof(struct ice_boost_tcam_section) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
struct ice_xlt1_section {
__le16 count;
__le16 offset;
u8 value[1];
} __packed;
struct ice_xlt2_section {
__le16 count;
__le16 offset;
__le16 value[1];
};
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
u8 redir_value[1];
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
u32 type;
struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
u32 entry_idx;
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
struct ice_es {
u32 sid;
u16 count;
u16 fvw;
u16 *ref_count;
struct list_head prof_map;
struct ice_fv_word *t;
struct mutex prof_map_lock; /* protect access to profiles list */
u8 *written;
u8 reverse; /* set to true to reverse FV order */
};
/* PTYPE Group management */
/* Note: the XLT1 table takes a 13-bit packet type (PTYPE) as input and
 * results in an 8-bit packet type group (PTG) ID as output.
 *
 * Note: PTG 0 is the default packet type group and it is assumed that all
 * PTYPEs are a part of this group until moved to a new PTG.
 */
#define ICE_DEFAULT_PTG 0
struct ice_ptg_entry {
struct ice_ptg_ptype *first_ptype;
u8 in_use;
};
struct ice_ptg_ptype {
struct ice_ptg_ptype *next_ptype;
u8 ptg;
};
struct ice_vsig_entry {
struct list_head prop_lst;
struct ice_vsig_vsi *first_vsi;
u8 in_use;
};
struct ice_vsig_vsi {
struct ice_vsig_vsi *next_vsi;
u32 prop_mask;
u16 changed;
u16 vsig;
};
#define ICE_XLT1_CNT 1024
#define ICE_MAX_PTGS 256
/* XLT1 Table */
struct ice_xlt1 {
struct ice_ptg_entry *ptg_tbl;
struct ice_ptg_ptype *ptypes;
u8 *t;
u32 sid;
u16 count;
};
#define ICE_XLT2_CNT 768
#define ICE_MAX_VSIGS 768
/* VSIG bit layout:
* [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS
* [13:15]: PF number of device
*/
#define ICE_VSIG_IDX_M (0x1FFF)
#define ICE_PF_NUM_S 13
#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
#define ICE_VSIG_VALUE(vsig, pf_id) \
(u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
(((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
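/* Worked example (illustrative): for VSIG index 5 owned by PF 2,
 * ICE_VSIG_VALUE(5, 2) = (5 & 0x1FFF) | ((2 << 13) & 0xE000) = 0x4005
 */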
#define ICE_DEFAULT_VSIG 0
/* XLT2 Table */
struct ice_xlt2 {
struct ice_vsig_entry *vsig_tbl;
struct ice_vsig_vsi *vsis;
u16 *t;
u32 sid;
u16 count;
};
/* Keys are made up of two values, each one-half the size of the key.
 * For a TCAM, the entire key is 80 bits wide (i.e., two 40-bit values).
 */
#define ICE_TCAM_KEY_VAL_SZ 5
#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ)
struct ice_prof_tcam_entry {
__le16 addr;
u8 key[ICE_TCAM_KEY_SZ];
u8 prof_id;
} __packed;
struct ice_prof_id_section {
__le16 count;
struct ice_prof_tcam_entry entry[1];
} __packed;
struct ice_prof_tcam {
u32 sid;
u16 count;
u16 max_prof_id;
struct ice_prof_tcam_entry *t;
u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */
};
struct ice_prof_redir {
u8 *t;
u32 sid;
u16 count;
};
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
u8 overwrite; /* set to true to allow overwrite of table entries */
u8 is_list_init;
};
#endif /* _ICE_FLEX_TYPE_H_ */
@@ -55,6 +55,8 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
...
@@ -752,6 +752,17 @@ void ice_vsi_put_qs(struct ice_vsi *vsi)
mutex_unlock(&pf->avail_q_mutex);
}
/**
* ice_is_safe_mode - check if the PF is in safe mode
* @pf: pointer to the PF struct
*
* returns true if driver is in safe mode, false otherwise
*/
bool ice_is_safe_mode(struct ice_pf *pf)
{
return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}
/**
* ice_rss_clean - Delete RSS related VSI structures that hold user inputs
* @vsi: the VSI being removed
@@ -2629,6 +2640,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* DCB settings in the HW. Also, if the FW DCBX engine is not running
* then Rx LLDP packets need to be redirected up the stack.
*/
if (!ice_is_safe_mode(pf)) {
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
@@ -2639,6 +2651,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, true);
}
}
return vsi;
@@ -2905,8 +2918,11 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/* disable each interrupt */
ice_for_each_q_vector(vsi, i)
ice_for_each_q_vector(vsi, i) {
if (!vsi->q_vectors[i])
continue;
wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
}
ice_flush(hw);
@@ -2975,6 +2991,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
pf->num_avail_sw_msix += vsi->num_q_vectors;
}
if (!ice_is_safe_mode(pf)) {
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, false);
ice_cfg_sw_lldp(vsi, true, false);
@@ -2984,6 +3001,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
}
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
@@ -3168,48 +3186,6 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
sizeof(vsi->info.tc_mapping));
}
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
* @vsi: the VSI being configured
* @ena_tc: TC map to be enabled
*/
static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = vsi->back;
struct ice_dcbx_cfg *dcbcfg;
u8 netdev_tc;
int i;
if (!netdev)
return;
if (!ena_tc) {
netdev_reset_tc(netdev);
return;
}
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
netdev_set_tc_queue(netdev,
vsi->tc_cfg.tc_info[i].netdev_tc,
vsi->tc_cfg.tc_info[i].qcount_tx,
vsi->tc_cfg.tc_info[i].qoffset);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
u8 ets_tc = dcbcfg->etscfg.prio_table[i];
/* Get the mapped netdev TC# for the UP */
netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
netdev_set_prio_tc_map(netdev, i, netdev_tc);
}
}
/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
@@ -3275,6 +3251,25 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
}
#endif /* CONFIG_DCB */
/**
* ice_nvm_version_str - format the NVM version string
* @hw: ptr to the hardware info
*/
char *ice_nvm_version_str(struct ice_hw *hw)
{
u8 oem_ver, oem_patch, ver_hi, ver_lo;
static char buf[ICE_NVM_VER_LEN];
u16 oem_build;
ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
&ver_lo);
snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
return buf;
}
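/* Example output (illustrative): with an NVM version word that splits into
 * ver_hi = 1 and ver_lo = 0x20, EETRACK 0x80001234 and OEM version 1.5.1,
 * the string reads "1.20 0x80001234 1.5.1".
 */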
/**
* ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
* @vsi: the VSI whose MAC filter is being configured
...
@@ -120,7 +120,10 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
char *ice_nvm_version_str(struct ice_hw *hw);
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
#endif /* !_ICE_LIB_H_ */
@@ -9,16 +9,27 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#define DRV_VERSION "0.7.5-k"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 1
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) "-k"
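/* With the values above, the stringification expands DRV_VERSION to
 * "0.8.1-k"; this is the string ice_send_version() later reports to the
 * firmware.
 */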
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
static int debug = -1;
module_param(debug, int, 0644);
@@ -29,9 +40,10 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static void ice_rebuild(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
static void ice_vsi_release_all(struct ice_pf *pf);
@@ -491,6 +503,8 @@ ice_prepare_for_reset(struct ice_pf *pf)
for (i = 0; i < pf->num_alloc_vfs; i++)
ice_set_vf_state_qs_dis(&pf->vf[i]);
/* clear SW filtering DB */
ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
@@ -536,7 +550,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
*/
if (reset_type == ICE_RESET_PFR) {
pf->pfr_count++;
ice_rebuild(pf);
ice_rebuild(pf, reset_type);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
ice_reset_all_vfs(pf, true);
@@ -580,7 +594,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
} else {
/* done with reset. start rebuild */
pf->hw.reset_ongoing = false;
ice_rebuild(pf);
ice_rebuild(pf, reset_type);
/* clear bit to resume normal operations, but
* ICE_NEEDS_RESTART bit is set in case rebuild failed
*/
@@ -1490,13 +1504,19 @@ static void ice_service_task(struct work_struct *work)
return;
}
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf);
ice_process_vflr_event(pf);
ice_watchdog_subtask(pf);
ice_clean_adminq_subtask(pf);
if (ice_is_safe_mode(pf)) {
ice_service_task_complete(pf);
return;
}
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
@@ -1931,30 +1951,41 @@ static void ice_napi_add(struct ice_vsi *vsi)
}
/**
* ice_cfg_netdev - Allocate, configure and register a netdev
* ice_set_ops - set netdev and ethtool ops for the given netdev
* @vsi: the VSI associated with the new netdev
* @netdev: netdev instance
*
* Returns 0 on success, negative value on failure
*/
static int ice_cfg_netdev(struct ice_vsi *vsi)
static void ice_set_ops(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (ice_is_safe_mode(pf)) {
netdev->netdev_ops = &ice_netdev_safe_mode_ops;
ice_set_ethtool_safe_mode_ops(netdev);
return;
}
netdev->netdev_ops = &ice_netdev_ops;
ice_set_ethtool_ops(netdev);
}
/**
* ice_set_netdev_features - set features for the given netdev
* @netdev: netdev instance
*/
static void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
netdev_features_t csumo_features;
netdev_features_t vlano_features;
netdev_features_t dflt_features;
netdev_features_t tso_features;
struct ice_netdev_priv *np;
struct net_device *netdev;
u8 mac_addr[ETH_ALEN];
int err;
netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
vsi->alloc_rxq);
if (!netdev)
return -ENOMEM;
vsi->netdev = netdev;
np = netdev_priv(netdev);
np->vsi = vsi;
if (ice_is_safe_mode(pf)) {
/* safe mode */
netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
netdev->hw_features = netdev->features;
return;
}
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA |
@@ -1982,25 +2013,50 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
tso_features;
netdev->vlan_features |= dflt_features | csumo_features |
tso_features;
}
/**
* ice_cfg_netdev - Allocate, configure and register a netdev
* @vsi: the VSI associated with the new netdev
*
* Returns 0 on success, negative value on failure
*/
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_netdev_priv *np;
struct net_device *netdev;
u8 mac_addr[ETH_ALEN];
int err;
netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
vsi->alloc_rxq);
if (!netdev)
return -ENOMEM;
vsi->netdev = netdev;
np = netdev_priv(netdev);
np->vsi = vsi;
ice_set_netdev_features(netdev);
ice_set_ops(netdev);
if (vsi->type == ICE_VSI_PF) {
SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
}
netdev->priv_flags |= IFF_UNICAST_FLT;
/* assign netdev_ops */
netdev->netdev_ops = &ice_netdev_ops;
/* Setup netdev TC information */
ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
ice_set_ethtool_ops(netdev);
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = ICE_MAX_MTU;
@@ -2258,29 +2314,41 @@ static void ice_deinit_pf(struct ice_pf *pf)
}
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
* ice_set_pf_caps - set PFs capability flags
* @pf: pointer to the PF instance
*/
static int ice_init_pf(struct ice_pf *pf)
static void ice_set_pf_caps(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
if (pf->hw.func_caps.common_cap.dcb)
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
struct ice_hw *hw = &pf->hw;
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
ICE_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
if (pf->hw.func_caps.common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
*/
static int ice_init_pf(struct ice_pf *pf)
{
ice_set_pf_caps(pf);
mutex_init(&pf->sw_mutex);
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
@@ -2288,9 +2356,7 @@ static int ice_init_pf(struct ice_pf *pf)
INIT_WORK(&pf->serv_task, ice_service_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
pf->max_pf_txqs = pf->hw.func_caps.common_cap.num_txq;
pf->max_pf_rxqs = pf->hw.func_caps.common_cap.num_rxq;
mutex_init(&pf->avail_q_mutex);
pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
if (!pf->avail_txqs)
return -ENOMEM;
@@ -2443,6 +2509,163 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return 0;
}
/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
* @status: status of package load
*/
static void
ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
{
struct ice_pf *pf = (struct ice_pf *)hw->back;
struct device *dev = &pf->pdev->dev;
switch (*status) {
case ICE_SUCCESS:
/* The package download AdminQ command returned success, either
 * because this download succeeded or because a package is already
 * loaded on the device (ICE_ERR_AQ_NO_WORK).
 */
if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
hw->pkg_ver.update == hw->active_pkg_ver.update &&
hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
!memcmp(hw->pkg_name, hw->active_pkg_name,
sizeof(hw->pkg_name))) {
if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
dev_info(dev,
"DDP package already present on device: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
hw->active_pkg_ver.update,
hw->active_pkg_ver.draft);
else
dev_info(dev,
"The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
hw->active_pkg_ver.update,
hw->active_pkg_ver.draft);
} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
dev_err(dev,
"The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
*status = ICE_ERR_NOT_SUPPORTED;
} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
dev_info(dev,
"The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
hw->active_pkg_ver.update,
hw->active_pkg_ver.draft,
hw->pkg_name,
hw->pkg_ver.major,
hw->pkg_ver.minor,
hw->pkg_ver.update,
hw->pkg_ver.draft);
} else {
dev_err(dev,
"An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
case ICE_ERR_BUF_TOO_SHORT:
/* fall-through */
case ICE_ERR_CFG:
dev_err(dev,
"The DDP package file is invalid. Entering Safe Mode.\n");
break;
case ICE_ERR_NOT_SUPPORTED:
/* Package File version not supported */
if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
dev_err(dev,
"The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
dev_err(dev,
"The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
switch (hw->adminq.sq_last_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
dev_err(dev,
"The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_ESVN:
dev_err(dev,
"The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
dev_err(dev,
"An error occurred on the device while loading the DDP package. The device will be reset.\n");
return;
default:
break;
}
/* fall-through */
default:
dev_err(dev,
"An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
break;
}
}
/**
* ice_load_pkg - load/reload the DDP Package file
* @firmware: firmware structure when firmware requested or NULL for reload
* @pf: pointer to the PF instance
*
* Called on probe and post CORER/GLOBR rebuild to load DDP Package and
* initialize HW tables.
*/
static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
enum ice_status status = ICE_ERR_PARAM;
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
/* Load DDP Package */
if (firmware && !hw->pkg_copy) {
status = ice_copy_and_init_pkg(hw, firmware->data,
firmware->size);
ice_log_pkg_init(hw, &status);
} else if (!firmware && hw->pkg_copy) {
/* Reload package during rebuild after CORER/GLOBR reset */
status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
ice_log_pkg_init(hw, &status);
} else {
dev_err(dev,
"The DDP package file failed to load. Entering Safe Mode.\n");
}
if (status) {
/* Safe Mode */
clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
return;
}
/* Successful download package is the precondition for advanced
* features, hence setting the ICE_FLAG_ADV_FEATURES flag
*/
set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}
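/* Usage sketch: on probe, ice_request_fw() passes the firmware blob it
 * obtained (ice_load_pkg(firmware, pf)); after a CORER/GLOBR reset,
 * ice_rebuild() passes NULL (ice_load_pkg(NULL, pf)) so the cached
 * hw->pkg_copy is re-initialized instead of re-reading /lib/firmware.
 */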
/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
@@ -2459,6 +2682,105 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
ICE_CACHE_LINE_BYTES);
}
/**
* ice_send_version - update firmware with driver version
* @pf: PF struct
*
* Returns ICE_SUCCESS on success, else error code
*/
static enum ice_status ice_send_version(struct ice_pf *pf)
{
struct ice_driver_ver dv;
dv.major_ver = DRV_VERSION_MAJOR;
dv.minor_ver = DRV_VERSION_MINOR;
dv.build_ver = DRV_VERSION_BUILD;
dv.subbuild_ver = 0;
strscpy((char *)dv.driver_string, DRV_VERSION,
sizeof(dv.driver_string));
return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}
/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
*/
static char *ice_get_opt_fw_name(struct ice_pf *pf)
{
/* The optional firmware file name is the same as the default, with an
 * additional dash followed by an EUI-64 identifier (PCIe Device
 * Serial Number)
 */
struct pci_dev *pdev = pf->pdev;
char *opt_fw_filename = NULL;
u32 dword;
u8 dsn[8];
int pos;
/* Determine the name of the optional file using the DSN (two
* dwords following the start of the DSN Capability).
*/
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
if (pos) {
opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
if (!opt_fw_filename)
return NULL;
pci_read_config_dword(pdev, pos + 4, &dword);
put_unaligned_le32(dword, &dsn[0]);
pci_read_config_dword(pdev, pos + 8, &dword);
put_unaligned_le32(dword, &dsn[4]);
snprintf(opt_fw_filename, NAME_MAX,
"%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
ICE_DDP_PKG_PATH,
dsn[7], dsn[6], dsn[5], dsn[4],
dsn[3], dsn[2], dsn[1], dsn[0]);
}
return opt_fw_filename;
}
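/* Illustrative example: a device whose 64-bit DSN reads 0x1122334455667788
 * would look for "intel/ice/ddp/ice-1122334455667788.pkg" before falling
 * back to the default ICE_DDP_PKG_FILE.
 */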
/**
* ice_request_fw - Device initialization routine
* @pf: pointer to the PF instance
*/
static void ice_request_fw(struct ice_pf *pf)
{
char *opt_fw_filename = ice_get_opt_fw_name(pf);
const struct firmware *firmware = NULL;
struct device *dev = &pf->pdev->dev;
int err = 0;
/* The optional device-specific DDP package (if present) overrides the
 * default DDP package file. The kernel logs a debug message if the file
 * doesn't exist, and warning messages for other errors.
 */
if (opt_fw_filename) {
err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
if (err) {
kfree(opt_fw_filename);
goto dflt_pkg_load;
}
/* request for firmware was successful. Download to device */
ice_load_pkg(firmware, pf);
kfree(opt_fw_filename);
release_firmware(firmware);
return;
}
dflt_pkg_load:
err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
if (err) {
dev_err(dev,
"The DDP package file was not found or could not be read. Entering Safe Mode\n");
return;
}
/* request for firmware was successful. Download to device */
ice_load_pkg(firmware, pf);
release_firmware(firmware);
}
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -2533,9 +2855,27 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
ice_nvm_version_str(hw), hw->fw_build);
ice_request_fw(pf);
/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
 * set in pf->flags, which will cause ice_is_safe_mode to return
 * true
 */
if (ice_is_safe_mode(pf)) {
dev_err(dev,
"Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
* device/function capabilities, override them.
*/
ice_set_safe_mode_caps(hw);
}
err = ice_init_pf(pf);
if (err) {
@@ -2543,16 +2883,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) {
/* Note: DCB init failure is non-fatal to load */
if (ice_init_pf_dcb(pf, false)) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
ice_cfg_lldp_mib_change(&pf->hw, true);
}
}
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
@@ -2612,6 +2942,15 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
clear_bit(__ICE_SERVICE_DIS, pf->state);
/* tell the firmware we are up */
err = ice_send_version(pf);
if (err) {
dev_err(dev,
"probe failed sending driver version %s. error: %d\n",
ice_drv_ver, err);
goto err_alloc_sw_unroll;
}
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -2623,6 +2962,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_verify_cacheline_size(pf);
/* If no DDP driven features have to be setup, return here */
if (ice_is_safe_mode(pf))
return 0;
/* initialize DDP driven features */
/* Note: DCB init failure is non-fatal to load */
if (ice_init_pf_dcb(pf, false)) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
ice_cfg_lldp_mib_change(&pf->hw, true);
}
return 0;
err_alloc_sw_unroll:
@@ -3075,6 +3428,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
struct ice_vsi *vsi = np->vsi;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
if (ice_is_safe_mode(vsi->back)) {
dev_err(&vsi->back->pdev->dev,
"Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
@@ -3764,9 +4124,6 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
*/
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#else
static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
int v;
@@ -3777,94 +4134,107 @@ static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
return 0;
}
#endif /* CONFIG_DCB */
/**
* ice_vsi_rebuild_all - rebuild all VSIs in PF
* @pf: the PF
* ice_vsi_rebuild_by_type - Rebuild VSI of a given type
* @pf: pointer to the PF instance
* @type: VSI type to rebuild
*
* Iterates through the pf->vsi array and rebuilds VSIs of the requested type
*/
static int ice_vsi_rebuild_all(struct ice_pf *pf)
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
int i;
enum ice_status status;
int i, err;
/* loop through pf->vsi array and reinit the VSI if found */
ice_for_each_vsi(pf, i) {
struct ice_vsi *vsi = pf->vsi[i];
int err;
if (!vsi)
if (!vsi || vsi->type != type)
continue;
/* rebuild the VSI */
err = ice_vsi_rebuild(vsi);
if (err) {
dev_err(&pf->pdev->dev,
"VSI at index %d rebuild failed\n",
vsi->idx);
"rebuild VSI failed, err %d, VSI index %d, type %d\n",
err, vsi->idx, type);
return err;
}
dev_info(&pf->pdev->dev,
"VSI at index %d rebuilt. vsi_num = 0x%x\n",
vsi->idx, vsi->vsi_num);
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
dev_err(&pf->pdev->dev,
"replay VSI failed, status %d, VSI index %d, type %d\n",
status, vsi->idx, type);
return -EIO;
}
/* Re-map HW VSI number, using VSI handle that has been
* previously validated in ice_replay_vsi() call above
*/
vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
/* enable the VSI */
err = ice_ena_vsi(vsi, false);
if (err) {
dev_err(&pf->pdev->dev,
"enable VSI failed, err %d, VSI index %d, type %d\n",
err, vsi->idx, type);
return err;
}
dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n",
vsi->idx, type);
} }
return 0;
}
/**
* ice_vsi_replay_all - replay all VSIs configuration in the PF
* @pf: the PF
*/
static int ice_vsi_replay_all(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
int i;
/* loop through pf->vsi array and replay the VSI if found */
ice_for_each_vsi(pf, i) {
struct ice_vsi *vsi = pf->vsi[i];
if (!vsi)
continue;
ret = ice_replay_vsi(hw, vsi->idx);
if (ret) {
dev_err(&pf->pdev->dev,
"VSI at index %d replay failed %d\n",
vsi->idx, ret);
return -EIO;
}
/* Re-map HW VSI number, using VSI handle that has been
* previously validated in ice_replay_vsi() call above
*/
vsi->vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
dev_info(&pf->pdev->dev,
"VSI at index %d filter replayed successfully - vsi_num %i\n",
vsi->idx, vsi->vsi_num);
}
/* Clean up replay filter after successful re-configuration */
ice_replay_post(hw);
return 0;
}
/**
* ice_update_pf_netdev_link - Update PF netdev link status
* @pf: pointer to the PF instance
*/
static void ice_update_pf_netdev_link(struct ice_pf *pf)
{
bool link_up;
int i;
ice_for_each_vsi(pf, i) {
struct ice_vsi *vsi = pf->vsi[i];
if (!vsi || vsi->type != ICE_VSI_PF)
return;
ice_get_link_status(pf->vsi[i]->port_info, &link_up);
if (link_up) {
netif_carrier_on(pf->vsi[i]->netdev);
netif_tx_wake_all_queues(pf->vsi[i]->netdev);
} else {
netif_carrier_off(pf->vsi[i]->netdev);
netif_tx_stop_all_queues(pf->vsi[i]->netdev);
}
}
}
/**
* ice_rebuild - rebuild after reset
* @pf: PF to rebuild
* @reset_type: type of reset
*/
static void ice_rebuild(struct ice_pf *pf)
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
int err, i;
int err;
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
dev_dbg(dev, "rebuilding PF\n");
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
ret = ice_init_all_ctrlq(hw);
if (ret) {
@@ -3872,6 +4242,16 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_init_ctrlq;
}
/* if DDP was previously loaded successfully */
if (!ice_is_safe_mode(pf)) {
/* reload the SW DB of filter tables */
if (reset_type == ICE_RESET_PFR)
ice_fill_blk_tbls(hw);
else
/* Reload DDP Package after CORER/GLOBR reset */
ice_load_pkg(NULL, pf);
}
ret = ice_clear_pf_cfg(hw);
if (ret) {
dev_err(dev, "clear PF configuration failed %d\n", ret);
@@ -3890,63 +4270,53 @@ static void ice_rebuild(struct ice_pf *pf)
if (err)
goto err_sched_init_port;
ice_dcb_rebuild(pf);
err = ice_vsi_rebuild_all(pf);
if (err) {
dev_err(dev, "ice_vsi_rebuild_all failed\n");
goto err_vsi_rebuild;
}
err = ice_update_link_info(hw->port_info);
if (err)
dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
/* Replay all VSIs Configuration, including filters after reset */
if (ice_vsi_replay_all(pf)) {
dev_err(&pf->pdev->dev,
"error replaying VSI configurations with switch filter rules\n");
goto err_vsi_rebuild;
}
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "misc vector setup failed: %d\n", err);
goto err_sched_init_port;
}
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
dev_err(dev, "PF VSI rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
/* restart the VSIs that were rebuilt and running before the reset */
err = ice_pf_ena_all_vsi(pf, false);
if (err) {
dev_err(&pf->pdev->dev, "error enabling VSIs\n");
/* no need to disable VSIs in tear down path in ice_rebuild()
* since its already taken care in ice_vsi_open()
*/
goto err_vsi_rebuild;
}
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF);
if (err) {
dev_err(dev, "VF VSI rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
}
ice_for_each_vsi(pf, i) {
bool link_up;
if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
continue;
ice_get_link_status(pf->vsi[i]->port_info, &link_up);
if (link_up) {
netif_carrier_on(pf->vsi[i]->netdev);
netif_tx_wake_all_queues(pf->vsi[i]->netdev);
} else {
netif_carrier_off(pf->vsi[i]->netdev);
netif_tx_stop_all_queues(pf->vsi[i]->netdev);
}
}
ice_update_pf_netdev_link(pf);
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
dev_err(dev,
"Rebuild failed due to error sending driver version: %d\n",
ret);
goto err_vsi_rebuild;
}
ice_replay_post(hw);
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;
err_vsi_rebuild:
ice_vsi_release_all(pf);
err_sched_init_port:
ice_sched_cleanup_all(hw);
err_init_ctrlq:
@@ -4473,6 +4843,17 @@ ice_features_check(struct sk_buff *skb,
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
static const struct net_device_ops ice_netdev_safe_mode_ops = {
.ndo_open = ice_open,
.ndo_stop = ice_stop,
.ndo_start_xmit = ice_start_xmit,
.ndo_set_mac_address = ice_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_tx_timeout = ice_tx_timeout,
};
static const struct net_device_ops ice_netdev_ops = { static const struct net_device_ops ice_netdev_ops = {
.ndo_open = ice_open, .ndo_open = ice_open,
.ndo_stop = ice_stop, .ndo_stop = ice_stop,
......
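In safe mode the netdev is registered with the reduced ops table above — basic open/stop/xmit, MAC, MTU, stats and Tx timeout, with all advanced callbacks left out. A sketch of how the driver might choose between the two tables when setting up the netdev (function and helper names assumed):

static void ice_set_ops(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_is_safe_mode(pf)) {
		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
		return;
	}

	netdev->netdev_ops = &ice_netdev_ops;
}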
@@ -12,6 +12,7 @@
 #include "ice_osdep.h"
 #include "ice_controlq.h"
 #include "ice_lan_tx_rx.h"
+#include "ice_flex_type.h"
 
 static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
 {
@@ -31,6 +32,7 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
 #define ICE_DBG_LAN		BIT_ULL(8)
 #define ICE_DBG_SW		BIT_ULL(13)
 #define ICE_DBG_SCHED		BIT_ULL(14)
+#define ICE_DBG_PKG		BIT_ULL(16)
 #define ICE_DBG_RES		BIT_ULL(17)
 #define ICE_DBG_AQ_MSG		BIT_ULL(24)
 #define ICE_DBG_AQ_CMD		BIT_ULL(27)
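ICE_DBG_PKG gives the package download code its own trace class under hw->debug_mask. An illustrative fragment only (message text assumed), showing the kind of output gated on the new bit:

/* e.g. in the package loader, trace the format version of the blob */
ice_debug(hw, ICE_DBG_PKG, "Pkg format version: %d.%d.%d.%d\n",
	  pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
	  pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);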
@@ -53,6 +55,14 @@ enum ice_aq_res_access_type {
 	ICE_RES_WRITE
 };
 
+struct ice_driver_ver {
+	u8 major_ver;
+	u8 minor_ver;
+	u8 build_ver;
+	u8 subbuild_ver;
+	u8 driver_string[32];
+};
+
 enum ice_fc_mode {
 	ICE_FC_NONE = 0,
 	ICE_FC_RX_PAUSE,
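struct ice_driver_ver is the payload for the new "send driver version" AdminQ command used by ice_send_version() in the rebuild path above. A minimal sketch of populating it, assuming the ice_aq_send_driver_ver() wrapper added by this series and illustrative version numbers:

static enum ice_status ice_send_version(struct ice_pf *pf)
{
	struct ice_driver_ver dv;

	dv.major_ver = 0;	/* illustrative values; the real helper */
	dv.minor_ver = 8;	/* derives them from the driver's       */
	dv.build_ver = 1;	/* version string                       */
	dv.subbuild_ver = 0;
	strscpy((char *)dv.driver_string, ice_drv_ver,
		sizeof(dv.driver_string));
	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}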
@@ -222,6 +232,8 @@ struct ice_nvm_info {
 	u8 blank_nvm_mode;	/* is NVM empty (no FW present) */
 };
 
+#define ICE_NVM_VER_LEN	32
+
 /* Max number of port to queue branches w.r.t topology */
 #define ICE_MAX_TRAFFIC_CLASS	8
 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -459,6 +471,30 @@ struct ice_hw {
 	u8 ucast_shared;	/* true if VSIs can share unicast addr */
 
+	/* Active package version (currently active) */
+	struct ice_pkg_ver active_pkg_ver;
+	u8 active_pkg_name[ICE_PKG_NAME_SIZE];
+	u8 active_pkg_in_nvm;
+
+	enum ice_aq_err pkg_dwnld_status;
+
+	/* Driver's package ver - (from the Metadata seg) */
+	struct ice_pkg_ver pkg_ver;
+	u8 pkg_name[ICE_PKG_NAME_SIZE];
+
+	/* Driver's Ice package version (from the Ice seg) */
+	struct ice_pkg_ver ice_pkg_ver;
+	u8 ice_pkg_name[ICE_PKG_NAME_SIZE];
+
+	/* Pointer to the ice segment */
+	struct ice_seg *seg;
+
+	/* Pointer to allocated copy of pkg memory */
+	u8 *pkg_copy;
+	u32 pkg_size;
+
+	/* HW block tables */
+	struct ice_blk_info blk[ICE_BLK_COUNT];
 };
 
 /* Statistics collected by each port, VSI, VEB, and S-channel */
...
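The new ice_hw members keep everything needed to survive resets without touching lib/firmware again: pkg_copy/pkg_size cache the raw package (this is what ice_load_pkg(NULL, pf) re-downloads after CORER/GLOBR), seg points at the ice segment inside that copy, and blk[] is the software mirror of the HW block tables that ice_fill_blk_tbls() refills on PFR. A sketch of how the buffer might be resolved inside the package loader (names illustrative):

static const u8 *ice_pkg_buf_sel(struct ice_hw *hw,
				 const struct firmware *firmware, u32 *size)
{
	/* a new blob from request_firmware() takes precedence */
	if (firmware) {
		*size = firmware->size;
		return firmware->data;
	}

	/* otherwise reuse the copy cached on the first load */
	*size = hw->pkg_size;
	return hw->pkg_copy;
}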
@@ -1443,6 +1443,12 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
 	struct ice_pf *pf = pci_get_drvdata(pdev);
 
+	if (ice_is_safe_mode(pf)) {
+		dev_err(&pf->pdev->dev,
+			"SR-IOV cannot be configured - Device is in Safe Mode\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (num_vfs)
 		return ice_pci_sriov_ena(pf, num_vfs);
...
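With SR-IOV gated on safe mode, a write to sriov_numvfs (e.g. echo 4 > /sys/bus/pci/devices/.../sriov_numvfs) on a device whose DDP package failed to load now fails cleanly with -EOPNOTSUPP and the log message above, rather than attempting to spawn VFs without the package-dependent switch rules in place.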