Commit 6cd476d2 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-09-12

This series contains updates to the ice driver to implement and support
loading a Dynamic Device Personalization (DDP) package from /lib/firmware
onto the device.

Paul updates the way the driver version is stored in the driver so that
it can be passed to the firmware.  The firmware needs the driver version
when a DDP package is loaded to ensure the driver has the appropriate
support for the features in the package.

Lukasz fixes how the firmware version is stored to align with how the
firmware stores its own version, and extends the log message to display
additional useful information such as the NVM version, API patch
information and firmware build hash.

Tony adds the driver support needed to check, load and store the DDP
package, including the ability to load DDP packages intended for specific
hardware devices, as well as the handling for when the DDP package fails
to load (see the hedged sketch of the load flow just below the commit
metadata).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 069841ef 2de12566
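
As context for the series: below is a minimal, hedged sketch of how the driver can pull the DDP package out of /lib/firmware and hand it to the package-init path added here. The file name and the probe-time wrapper are assumptions for illustration; request_firmware()/release_firmware() are the standard kernel firmware API, while ice_copy_and_init_pkg() and ice_set_safe_mode_caps() are introduced by this series.

#include <linux/firmware.h>
#include "ice.h"

/* Hedged sketch: request the DDP package (assumed file name) and download
 * it to the device; fall back to safe mode capabilities if no package is
 * available or the download fails.
 */
static void ice_load_pkg_sketch(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	const struct firmware *fw;
	enum ice_status status;

	/* Look for the package under /lib/firmware (assumed path/name) */
	if (request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev)) {
		dev_err(dev, "DDP package not present, entering safe mode\n");
		ice_set_safe_mode_caps(&pf->hw);
		return;
	}

	/* Copy the package into driver memory and download it to firmware */
	status = ice_copy_and_init_pkg(&pf->hw, fw->data, fw->size);
	if (status) {
		dev_err(dev, "DDP package failed to load, entering safe mode\n");
		ice_set_safe_mode_caps(&pf->hw);
	}

	release_firmware(fw);
}
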
......@@ -15,6 +15,7 @@ ice-y := ice_main.o \
ice_sched.o \
ice_lib.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
......@@ -8,6 +8,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
......@@ -29,6 +30,7 @@
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
......@@ -52,7 +54,6 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MBXRQ_LEN 512
......@@ -307,6 +308,7 @@ enum ice_pf_flags {
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
......@@ -404,6 +406,17 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
wr32(hw, GLINT_DYN_CTL(vector), val);
}
/**
* ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
* @netdev: pointer to the netdev struct
*/
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
return np->vsi->back;
}
/**
* ice_get_main_vsi - Get the PF VSI
* @pf: PF instance
......@@ -421,6 +434,7 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
void ice_update_vsi_stats(struct ice_vsi *vsi);
......
......@@ -33,6 +33,17 @@ struct ice_aqc_get_ver {
u8 api_patch;
};
/* Send driver version (indirect 0x0002) */
struct ice_aqc_driver_ver {
u8 major_ver;
u8 minor_ver;
u8 build_ver;
u8 subbuild_ver;
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
u8 driver_unloading;
......@@ -1519,6 +1530,56 @@ struct ice_aqc_get_clear_fw_log {
__le32 addr_low;
};
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C42) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
u8 reserved[3];
__le32 reserved1;
__le32 addr_high;
__le32 addr_low;
};
struct ice_aqc_download_pkg_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
__le32 addr_low;
};
/* Get Package Info List (indirect 0x0C43) */
struct ice_aqc_get_pkg_info_list {
__le32 reserved1;
__le32 reserved2;
__le32 addr_high;
__le32 addr_low;
};
/* Version format for packages */
struct ice_pkg_ver {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
#define ICE_PKG_NAME_SIZE 32
struct ice_aqc_get_pkg_info {
struct ice_pkg_ver ver;
char name[ICE_PKG_NAME_SIZE];
u8 is_in_nvm;
u8 is_active;
u8 is_active_at_boot;
u8 is_modified;
};
/* Get Package Info List response buffer format (0x0C43) */
struct ice_aqc_get_pkg_info_resp {
__le32 count;
struct ice_aqc_get_pkg_info pkg_info[1];
};
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
......@@ -1547,6 +1608,7 @@ struct ice_aq_desc {
u8 raw[16];
struct ice_aqc_generic generic;
struct ice_aqc_get_ver get_ver;
struct ice_aqc_driver_ver driver_ver;
struct ice_aqc_q_shutdown q_shutdown;
struct ice_aqc_req_res res_owner;
struct ice_aqc_manage_mac_read mac_read;
......@@ -1580,6 +1642,7 @@ struct ice_aq_desc {
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
......@@ -1612,12 +1675,18 @@ enum ice_aq_err {
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
};
/* Admin Queue command opcodes */
enum ice_adminq_opc {
/* AQ commands */
ice_aqc_opc_get_ver = 0x0001,
ice_aqc_opc_driver_ver = 0x0002,
ice_aqc_opc_q_shutdown = 0x0003,
/* resource ownership */
......@@ -1699,6 +1768,10 @@ enum ice_adminq_opc {
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
ice_aqc_opc_fw_logging_info = 0xFF10,
......
......@@ -728,6 +728,29 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
}
/**
* ice_get_nvm_version - get cached NVM version data
* @hw: pointer to the hardware structure
* @oem_ver: 8 bit NVM version
* @oem_build: 16 bit NVM build number
* @oem_patch: 8 bit NVM patch number
* @ver_hi: high 16 bits of the NVM version
* @ver_lo: low 16 bits of the NVM version
*/
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
{
struct ice_nvm_info *nvm = &hw->nvm;
*oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
*oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
*oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
ICE_OEM_VER_BUILD_SHIFT);
*ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
*ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
}
/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
......@@ -859,7 +882,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
return 0;
err_unroll_fltr_mgmt_struct:
......@@ -887,6 +912,8 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_sched_cleanup_all(hw);
ice_sched_clear_agg(hw);
ice_free_seg(hw);
ice_free_hw_tbls(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
......@@ -1207,6 +1234,12 @@ ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
* in firmware is acquired by the software to prevent most (but not all) types
* of AQ commands from being sent to FW
*/
DEFINE_MUTEX(ice_global_cfg_lock_sw);
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
* @hw: pointer to the HW struct
......@@ -1221,7 +1254,38 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
struct ice_aqc_req_res *cmd = &desc->params.res_owner;
bool lock_acquired = false;
enum ice_status status;
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
* Package, Get Version, Get Package Info List and Release Resource
* (with resource ID set to Global Config Lock) AdminQ commands are
* allowed; all others must block until the package download completes
* and the Global Config Lock is released. See also
* ice_acquire_global_cfg_lock().
*/
switch (le16_to_cpu(desc->opcode)) {
case ice_aqc_opc_download_pkg:
case ice_aqc_opc_get_pkg_info_list:
case ice_aqc_opc_get_ver:
break;
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
break;
/* fall-through */
default:
mutex_lock(&ice_global_cfg_lock_sw);
lock_acquired = true;
break;
}
status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
if (lock_acquired)
mutex_unlock(&ice_global_cfg_lock_sw);
return status;
}
/**
......@@ -1258,6 +1322,43 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
return status;
}
/**
* ice_aq_send_driver_ver
* @hw: pointer to the HW struct
* @dv: driver version data (major, minor, build, subbuild and driver string)
* @cd: pointer to command details structure or NULL
*
* Send the driver version (0x0002) to the firmware
*/
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
struct ice_aqc_driver_ver *cmd;
struct ice_aq_desc desc;
u16 len;
cmd = &desc.params.driver_ver;
if (!dv)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->major_ver = dv->major_ver;
cmd->minor_ver = dv->minor_ver;
cmd->build_ver = dv->build_ver;
cmd->subbuild_ver = dv->subbuild_ver;
len = 0;
while (len < sizeof(dv->driver_string) &&
isascii(dv->driver_string[len]) && dv->driver_string[len])
len++;
return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
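
A minimal usage sketch of the new command follows; the wrapper name, the version numbers and the "ice" string are assumptions for illustration, and only struct ice_driver_ver and ice_aq_send_driver_ver() above come from this series.

/* Hedged sketch: fill struct ice_driver_ver from the module's version
 * components (values assumed) and send it to firmware, e.g. at probe time.
 */
static enum ice_status ice_send_version_sketch(struct ice_pf *pf)
{
	struct ice_driver_ver dv;

	dv.major_ver = 0;		/* assumed DRV_VERSION_MAJOR */
	dv.minor_ver = 8;		/* assumed DRV_VERSION_MINOR */
	dv.build_ver = 1;		/* assumed DRV_VERSION_BUILD */
	dv.subbuild_ver = 0;
	strscpy((char *)dv.driver_string, "ice", sizeof(dv.driver_string));

	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}
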
/**
* ice_aq_q_shutdown
* @hw: pointer to the HW struct
......@@ -1745,6 +1846,75 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
return status;
}
/**
* ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
* @hw: pointer to the hardware structure
*/
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
struct ice_hw_func_caps *func_caps = &hw->func_caps;
struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
u32 valid_func, rxq_first_id, txq_first_id;
u32 msix_vector_first_id, max_mtu;
u32 num_func = 0;
u8 i;
/* cache some func_caps values that should be restored after memset */
valid_func = func_caps->common_cap.valid_functions;
txq_first_id = func_caps->common_cap.txq_first_id;
rxq_first_id = func_caps->common_cap.rxq_first_id;
msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
max_mtu = func_caps->common_cap.max_mtu;
/* unset func capabilities */
memset(func_caps, 0, sizeof(*func_caps));
/* restore cached values */
func_caps->common_cap.valid_functions = valid_func;
func_caps->common_cap.txq_first_id = txq_first_id;
func_caps->common_cap.rxq_first_id = rxq_first_id;
func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
func_caps->common_cap.max_mtu = max_mtu;
/* one Tx and one Rx queue in safe mode */
func_caps->common_cap.num_rxq = 1;
func_caps->common_cap.num_txq = 1;
/* two MSIX vectors, one for traffic and one for misc causes */
func_caps->common_cap.num_msix_vectors = 2;
func_caps->guar_num_vsi = 1;
/* cache some dev_caps values that should be restored after memset */
valid_func = dev_caps->common_cap.valid_functions;
txq_first_id = dev_caps->common_cap.txq_first_id;
rxq_first_id = dev_caps->common_cap.rxq_first_id;
msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
max_mtu = dev_caps->common_cap.max_mtu;
/* unset dev capabilities */
memset(dev_caps, 0, sizeof(*dev_caps));
/* restore cached values */
dev_caps->common_cap.valid_functions = valid_func;
dev_caps->common_cap.txq_first_id = txq_first_id;
dev_caps->common_cap.rxq_first_id = rxq_first_id;
dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
dev_caps->common_cap.max_mtu = max_mtu;
/* valid_func is a bitmap. get number of functions */
#define ICE_MAX_FUNCS 8
for (i = 0; i < ICE_MAX_FUNCS; i++)
if (valid_func & BIT(i))
num_func++;
/* one Tx and one Rx queue per function in safe mode */
dev_caps->common_cap.num_rxq = num_func;
dev_caps->common_cap.num_txq = num_func;
/* two MSIX vectors per function */
dev_caps->common_cap.num_msix_vectors = 2 * num_func;
}
/**
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
......
......@@ -6,6 +6,7 @@
#include "ice.h"
#include "ice_type.h"
#include "ice_flex_pipe.h"
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
......@@ -41,6 +42,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
void ice_dev_onetime_setup(struct ice_hw *hw);
enum ice_status
......@@ -66,11 +69,17 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
......@@ -130,6 +139,9 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
void
ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
u8 *oem_patch, u8 *ver_hi, u8 *ver_lo);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
......
......@@ -3,6 +3,48 @@
#include "ice_dcb_lib.h"
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
* @vsi: the VSI being configured
* @ena_tc: TC map to be enabled
*/
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = vsi->back;
struct ice_dcbx_cfg *dcbcfg;
u8 netdev_tc;
int i;
if (!netdev)
return;
if (!ena_tc) {
netdev_reset_tc(netdev);
return;
}
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
netdev_set_tc_queue(netdev,
vsi->tc_cfg.tc_info[i].netdev_tc,
vsi->tc_cfg.tc_info[i].qcount_tx,
vsi->tc_cfg.tc_info[i].qoffset);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
u8 ets_tc = dcbcfg->etscfg.prio_table[i];
/* Get the mapped netdev TC# for the UP */
netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
netdev_set_prio_tc_map(netdev, i, netdev_tc);
}
}
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
......
......@@ -22,6 +22,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
......@@ -58,5 +59,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
#define ice_vsi_cfg_netdev_tc(vsi, ena_tc) do {} while (0)
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
......@@ -160,31 +160,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
/**
* ice_nvm_version_str - format the NVM version strings
* @hw: ptr to the hardware info
*/
static char *ice_nvm_version_str(struct ice_hw *hw)
{
static char buf[ICE_ETHTOOL_FWVER_LEN];
u8 ver, patch;
u32 full_ver;
u16 build;
full_ver = hw->nvm.oem_ver;
ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
ICE_OEM_VER_BUILD_SHIFT);
patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);
snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
(hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
(hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
hw->nvm.eetrack, ver, build, patch);
return buf;
}
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
......@@ -3460,6 +3435,33 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_fecparam = ice_set_fecparam,
};
static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_strings = ice_get_strings,
.get_ethtool_stats = ice_get_ethtool_stats,
.get_sset_count = ice_get_sset_count,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
};
/**
* ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
* @netdev: network interface device structure
*/
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
}
/**
* ice_set_ethtool_ops - setup netdev ethtool ops
* @netdev: network interface device structure
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_FLEX_PIPE_H_
#define _ICE_FLEX_PIPE_H_
#include "ice_type.h"
/* Package minimal version supported */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
/* Package format version */
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
#endif /* _ICE_FLEX_PIPE_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
u16 off; /* Offset within the protocol header */
u8 resvrd;
} __packed;
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver format_ver;
__le32 seg_count;
__le32 seg_offset[1];
};
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
__le32 seg_type;
struct ice_pkg_ver seg_ver;
__le32 seg_size;
char seg_name[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[1];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[1];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[1];
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 track_id;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[1];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
/* ice package section IDs */
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
/* package labels */
struct ice_label {
__le16 value;
#define ICE_PKG_LABEL_SIZE 64
char name[ICE_PKG_LABEL_SIZE];
};
struct ice_label_section {
__le16 count;
struct ice_label label[1];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
sizeof(struct ice_label_section) - sizeof(struct ice_label), \
sizeof(struct ice_label))
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
struct ice_fv fv[1];
};
/* The BOOST TCAM stores the match packet header in reverse order, meaning
* the fields are reversed; in addition, this means that the normally big endian
* fields of the packet are now little endian.
*/
struct ice_boost_key_value {
#define ICE_BOOST_REMAINING_HV_KEY 15
u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
__le16 hv_dst_port_key;
__le16 hv_src_port_key;
u8 tcam_search_key;
} __packed;
struct ice_boost_key {
struct ice_boost_key_value key;
struct ice_boost_key_value key2;
};
/* package Boost TCAM entry */
struct ice_boost_tcam_entry {
__le16 addr;
__le16 reserved;
/* break up the 40 bytes of key into different fields */
struct ice_boost_key key;
u8 boost_hit_index_group;
/* The following contains bitfields which are not on byte boundaries.
* These fields are currently unused by driver software.
*/
#define ICE_BOOST_BIT_FIELDS 43
u8 bit_fields[ICE_BOOST_BIT_FIELDS];
};
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
struct ice_boost_tcam_entry tcam[1];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
sizeof(struct ice_boost_tcam_section) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
struct ice_xlt1_section {
__le16 count;
__le16 offset;
u8 value[1];
} __packed;
struct ice_xlt2_section {
__le16 count;
__le16 offset;
__le16 value[1];
};
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
u8 redir_value[1];
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
u32 type;
struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
u32 entry_idx;
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
struct ice_es {
u32 sid;
u16 count;
u16 fvw;
u16 *ref_count;
struct list_head prof_map;
struct ice_fv_word *t;
struct mutex prof_map_lock; /* protect access to profiles list */
u8 *written;
u8 reverse; /* set to true to reverse FV order */
};
/* PTYPE Group management */
/* Note: XLT1 table takes 13-bit as input, and results in an 8-bit packet type
* group (PTG) ID as output.
*
* Note: PTG 0 is the default packet type group and it is assumed that all PTYPE
* are a part of this group until moved to a new PTG.
*/
#define ICE_DEFAULT_PTG 0
struct ice_ptg_entry {
struct ice_ptg_ptype *first_ptype;
u8 in_use;
};
struct ice_ptg_ptype {
struct ice_ptg_ptype *next_ptype;
u8 ptg;
};
struct ice_vsig_entry {
struct list_head prop_lst;
struct ice_vsig_vsi *first_vsi;
u8 in_use;
};
struct ice_vsig_vsi {
struct ice_vsig_vsi *next_vsi;
u32 prop_mask;
u16 changed;
u16 vsig;
};
#define ICE_XLT1_CNT 1024
#define ICE_MAX_PTGS 256
/* XLT1 Table */
struct ice_xlt1 {
struct ice_ptg_entry *ptg_tbl;
struct ice_ptg_ptype *ptypes;
u8 *t;
u32 sid;
u16 count;
};
#define ICE_XLT2_CNT 768
#define ICE_MAX_VSIGS 768
/* VSIG bit layout:
* [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS
* [13:15]: PF number of device
*/
#define ICE_VSIG_IDX_M (0x1FFF)
#define ICE_PF_NUM_S 13
#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
#define ICE_VSIG_VALUE(vsig, pf_id) \
(u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
(((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
#define ICE_DEFAULT_VSIG 0
/* XLT2 Table */
struct ice_xlt2 {
struct ice_vsig_entry *vsig_tbl;
struct ice_vsig_vsi *vsis;
u16 *t;
u32 sid;
u16 count;
};
/* Keys are made up of two values, each one-half the size of the key.
* For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values)
*/
#define ICE_TCAM_KEY_VAL_SZ 5
#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ)
struct ice_prof_tcam_entry {
__le16 addr;
u8 key[ICE_TCAM_KEY_SZ];
u8 prof_id;
} __packed;
struct ice_prof_id_section {
__le16 count;
struct ice_prof_tcam_entry entry[1];
} __packed;
struct ice_prof_tcam {
u32 sid;
u16 count;
u16 max_prof_id;
struct ice_prof_tcam_entry *t;
u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */
};
struct ice_prof_redir {
u8 *t;
u32 sid;
u16 count;
};
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
u8 overwrite; /* set to true to allow overwrite of table entries */
u8 is_list_init;
};
#endif /* _ICE_FLEX_TYPE_H_ */
......@@ -55,6 +55,8 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
......
......@@ -752,6 +752,17 @@ void ice_vsi_put_qs(struct ice_vsi *vsi)
mutex_unlock(&pf->avail_q_mutex);
}
/**
* ice_is_safe_mode
* @pf: pointer to the PF struct
*
* returns true if driver is in safe mode, false otherwise
*/
bool ice_is_safe_mode(struct ice_pf *pf)
{
return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}
/**
* ice_rss_clean - Delete RSS related VSI structures that hold user inputs
* @vsi: the VSI being removed
......@@ -2629,6 +2640,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* DCB settings in the HW. Also, if the FW DCBX engine is not running
* then Rx LLDP packets need to be redirected up the stack.
*/
if (!ice_is_safe_mode(pf)) {
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
......@@ -2639,6 +2651,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, true);
}
}
return vsi;
......@@ -2905,8 +2918,11 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/* disable each interrupt */
ice_for_each_q_vector(vsi, i)
ice_for_each_q_vector(vsi, i) {
if (!vsi->q_vectors[i])
continue;
wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
}
ice_flush(hw);
......@@ -2975,6 +2991,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
pf->num_avail_sw_msix += vsi->num_q_vectors;
}
if (!ice_is_safe_mode(pf)) {
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, false);
ice_cfg_sw_lldp(vsi, true, false);
......@@ -2984,6 +3001,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
}
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
......@@ -3168,48 +3186,6 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
sizeof(vsi->info.tc_mapping));
}
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
* @vsi: the VSI being configured
* @ena_tc: TC map to be enabled
*/
static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = vsi->back;
struct ice_dcbx_cfg *dcbcfg;
u8 netdev_tc;
int i;
if (!netdev)
return;
if (!ena_tc) {
netdev_reset_tc(netdev);
return;
}
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
netdev_set_tc_queue(netdev,
vsi->tc_cfg.tc_info[i].netdev_tc,
vsi->tc_cfg.tc_info[i].qcount_tx,
vsi->tc_cfg.tc_info[i].qoffset);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
u8 ets_tc = dcbcfg->etscfg.prio_table[i];
/* Get the mapped netdev TC# for the UP */
netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
netdev_set_prio_tc_map(netdev, i, netdev_tc);
}
}
/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
......@@ -3275,6 +3251,25 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
}
#endif /* CONFIG_DCB */
/**
* ice_nvm_version_str - format the NVM version strings
* @hw: ptr to the hardware info
*/
char *ice_nvm_version_str(struct ice_hw *hw)
{
u8 oem_ver, oem_patch, ver_hi, ver_lo;
static char buf[ICE_NVM_VER_LEN];
u16 oem_build;
ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
&ver_lo);
snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
return buf;
}
/**
* ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
* @vsi: the VSI being configured MAC filter
......
......@@ -120,7 +120,10 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
char *ice_nvm_version_str(struct ice_hw *hw);
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
#endif /* !_ICE_LIB_H_ */
......@@ -12,6 +12,7 @@
#include "ice_osdep.h"
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
......@@ -31,6 +32,7 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
#define ICE_DBG_LAN BIT_ULL(8)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
......@@ -53,6 +55,14 @@ enum ice_aq_res_access_type {
ICE_RES_WRITE
};
struct ice_driver_ver {
u8 major_ver;
u8 minor_ver;
u8 build_ver;
u8 subbuild_ver;
u8 driver_string[32];
};
enum ice_fc_mode {
ICE_FC_NONE = 0,
ICE_FC_RX_PAUSE,
......@@ -222,6 +232,8 @@ struct ice_nvm_info {
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
#define ICE_NVM_VER_LEN 32
/* Max number of port to queue branches w.r.t topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
......@@ -459,6 +471,30 @@ struct ice_hw {
u8 ucast_shared; /* true if VSIs can share unicast addr */
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
enum ice_aq_err pkg_dwnld_status;
/* Driver's package ver - (from the Metadata seg) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
/* Driver's Ice package version (from the Ice seg) */
struct ice_pkg_ver ice_pkg_ver;
u8 ice_pkg_name[ICE_PKG_NAME_SIZE];
/* Pointer to the ice segment */
struct ice_seg *seg;
/* Pointer to allocated copy of pkg memory */
u8 *pkg_copy;
u32 pkg_size;
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
......
......@@ -1443,6 +1443,12 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
if (ice_is_safe_mode(pf)) {
dev_err(&pf->pdev->dev,
"SR-IOV cannot be configured - Device is in Safe Mode\n");
return -EOPNOTSUPP;
}
if (num_vfs)
return ice_pci_sriov_ena(pf, num_vfs);
......