Commit 6ee24258 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-03-10

This series contains updates to ice and iavf drivers.

Cleaned up unnecessary parentheses, as pointed out by Sergei
Shtylyov.

Mitch updates the iavf and ice drivers to raise the limit on the number
of queues the drivers can support, to account for the newer 800-series
capabilities.

Brett cleans up the error messages for both SR-IOV and non-SR-IOV use
cases.  Fixed the logic when the ice driver is removed while a
bare-metal VF is passing traffic, which was causing a transmit hang on
the VF.  Updated the ice driver to display the "Link detected" field via
ethtool when the driver is in safe mode.  Updated the ice driver to
properly set VLAN pruning when transmit anti-spoof is off.

Avinash fixed a corner case in DCB where, when switching from IEEE to
CEE mode, the DCBX mode did not get properly updated.

Dave updates the logic for switching from software DCB to firmware DCB
so that DCBX is renegotiated, ensuring the firmware agent has up-to-date
information about the DCB settings of the link partner.

Lukasz increases the PF's mailbox receive queue size to the maximum to
prevent a potential bottleneck or slowdown caused by the PF's mailbox
receive queue filling up.

Bruce updates the ice driver to use strscpy() instead of strlcpy().
Cleaned up variable names that were not very descriptive, replacing them
with more meaningful names.
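
For readers unfamiliar with the two helpers: strlcpy() returns the length
of the source string and always walks the entire source (a problem for
non-NUL-terminated sources), while strscpy() stops at the destination
size, always NUL-terminates, and reports truncation directly.  A minimal
sketch of the preferred pattern (buf and src are placeholder names, not
code from this series):

	ssize_t len;

	len = strscpy(buf, src, sizeof(buf));
	if (len == -E2BIG)
		/* destination too small; copy is truncated but NUL-terminated */
		pr_debug("name truncated\n");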

Anirudh replaces the use of ENOTSUPP with EOPNOTSUPP in the ice driver.
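
The distinction matters because ENOTSUPP (524) is a kernel-internal value
with no matching userspace errno, so tools report "Unknown error 524",
whereas EOPNOTSUPP (95) is decoded as "Operation not supported".  A
hedged illustration, not code from this series (ice_xsk_dummy_op is a
made-up name):

	/* stub returned when AF_XDP support is compiled out */
	static inline int ice_xsk_dummy_op(void)
	{
		return -EOPNOTSUPP;	/* not -ENOTSUPP: userspace cannot decode 524 */
	}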

Jake fixed up a function header comment to properly reflect the variable
size and use.

v2: Dropped patch 5 of the original series, where Tony added tunnel
    offload support.  Based on community feedback, the patch needed
    changes, so Tony is being given additional time to work on them
    rather than hold up the remaining changes in the series.
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 13e787ca dab02de8
@@ -81,7 +81,7 @@ struct iavf_vsi {
 #define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
 #define IAVF_TX_CTXTDESC(R, i) \
 	(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
-#define IAVF_MAX_REQ_QUEUES 4
+#define IAVF_MAX_REQ_QUEUES 16
 #define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
 #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
...
@@ -860,7 +860,7 @@ static void iavf_get_channels(struct net_device *netdev,
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 
 	/* Report maximum channels */
-	ch->max_combined = IAVF_MAX_REQ_QUEUES;
+	ch->max_combined = adapter->vsi_res->num_queue_pairs;
 	ch->max_other = NONQ_VECS;
 	ch->other_count = NONQ_VECS;
@@ -881,14 +881,7 @@ static int iavf_set_channels(struct net_device *netdev,
 			     struct ethtool_channels *ch)
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
-	int num_req = ch->combined_count;
+	u32 num_req = ch->combined_count;
 
-	if (num_req != adapter->num_active_queues &&
-	    !(adapter->vf_res->vf_cap_flags &
-	      VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
-		dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
-		return -EINVAL;
-	}
-
 	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
 	    adapter->num_tc) {
@@ -899,14 +892,19 @@ static int iavf_set_channels(struct net_device *netdev,
 	/* All of these should have already been checked by ethtool before this
 	 * even gets to us, but just to be sure.
 	 */
-	if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
+	if (num_req > adapter->vsi_res->num_queue_pairs)
 		return -EINVAL;
 
+	if (num_req == adapter->num_active_queues)
+		return 0;
+
 	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
 		return -EINVAL;
 
 	adapter->num_req_queues = num_req;
-	return iavf_request_queues(adapter, num_req);
+	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+	iavf_schedule_reset(adapter);
+
+	return 0;
 }
 
 /**
...
@@ -3450,7 +3450,7 @@ int iavf_process_config(struct iavf_adapter *adapter)
 	}
 
 	if (num_req_queues &&
-	    num_req_queues != adapter->vsi_res->num_queue_pairs) {
+	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
 		/* Problem. The PF gave us fewer queues than what we had
 		 * negotiated in our request. Need a reset to see if we can't
 		 * get back to a working state.
...
@@ -396,33 +396,6 @@ void iavf_map_queues(struct iavf_adapter *adapter)
 	kfree(vimi);
 }
 
-/**
- * iavf_request_queues
- * @adapter: adapter structure
- * @num: number of requested queues
- *
- * We get a default number of queues from the PF. This enables us to request a
- * different number. Returns 0 on success, negative on failure
- **/
-int iavf_request_queues(struct iavf_adapter *adapter, int num)
-{
-	struct virtchnl_vf_res_request vfres;
-
-	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
-		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
-			adapter->current_op);
-		return -EBUSY;
-	}
-
-	vfres.num_queue_pairs = min_t(int, num, num_online_cpus());
-
-	adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
-	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
-	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
-				(u8 *)&vfres, sizeof(vfres));
-}
-
 /**
  * iavf_add_ether_addrs
  * @adapter: adapter structure
...
@@ -60,7 +60,6 @@ extern const char ice_drv_ver[];
 #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
 #define ICE_AQ_LEN 64
 #define ICE_MBXSQ_LEN 64
-#define ICE_MBXRQ_LEN 512
 #define ICE_MIN_MSIX 2
 #define ICE_NO_VSI 0xffff
 #define ICE_VSI_MAP_CONTIG 0
@@ -70,7 +69,6 @@ extern const char ice_drv_ver[];
 #define ICE_Q_WAIT_RETRY_LIMIT 10
 #define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
 #define ICE_MAX_LG_RSS_QS 256
-#define ICE_MAX_SMALL_RSS_QS 8
 #define ICE_RES_VALID_BIT 0x8000
 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
 #define ICE_INVAL_Q_INDEX 0xffff
@@ -213,6 +211,7 @@ enum ice_state {
 	__ICE_SERVICE_DIS,
 	__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
 	__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
+	__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
 	__ICE_STATE_NBITS /* must be last */
 };
@@ -363,8 +362,8 @@ struct ice_pf {
 	struct ice_vf *vf;
 	int num_alloc_vfs; /* actual number of VFs allocated */
 	u16 num_vfs_supported; /* num VFs supported for this PF */
-	u16 num_vf_qps; /* num queue pairs per VF */
-	u16 num_vf_msix; /* num vectors per VF */
+	u16 num_qps_per_vf;
+	u16 num_msix_per_vf;
 	/* used to ratelimit the MDD event logging */
 	unsigned long last_printed_mdd_jiffies;
 	DECLARE_BITMAP(state, __ICE_STATE_NBITS);
...
@@ -620,8 +620,8 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
  * @oem_ver: 8 bit NVM version
  * @oem_build: 16 bit NVM build number
  * @oem_patch: 8 NVM patch number
- * @ver_hi: high 16 bits of the NVM version
- * @ver_lo: low 16 bits of the NVM version
+ * @ver_hi: high 8 bits of the NVM version
+ * @ver_lo: low 8 bits of the NVM version
  */
 void
 ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
...
@@ -77,9 +77,9 @@ static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
 		mode = DCB_CAP_DCBX_LLD_MANAGED;
 
 	if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
-		return (mode | DCB_CAP_DCBX_VER_CEE);
+		return mode | DCB_CAP_DCBX_VER_CEE;
 	else
-		return (mode | DCB_CAP_DCBX_VER_IEEE);
+		return mode | DCB_CAP_DCBX_VER_IEEE;
 }
 
 /**
@@ -779,7 +779,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	bool need_reconfig = false;
 	struct ice_port_info *pi;
 	struct ice_vsi *pf_vsi;
-	u8 type;
+	u8 mib_type;
 	int ret;
 
 	/* Not DCB capable or capability disabled */
@@ -794,16 +794,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	pi = pf->hw.port_info;
 	mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
 
 	/* Ignore if event is not for Nearest Bridge */
-	type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
-		ICE_AQ_LLDP_BRID_TYPE_M);
-	dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
-	if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
+	mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
+		    ICE_AQ_LLDP_BRID_TYPE_M);
+	dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
+	if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
 		return;
 
 	/* Check MIB Type and return if event for Remote MIB update */
-	type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
-	dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
-	if (type == ICE_AQ_LLDP_MIB_REMOTE) {
+	mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+	dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
+	if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
 		/* Update the remote cached instance and return */
 		ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
 					 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
@@ -832,10 +832,11 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	/* No change detected in DCBX configs */
 	if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
 		dev_dbg(dev, "No change detected in DCBX configuration.\n");
+		pf->dcbx_cap = ice_dcb_get_mode(pi, false);
 		goto out;
 	}
 
+	pf->dcbx_cap = ice_dcb_get_mode(pi, false);
+
 	need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
 					   &pi->local_dcbx_cfg);
 	ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
...
@@ -173,8 +173,8 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 	struct ice_hw *hw = &pf->hw;
 	u16 oem_build;
 
-	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-	strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
+	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+	strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
 
 	/* Display NVM version (from which the firmware version can be
 	 * determined) which contains more pertinent information.
@@ -185,7 +185,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 		 "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
 		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
 
-	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+	strscpy(drvinfo->bus_info, pci_name(pf->pdev),
 		sizeof(drvinfo->bus_info));
 	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
 }
@@ -1131,6 +1131,33 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
 	return err;
 }
 
+/**
+ * ice_nway_reset - restart autonegotiation
+ * @netdev: network interface device structure
+ */
+static int ice_nway_reset(struct net_device *netdev)
+{
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_vsi *vsi = np->vsi;
+	struct ice_port_info *pi;
+	enum ice_status status;
+
+	pi = vsi->port_info;
+	/* If VSI state is up, then restart autoneg with link up */
+	if (!test_bit(__ICE_DOWN, vsi->back->state))
+		status = ice_aq_set_link_restart_an(pi, true, NULL);
+	else
+		status = ice_aq_set_link_restart_an(pi, false, NULL);
+
+	if (status) {
+		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
+			    status, pi->hw->adminq.sq_last_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 /**
  * ice_get_priv_flags - report device private flags
  * @netdev: network interface device structure
@@ -1264,6 +1291,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			status = ice_cfg_lldp_mib_change(&pf->hw, true);
 			if (status)
 				dev_dbg(dev, "Fail to enable MIB change events\n");
+
+			ice_nway_reset(netdev);
 		}
 	}
 	if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -2775,30 +2804,6 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 	return err;
 }
 
-static int ice_nway_reset(struct net_device *netdev)
-{
-	/* restart autonegotiation */
-	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_vsi *vsi = np->vsi;
-	struct ice_port_info *pi;
-	enum ice_status status;
-
-	pi = vsi->port_info;
-	/* If VSI state is up, then restart autoneg with link up */
-	if (!test_bit(__ICE_DOWN, vsi->back->state))
-		status = ice_aq_set_link_restart_an(pi, true, NULL);
-	else
-		status = ice_aq_set_link_restart_an(pi, false, NULL);
-
-	if (status) {
-		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
-			    status, pi->hw->adminq.sq_last_status);
-		return -EIO;
-	}
-
-	return 0;
-}
-
 /**
  * ice_get_pauseparam - Get Flow Control status
  * @netdev: network interface device structure
@@ -3813,6 +3818,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
 	.get_regs = ice_get_regs,
 	.get_msglevel = ice_get_msglevel,
 	.set_msglevel = ice_set_msglevel,
+	.get_link = ethtool_op_get_link,
 	.get_eeprom_len = ice_get_eeprom_len,
 	.get_eeprom = ice_get_eeprom,
 	.get_strings = ice_get_strings,
...
@@ -694,7 +694,7 @@ ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
  * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
  * @seg: packet segment the field being set belongs to
  * @fld: field to be set
- * @type: type of the field
+ * @field_type: type of the field
  * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
  *           entry's input buffer
  * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
@@ -715,16 +715,16 @@ ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
  */
 static void
 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
-		     enum ice_flow_fld_match_type type, u16 val_loc,
+		     enum ice_flow_fld_match_type field_type, u16 val_loc,
 		     u16 mask_loc, u16 last_loc)
 {
 	u64 bit = BIT_ULL(fld);
 
 	seg->match |= bit;
-	if (type == ICE_FLOW_FLD_TYPE_RANGE)
+	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
 		seg->range |= bit;
 
-	seg->fields[fld].type = type;
+	seg->fields[fld].type = field_type;
 	seg->fields[fld].src.val = val_loc;
 	seg->fields[fld].src.mask = mask_loc;
 	seg->fields[fld].src.last = last_loc;
...
(One file's diff is collapsed and not shown here.)
@@ -6,7 +6,7 @@
 
 #include "ice.h"
 
-const char *ice_vsi_type_str(enum ice_vsi_type type);
+const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
 
 int
 ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -58,7 +58,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
 
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
-	      enum ice_vsi_type type, u16 vf_id);
+	      enum ice_vsi_type vsi_type, u16 vf_id);
 
 void ice_napi_del(struct ice_vsi *vsi);
...
@@ -1518,7 +1518,7 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
-	hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
+	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
@@ -2054,8 +2054,16 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
 		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
 	}
 	if (oicr & PFINT_OICR_VFLR_M) {
-		ena_mask &= ~PFINT_OICR_VFLR_M;
-		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+		/* disable any further VFLR event notifications */
+		if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+			u32 reg = rd32(hw, PFINT_OICR_ENA);
+
+			reg &= ~PFINT_OICR_VFLR_M;
+			wr32(hw, PFINT_OICR_ENA, reg);
+		} else {
+			ena_mask &= ~PFINT_OICR_VFLR_M;
+			set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+		}
 	}
 
 	if (oicr & PFINT_OICR_GRST_M) {
@@ -3380,11 +3388,14 @@ static void ice_remove(struct pci_dev *pdev)
 		msleep(100);
 	}
 
+	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
+		set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
+		ice_free_vfs(pf);
+	}
+
 	set_bit(__ICE_DOWN, pf->state);
 	ice_service_task_stop(pf);
-	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
-		ice_free_vfs(pf);
 	ice_vsi_release_all(pf);
 	ice_free_irq_msix_misc(pf);
 	ice_for_each_vsi(pf, i) {
@@ -5086,13 +5097,13 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 		/* Read interrupt register */
 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
 
-		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
+		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
 			    head, tx_ring->next_to_use, val);
 	}
 
 	pf->tx_timeout_last_recovery = jiffies;
-	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
 		    pf->tx_timeout_recovery_level, txqueue);
 
 	switch (pf->tx_timeout_recovery_level) {
...
@@ -578,7 +578,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 			struct ice_aqc_get_sw_cfg_resp_elem *ele;
 			u16 pf_vf_num, swid, vsi_port_num;
 			bool is_vf = false;
-			u8 type;
+			u8 res_type;
 
 			ele = rbuf[i].elements;
 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
@@ -593,16 +593,16 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
 				is_vf = true;
 
-			type = le16_to_cpu(ele->vsi_port_num) >>
+			res_type = le16_to_cpu(ele->vsi_port_num) >>
 				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
 
-			if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
+			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
 				/* FW VSI is not needed. Just continue. */
 				continue;
 			}
 
 			ice_init_port_info(hw->port_info, vsi_port_num,
-					   type, swid, pf_vf_num, is_vf);
+					   res_type, swid, pf_vf_num, is_vf);
 		}
 	} while (req_desc && !status);
@@ -958,7 +958,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 	struct ice_aqc_sw_rules_elem *s_rule;
 	enum ice_status status;
 	u16 s_rule_size;
-	u16 type;
+	u16 rule_type;
 	int i;
 
 	if (!num_vsi)
@@ -970,11 +970,11 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
 	    lkup_type == ICE_SW_LKUP_PROMISC ||
 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
-		type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
 	else if (lkup_type == ICE_SW_LKUP_VLAN)
-		type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
+		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
 	else
 		return ICE_ERR_PARAM;
@@ -992,7 +992,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
 	}
 
-	s_rule->type = cpu_to_le16(type);
+	s_rule->type = cpu_to_le16(rule_type);
 	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
 	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
...
@@ -21,18 +21,15 @@
 #define ICE_PCI_CIAD_WAIT_COUNT 100
 #define ICE_PCI_CIAD_WAIT_DELAY_US 1
 
-/* VF resources default values and limitation */
+/* VF resource constraints */
 #define ICE_MAX_VF_COUNT 256
-#define ICE_MAX_QS_PER_VF 256
 #define ICE_MIN_QS_PER_VF 1
-#define ICE_DFLT_QS_PER_VF 4
 #define ICE_NONQ_VECS_VF 1
 #define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MAX_POLICY_INTR_PER_VF 33
+#define ICE_MAX_RSS_QS_PER_VF 16
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
 #define ICE_MAX_VF_RESET_TRIES 40
 #define ICE_MAX_VF_RESET_SLEEP_MS 20
@@ -75,8 +72,8 @@ struct ice_vf {
 	struct virtchnl_version_info vf_ver;
 	u32 driver_caps; /* reported by VF driver */
 	struct virtchnl_ether_addr dflt_lan_addr;
-	DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
-	DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+	DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+	DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
 	u16 port_vlan_info; /* Port VLAN ID and QoS */
 	u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
 	u8 trusted:1;
...
@@ -24,7 +24,7 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
 		   struct xdp_umem __always_unused *umem,
 		   u16 __always_unused qid)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline void
@@ -63,7 +63,7 @@ static inline int
 ice_xsk_wakeup(struct net_device __always_unused *netdev,
 	       u32 __always_unused queue_id, u32 __always_unused flags)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 #define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
...