Commit 5f9721a2 authored by David S. Miller's avatar David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-02-19

This series contains updates to the ice driver only.

Avinash adds input validation for software DCB configurations received
via lldptool or pcap to ensure bad bandwidth inputs are not inputted
which could cause the loss of link.

Paul updates the malicious driver detection event messages to rate limit
once per second and to include the total number of receive|transmit MDD
event count.

Dan updates how TCAM entries are managed to ensure when overriding
pre-existing TCAM entries, properly delete the existing entry and remove
it from the change/update list.

Brett ensures we clear the relevant values in the QRXFLXP_CNTXT register
for VF queues to ensure the receive queue data is not stale.

Avinash adds required DCBNL operations for configuring ETS in software
DCB CEE mode.  Also added code to detect if DCB is in IEEE or CEE mode
to properly report what mode we are in.

Dave fixes the driver to properly report the current maximum TC, not the
maximum allowed number of TCs.

Krzysztof adds support for AF_XDP feature in the ice driver.

Jake increases the maximum time that the driver will wait for a PF reset
to account for possibility of a slightly longer than expected PF reset.

Jesse fixes a number of strings which did not have line feeds, so add
line feeds so that messages do not run together, creating a jumbled
mess.

Bruce adds support for additional E810 and E823 device ids.  Also
updated the product name change for E822 devices.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 2e92a2d0 2fbfa966
...@@ -212,6 +212,7 @@ enum ice_state { ...@@ -212,6 +212,7 @@ enum ice_state {
__ICE_SERVICE_SCHED, __ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS, __ICE_SERVICE_DIS,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
__ICE_STATE_NBITS /* must be last */ __ICE_STATE_NBITS /* must be last */
}; };
...@@ -340,6 +341,7 @@ enum ice_pf_flags { ...@@ -340,6 +341,7 @@ enum ice_pf_flags {
ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX, ICE_FLAG_LEGACY_RX,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */ ICE_PF_FLAGS_NBITS /* must be last */
}; };
...@@ -363,6 +365,8 @@ struct ice_pf { ...@@ -363,6 +365,8 @@ struct ice_pf {
u16 num_vfs_supported; /* num VFs supported for this PF */ u16 num_vfs_supported; /* num VFs supported for this PF */
u16 num_vf_qps; /* num queue pairs per VF */ u16 num_vf_qps; /* num queue pairs per VF */
u16 num_vf_msix; /* num vectors per VF */ u16 num_vf_msix; /* num vectors per VF */
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(state, __ICE_STATE_NBITS); DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
......
...@@ -203,8 +203,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw) ...@@ -203,8 +203,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
*/ */
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{ {
WARN_ONCE(ice_ring_is_xdp(ring) && tc, WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
"XDP ring can't belong to TC other than 0");
/* Idea here for calculation is that we subtract the number of queue /* Idea here for calculation is that we subtract the number of queue
* count from TC that ring belongs to from it's absolute queue index * count from TC that ring belongs to from it's absolute queue index
...@@ -386,8 +385,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring) ...@@ -386,8 +385,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Enable Flexible Descriptors in the queue context which /* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format * allows this driver to select a specific receive descriptor format
*/ */
regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
if (vsi->type != ICE_VSI_VF) { if (vsi->type != ICE_VSI_VF) {
regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M; QRXFLXP_CNTXT_RXDID_IDX_M;
...@@ -398,8 +397,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring) ...@@ -398,8 +397,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M; QRXFLXP_CNTXT_RXDID_PRIO_M;
wr32(hw, QRXFLXP_CNTXT(pf_q), regval); } else {
regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
QRXFLXP_CNTXT_RXDID_PRIO_M |
QRXFLXP_CNTXT_TS_M);
} }
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
/* Absolute queue number out of 2K needs to be passed */ /* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#include "ice_adminq_cmd.h" #include "ice_adminq_cmd.h"
#include "ice_flow.h" #include "ice_flow.h"
#define ICE_PF_RESET_WAIT_COUNT 200 #define ICE_PF_RESET_WAIT_COUNT 300
/** /**
* ice_set_mac_type - Sets MAC type * ice_set_mac_type - Sets MAC type
......
...@@ -62,6 +62,26 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg) ...@@ -62,6 +62,26 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
return ena_tc; return ena_tc;
} }
/**
 * ice_dcb_get_mode - gets the DCB mode
 * @port_info: pointer to port info structure
 * @host: if set it's HOST if not it's MANAGED
 *
 * Returns the DCBX capability flags: HOST or LLD_MANAGED ownership,
 * OR'd with the version bit (CEE or IEEE) read from the local DCBX config.
 */
static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
{
	u8 mode = host ? DCB_CAP_DCBX_HOST : DCB_CAP_DCBX_LLD_MANAGED;

	/* version bit comes from the currently negotiated DCBX mode */
	if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
		mode |= DCB_CAP_DCBX_VER_CEE;
	else
		mode |= DCB_CAP_DCBX_VER_IEEE;

	return mode;
}
/** /**
* ice_dcb_get_num_tc - Get the number of TCs from DCBX config * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
* @dcbcfg: config to retrieve number of TCs from * @dcbcfg: config to retrieve number of TCs from
...@@ -148,6 +168,43 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) ...@@ -148,6 +168,43 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
} }
} }
/**
 * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct
 * @pf: pointer to the PF struct
 * @dcbcfg: pointer to DCB config structure
 *
 * Validates the per-TC ETS bandwidth table. Single-TC and all-zero
 * configurations are silently fixed up by assigning 100% bandwidth to TC0.
 *
 * Return: 0 on success (possibly after fixing up the table), -EINVAL if the
 * per-TC bandwidths are non-zero but do not sum to exactly 100.
 */
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
{
	struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
	u8 num_tc, total_bw = 0;
	int i;

	/* returns number of contiguous TCs and 1 TC for non-contiguous TCs,
	 * since at least 1 TC has to be configured
	 */
	num_tc = ice_dcb_get_num_tc(dcbcfg);

	/* no bandwidth checks required if there's only one TC, so assign
	 * all bandwidth to TC0 and return
	 */
	if (num_tc == 1) {
		etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
		return 0;
	}

	for (i = 0; i < num_tc; i++)
		total_bw += etscfg->tcbwtable[i];

	if (!total_bw) {
		/* all-zero table: treat as "unconfigured" and give TC0 everything */
		etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
	} else if (total_bw != ICE_TC_MAX_BW) {
		dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n");
		return -EINVAL;
	}

	return 0;
}
/** /**
* ice_pf_dcb_cfg - Apply new DCB configuration * ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct * @pf: pointer to the PF struct
...@@ -182,6 +239,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) ...@@ -182,6 +239,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
return ret; return ret;
} }
if (ice_dcb_bwchk(pf, new_cfg))
return -EINVAL;
/* Store old config in case FW config fails */ /* Store old config in case FW config fails */
old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL); old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
if (!old_cfg) if (!old_cfg)
...@@ -605,14 +665,14 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) ...@@ -605,14 +665,14 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
ice_cfg_sw_lldp(pf_vsi, false, true); ice_cfg_sw_lldp(pf_vsi, false, true);
pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0; return 0;
} }
set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
/* DCBX in FW and LLDP enabled in FW */ /* DCBX/LLDP enabled in FW, set DCBNL mode advertisement */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; pf->dcbx_cap = ice_dcb_get_mode(port_info, false);
err = ice_dcb_init_cfg(pf, locked); err = ice_dcb_init_cfg(pf, locked);
if (err) if (err)
...@@ -772,6 +832,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, ...@@ -772,6 +832,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* No change detected in DCBX configs */ /* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) { if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n"); dev_dbg(dev, "No change detected in DCBX configuration.\n");
pf->dcbx_cap = ice_dcb_get_mode(pi, false);
goto out; goto out;
} }
......
...@@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); ...@@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index); u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked); ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
void ice_pf_dcb_recfg(struct ice_pf *pf); void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked); int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
......
...@@ -95,14 +95,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) ...@@ -95,14 +95,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
} }
/* max_tc is a 1-8 value count of number of TC's, not a 0-7 value if (ice_dcb_bwchk(pf, new_cfg)) {
* for the TC's index number. Add one to value if not zero, and err = -EINVAL;
* for zero set it to the FW's default value goto ets_out;
*/ }
if (max_tc)
max_tc++; max_tc = pf->hw.func_caps.common_cap.maxtc;
else
max_tc = IEEE_8021QAZ_MAX_TCS;
new_cfg->etscfg.maxtcs = max_tc; new_cfg->etscfg.maxtcs = max_tc;
...@@ -119,6 +117,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) ...@@ -119,6 +117,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
if (err == ICE_DCB_NO_HW_CHG) if (err == ICE_DCB_NO_HW_CHG)
err = ICE_DCB_HW_CHG_RST; err = ICE_DCB_HW_CHG_RST;
ets_out:
mutex_unlock(&pf->tc_mutex); mutex_unlock(&pf->tc_mutex);
return err; return err;
} }
...@@ -534,6 +533,30 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, ...@@ -534,6 +533,30 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio]; *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
} }
/**
 * ice_dcbnl_set_pg_tc_cfg_rx
 * @netdev: relevant netdev struct
 * @prio: corresponding user priority
 * @prio_type: the traffic priority type
 * @pgid: the PG ID
 * @bw_pct: BW percentage for corresponding BWG
 * @up_map: prio mapped to corresponding TC
 *
 * Deliberate no-op: lldpad requires this function pointer to be non-NULL
 * to complete CEE config, but Rx TC PG configuration is not supported.
 */
static void
ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
			   int __always_unused prio,
			   u8 __always_unused prio_type,
			   u8 __always_unused pgid,
			   u8 __always_unused bw_pct,
			   u8 __always_unused up_map)
{
	dev_dbg(ice_pf_to_dev(ice_netdev_to_pf(netdev)),
		"Rx TC PG Config Not Supported.\n");
}
/** /**
* ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
* @netdev: pointer to netdev struct * @netdev: pointer to netdev struct
...@@ -553,6 +576,23 @@ ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid, ...@@ -553,6 +576,23 @@ ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
*bw_pct = 0; *bw_pct = 0;
} }
/**
 * ice_dcbnl_set_pg_bwg_cfg_rx
 * @netdev: the corresponding netdev
 * @pgid: corresponding TC
 * @bw_pct: BW percentage for given TC
 *
 * Deliberate no-op: lldpad requires this function pointer to be non-NULL
 * to complete CEE config, but Rx BWG PG configuration is not supported.
 */
static void
ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
			    u8 __always_unused bw_pct)
{
	dev_dbg(ice_pf_to_dev(ice_netdev_to_pf(netdev)),
		"Rx BWG PG Config Not Supported.\n");
}
/** /**
* ice_dcbnl_get_cap - Get DCBX capabilities of adapter * ice_dcbnl_get_cap - Get DCBX capabilities of adapter
* @netdev: pointer to netdev struct * @netdev: pointer to netdev struct
...@@ -799,6 +839,8 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = { ...@@ -799,6 +839,8 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
.getpermhwaddr = ice_dcbnl_get_perm_hw_addr, .getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
.setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx, .setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
.setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx, .setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
.setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
.setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
.getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx, .getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
.getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx, .getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
.getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx, .getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
......
...@@ -5,12 +5,34 @@ ...@@ -5,12 +5,34 @@
#define _ICE_DEVIDS_H_ #define _ICE_DEVIDS_H_
/* Device IDs */ /* Device IDs */
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
#define ICE_DEV_ID_E823L_SFP 0x124D
/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
/* Intel(R) Ethernet Connection E823-L 1GbE */
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
/* Intel(R) Ethernet Controller E810-C for backplane */ /* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591 #define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */ /* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_E810C_QSFP 0x1592 #define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */ /* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593 #define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
/* Intel(R) Ethernet Connection E823-C for QSFP */
#define ICE_DEV_ID_E823C_QSFP 0x188B
/* Intel(R) Ethernet Connection E823-C for SFP */
#define ICE_DEV_ID_E823C_SFP 0x188C
/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
/* Intel(R) Ethernet Connection E823-C 1GbE */
#define ICE_DEV_ID_E823C_SGMII 0x188E
/* Intel(R) Ethernet Connection E822-C for backplane */ /* Intel(R) Ethernet Connection E822-C for backplane */
#define ICE_DEV_ID_E822C_BACKPLANE 0x1890 #define ICE_DEV_ID_E822C_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection E822-C for QSFP */ /* Intel(R) Ethernet Connection E822-C for QSFP */
...@@ -21,8 +43,8 @@ ...@@ -21,8 +43,8 @@
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893 #define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */ /* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894 #define ICE_DEV_ID_E822C_SGMII 0x1894
/* Intel(R) Ethernet Connection E822-X for backplane */ /* Intel(R) Ethernet Connection E822-L for backplane */
#define ICE_DEV_ID_E822X_BACKPLANE 0x1897 #define ICE_DEV_ID_E822L_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */ /* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898 #define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */ /* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
......
...@@ -157,6 +157,7 @@ struct ice_priv_flag { ...@@ -157,6 +157,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = { static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT), ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX), ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
}; };
...@@ -672,7 +673,7 @@ static u64 ice_loopback_test(struct net_device *netdev) ...@@ -672,7 +673,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info); test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
if (!test_vsi) { if (!test_vsi) {
netdev_err(netdev, "Failed to create a VSI for the loopback test"); netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
return 1; return 1;
} }
...@@ -731,7 +732,7 @@ static u64 ice_loopback_test(struct net_device *netdev) ...@@ -731,7 +732,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
devm_kfree(dev, tx_frame); devm_kfree(dev, tx_frame);
remove_mac_filters: remove_mac_filters:
if (ice_remove_mac(&pf->hw, &tmp_list)) if (ice_remove_mac(&pf->hw, &tmp_list))
netdev_err(netdev, "Could not remove MAC filter for the test VSI"); netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
free_mac_list: free_mac_list:
ice_free_fltr_list(dev, &tmp_list); ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis: lbtest_mac_dis:
...@@ -744,7 +745,7 @@ static u64 ice_loopback_test(struct net_device *netdev) ...@@ -744,7 +745,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
lbtest_vsi_close: lbtest_vsi_close:
test_vsi->netdev = NULL; test_vsi->netdev = NULL;
if (ice_vsi_release(test_vsi)) if (ice_vsi_release(test_vsi))
netdev_err(netdev, "Failed to remove the test VSI"); netdev_err(netdev, "Failed to remove the test VSI\n");
return ret; return ret;
} }
...@@ -834,7 +835,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, ...@@ -834,7 +835,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
int status = ice_open(netdev); int status = ice_open(netdev);
if (status) { if (status) {
dev_err(dev, "Could not open device %s, err %d", dev_err(dev, "Could not open device %s, err %d\n",
pf->int_name, status); pf->int_name, status);
} }
} }
......
...@@ -3470,6 +3470,24 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, ...@@ -3470,6 +3470,24 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
return 0; return 0;
} }
/**
 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
 * @hw: pointer to the HW struct
 * @idx: the index of the TCAM entry to remove
 * @chg: the list of change structures to search
 *
 * Deletes and frees every ICE_TCAM_ADD change record in @chg whose
 * tcam_idx matches @idx.
 */
static void
ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
{
	struct ice_chs_chg *entry, *next;

	/* safe iteration: matching entries are unlinked and freed in place */
	list_for_each_entry_safe(entry, next, chg, list_entry) {
		if (entry->type == ICE_TCAM_ADD && entry->tcam_idx == idx) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}
/** /**
* ice_prof_tcam_ena_dis - add enable or disable TCAM change * ice_prof_tcam_ena_dis - add enable or disable TCAM change
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
...@@ -3489,14 +3507,19 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, ...@@ -3489,14 +3507,19 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
enum ice_status status; enum ice_status status;
struct ice_chs_chg *p; struct ice_chs_chg *p;
/* Default: enable means change the low flag bit to don't care */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
/* if disabling, free the TCAM */ /* if disabling, free the TCAM */
if (!enable) { if (!enable) {
status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx); status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
/* if we have already created a change for this TCAM entry, then
* we need to remove that entry, in order to prevent writing to
* a TCAM entry we no longer will have ownership of.
*/
ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
tcam->tcam_idx = 0; tcam->tcam_idx = 0;
tcam->in_use = 0; tcam->in_use = 0;
return status; return status;
...@@ -3612,11 +3635,12 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, ...@@ -3612,11 +3635,12 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @blk: hardware block * @blk: hardware block
* @vsig: the VSIG to which this profile is to be added * @vsig: the VSIG to which this profile is to be added
* @hdl: the profile handle indicating the profile to add * @hdl: the profile handle indicating the profile to add
* @rev: true to add entries to the end of the list
* @chg: the change list * @chg: the change list
*/ */
static enum ice_status static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct list_head *chg) bool rev, struct list_head *chg)
{ {
/* Masks that ignore flags */ /* Masks that ignore flags */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
...@@ -3625,7 +3649,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, ...@@ -3625,7 +3649,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct ice_prof_map *map; struct ice_prof_map *map;
struct ice_vsig_prof *t; struct ice_vsig_prof *t;
struct ice_chs_chg *p; struct ice_chs_chg *p;
u16 i; u16 vsig_idx, i;
/* Get the details on the profile specified by the handle ID */ /* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl); map = ice_search_prof_id(hw, blk, hdl);
...@@ -3687,8 +3711,13 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, ...@@ -3687,8 +3711,13 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
} }
/* add profile to VSIG */ /* add profile to VSIG */
list_add(&t->list, vsig_idx = vsig & ICE_VSIG_IDX_M;
&hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst); if (rev)
list_add_tail(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
else
list_add(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
return 0; return 0;
...@@ -3728,7 +3757,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, ...@@ -3728,7 +3757,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
if (status) if (status)
goto err_ice_create_prof_id_vsig; goto err_ice_create_prof_id_vsig;
status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg); status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
if (status) if (status)
goto err_ice_create_prof_id_vsig; goto err_ice_create_prof_id_vsig;
...@@ -3753,11 +3782,13 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, ...@@ -3753,11 +3782,13 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
* @blk: hardware block * @blk: hardware block
* @vsi: the initial VSI that will be in VSIG * @vsi: the initial VSI that will be in VSIG
* @lst: the list of profile that will be added to the VSIG * @lst: the list of profile that will be added to the VSIG
* @new_vsig: return of new VSIG
* @chg: the change list * @chg: the change list
*/ */
static enum ice_status static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
struct list_head *lst, struct list_head *chg) struct list_head *lst, u16 *new_vsig,
struct list_head *chg)
{ {
struct ice_vsig_prof *t; struct ice_vsig_prof *t;
enum ice_status status; enum ice_status status;
...@@ -3772,12 +3803,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, ...@@ -3772,12 +3803,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
return status; return status;
list_for_each_entry(t, lst, list) { list_for_each_entry(t, lst, list) {
/* Reverse the order here since we are copying the list */
status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie, status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
chg); true, chg);
if (status) if (status)
return status; return status;
} }
*new_vsig = vsig;
return 0; return 0;
} }
...@@ -3899,7 +3933,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) ...@@ -3899,7 +3933,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* not sharing entries and we can simply add the new * not sharing entries and we can simply add the new
* profile to the VSIG. * profile to the VSIG.
*/ */
status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg); status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
&chg);
if (status) if (status)
goto err_ice_add_prof_id_flow; goto err_ice_add_prof_id_flow;
...@@ -3910,7 +3945,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) ...@@ -3910,7 +3945,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
} else { } else {
/* No match, so we need a new VSIG */ /* No match, so we need a new VSIG */
status = ice_create_vsig_from_lst(hw, blk, vsi, status = ice_create_vsig_from_lst(hw, blk, vsi,
&union_lst, &chg); &union_lst, &vsig,
&chg);
if (status) if (status)
goto err_ice_add_prof_id_flow; goto err_ice_add_prof_id_flow;
...@@ -4076,7 +4112,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) ...@@ -4076,7 +4112,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* new VSIG and TCAM entries * new VSIG and TCAM entries
*/ */
status = ice_create_vsig_from_lst(hw, blk, vsi, status = ice_create_vsig_from_lst(hw, blk, vsi,
&copy, &chg); &copy, &vsig,
&chg);
if (status) if (status)
goto err_ice_rem_prof_id_flow; goto err_ice_rem_prof_id_flow;
......
...@@ -85,6 +85,7 @@ ...@@ -85,6 +85,7 @@
#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0) #define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0)
#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 #define QRXFLXP_CNTXT_RXDID_PRIO_S 8
#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8) #define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
#define QRXFLXP_CNTXT_TS_M BIT(11)
#define GLGEN_RSTAT 0x000B8188 #define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0) #define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
#define GLGEN_RSTCTL 0x000B8180 #define GLGEN_RSTCTL 0x000B8180
...@@ -217,6 +218,8 @@ ...@@ -217,6 +218,8 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) #define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) #define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) #define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
#define GL_MDCK_TX_TDPU 0x00049348
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00 #define GL_MDET_RX 0x00294C00
#define GL_MDET_RX_QNUM_S 0 #define GL_MDET_RX_QNUM_S 0
#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0) #define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
......
...@@ -1187,20 +1187,28 @@ static void ice_service_timer(struct timer_list *t) ...@@ -1187,20 +1187,28 @@ static void ice_service_timer(struct timer_list *t)
* ice_handle_mdd_event - handle malicious driver detect event * ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure * @pf: pointer to the PF structure
* *
* Called from service task. OICR interrupt handler indicates MDD event * Called from service task. OICR interrupt handler indicates MDD event.
* VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
* messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
* disable the queue, the PF can be configured to reset the VF using ethtool
* private flag mdd-auto-reset-vf.
*/ */
static void ice_handle_mdd_event(struct ice_pf *pf) static void ice_handle_mdd_event(struct ice_pf *pf)
{ {
struct device *dev = ice_pf_to_dev(pf); struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
bool mdd_detected = false;
u32 reg; u32 reg;
int i; int i;
if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
/* Since the VF MDD event logging is rate limited, check if
* there are pending MDD events.
*/
ice_print_vfs_mdd_events(pf);
return; return;
}
/* find what triggered the MDD event */ /* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM); reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) { if (reg & GL_MDET_TX_PQM_VALID_M) {
u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
...@@ -1216,7 +1224,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf) ...@@ -1216,7 +1224,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num); event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff); wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
mdd_detected = true;
} }
reg = rd32(hw, GL_MDET_TX_TCLAN); reg = rd32(hw, GL_MDET_TX_TCLAN);
...@@ -1234,7 +1241,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf) ...@@ -1234,7 +1241,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num); event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
mdd_detected = true;
} }
reg = rd32(hw, GL_MDET_RX); reg = rd32(hw, GL_MDET_RX);
...@@ -1252,85 +1258,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf) ...@@ -1252,85 +1258,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num); event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_RX, 0xffffffff); wr32(hw, GL_MDET_RX, 0xffffffff);
mdd_detected = true;
} }
if (mdd_detected) { /* check to see if this PF caused an MDD event */
bool pf_mdd_detected = false; reg = rd32(hw, PF_MDET_TX_PQM);
if (reg & PF_MDET_TX_PQM_VALID_M) {
reg = rd32(hw, PF_MDET_TX_PQM); wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
if (reg & PF_MDET_TX_PQM_VALID_M) { if (netif_msg_tx_err(pf))
wr32(hw, PF_MDET_TX_PQM, 0xFFFF); dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
dev_info(dev, "TX driver issue detected, PF reset issued\n"); }
pf_mdd_detected = true;
}
reg = rd32(hw, PF_MDET_TX_TCLAN); reg = rd32(hw, PF_MDET_TX_TCLAN);
if (reg & PF_MDET_TX_TCLAN_VALID_M) { if (reg & PF_MDET_TX_TCLAN_VALID_M) {
wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
dev_info(dev, "TX driver issue detected, PF reset issued\n"); if (netif_msg_tx_err(pf))
pf_mdd_detected = true; dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
} }
reg = rd32(hw, PF_MDET_RX); reg = rd32(hw, PF_MDET_RX);
if (reg & PF_MDET_RX_VALID_M) { if (reg & PF_MDET_RX_VALID_M) {
wr32(hw, PF_MDET_RX, 0xFFFF); wr32(hw, PF_MDET_RX, 0xFFFF);
dev_info(dev, "RX driver issue detected, PF reset issued\n"); if (netif_msg_rx_err(pf))
pf_mdd_detected = true; dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
}
/* Queue belongs to the PF initiate a reset */
if (pf_mdd_detected) {
set_bit(__ICE_NEEDS_RESTART, pf->state);
ice_service_task_schedule(pf);
}
} }
/* check to see if one of the VFs caused the MDD */ /* Check to see if one of the VFs caused an MDD event, and then
* increment counters and set print pending
*/
ice_for_each_vf(pf, i) { ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i]; struct ice_vf *vf = &pf->vf[i];
bool vf_mdd_detected = false;
reg = rd32(hw, VP_MDET_TX_PQM(i)); reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) { if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
vf_mdd_detected = true; vf->mdd_tx_events.count++;
dev_info(dev, "TX driver issue detected on VF %d\n", set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
i); if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
i);
} }
reg = rd32(hw, VP_MDET_TX_TCLAN(i)); reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) { if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
vf_mdd_detected = true; vf->mdd_tx_events.count++;
dev_info(dev, "TX driver issue detected on VF %d\n", set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
i); if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
i);
} }
reg = rd32(hw, VP_MDET_TX_TDPU(i)); reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) { if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
vf_mdd_detected = true; vf->mdd_tx_events.count++;
dev_info(dev, "TX driver issue detected on VF %d\n", set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
i); if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
i);
} }
reg = rd32(hw, VP_MDET_RX(i)); reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) { if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF); wr32(hw, VP_MDET_RX(i), 0xFFFF);
vf_mdd_detected = true; vf->mdd_rx_events.count++;
dev_info(dev, "RX driver issue detected on VF %d\n", set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
i); if (netif_msg_rx_err(pf))
} dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
i);
if (vf_mdd_detected) {
vf->num_mdd_events++; /* Since the queue is disabled on VF Rx MDD events, the
if (vf->num_mdd_events && * PF can be configured to reset the VF through ethtool
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD) * private flag mdd-auto-reset-vf.
dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n", */
i, vf->num_mdd_events); if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
ice_reset_vf(&pf->vf[i], false);
} }
} }
ice_print_vfs_mdd_events(pf);
} }
/** /**
...@@ -1918,8 +1924,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, ...@@ -1918,8 +1924,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) { if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
ret = ice_down(vsi); ret = ice_down(vsi);
if (ret) { if (ret) {
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
"Preparing device for XDP attach failed");
return ret; return ret;
} }
} }
...@@ -1928,13 +1933,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, ...@@ -1928,13 +1933,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
vsi->num_xdp_txq = vsi->alloc_txq; vsi->num_xdp_txq = vsi->alloc_txq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err) if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
"Setting up XDP Tx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) { } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi); xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err) if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
"Freeing XDP Tx resources failed");
} else { } else {
ice_vsi_assign_bpf_prog(vsi, prog); ice_vsi_assign_bpf_prog(vsi, prog);
} }
...@@ -1967,8 +1970,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) ...@@ -1967,8 +1970,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct ice_vsi *vsi = np->vsi; struct ice_vsi *vsi = np->vsi;
if (vsi->type != ICE_VSI_PF) { if (vsi->type != ICE_VSI_PF) {
NL_SET_ERR_MSG_MOD(xdp->extack, NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
"XDP can be loaded only on PF VSI");
return -EINVAL; return -EINVAL;
} }
...@@ -1995,6 +1997,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf) ...@@ -1995,6 +1997,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
u32 val; u32 val;
/* Disable anti-spoof detection interrupt to prevent spurious event
* interrupts during a function reset. Anti-spoof functionally is
* still supported.
*/
val = rd32(hw, GL_MDCK_TX_TDPU);
val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
wr32(hw, GL_MDCK_TX_TDPU, val);
/* clear things first */ /* clear things first */
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */ rd32(hw, PFINT_OICR); /* read to clear */
...@@ -3542,15 +3552,26 @@ static const struct pci_device_id ice_pci_tbl[] = { ...@@ -3542,15 +3552,26 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
/* required last entry */ /* required last entry */
{ 0, } { 0, }
}; };
......
...@@ -289,17 +289,31 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) ...@@ -289,17 +289,31 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
switch (hw->device_id) {
/* the following devices do not have boot_cfg_tlv yet */ /* the following devices do not have boot_cfg_tlv yet */
if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE || case ICE_DEV_ID_E823C_BACKPLANE:
hw->device_id == ICE_DEV_ID_E822C_QSFP || case ICE_DEV_ID_E823C_QSFP:
hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T || case ICE_DEV_ID_E823C_SFP:
hw->device_id == ICE_DEV_ID_E822C_SGMII || case ICE_DEV_ID_E823C_10G_BASE_T:
hw->device_id == ICE_DEV_ID_E822C_SFP || case ICE_DEV_ID_E823C_SGMII:
hw->device_id == ICE_DEV_ID_E822X_BACKPLANE || case ICE_DEV_ID_E822C_BACKPLANE:
hw->device_id == ICE_DEV_ID_E822L_SFP || case ICE_DEV_ID_E822C_QSFP:
hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T || case ICE_DEV_ID_E822C_10G_BASE_T:
hw->device_id == ICE_DEV_ID_E822L_SGMII) case ICE_DEV_ID_E822C_SGMII:
case ICE_DEV_ID_E822C_SFP:
case ICE_DEV_ID_E822L_BACKPLANE:
case ICE_DEV_ID_E822L_SFP:
case ICE_DEV_ID_E822L_10G_BASE_T:
case ICE_DEV_ID_E822L_SGMII:
case ICE_DEV_ID_E823L_BACKPLANE:
case ICE_DEV_ID_E823L_SFP:
case ICE_DEV_ID_E823L_10G_BASE_T:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823L_QSFP:
return status; return status;
default:
break;
}
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len, status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR); ICE_SR_BOOT_CFG_PTR);
......
...@@ -171,6 +171,11 @@ static void ice_free_vf_res(struct ice_vf *vf) ...@@ -171,6 +171,11 @@ static void ice_free_vf_res(struct ice_vf *vf)
} }
last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1; last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
/* clear VF MDD event information */
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
/* Disable interrupts so that VF starts in a known state */ /* Disable interrupts so that VF starts in a known state */
for (i = vf->first_vector_idx; i <= last_vector_idx; i++) { for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
...@@ -1175,7 +1180,7 @@ static bool ice_is_vf_disabled(struct ice_vf *vf) ...@@ -1175,7 +1180,7 @@ static bool ice_is_vf_disabled(struct ice_vf *vf)
* *
* Returns true if the VF is reset, false otherwise. * Returns true if the VF is reset, false otherwise.
*/ */
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{ {
struct ice_pf *pf = vf->pf; struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi; struct ice_vsi *vsi;
...@@ -2013,7 +2018,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) ...@@ -2013,7 +2018,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) { if (status) {
dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d", dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status); ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
ret = -EIO; ret = -EIO;
goto out; goto out;
...@@ -3529,3 +3534,52 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id, ...@@ -3529,3 +3534,52 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
return 0; return 0;
} }
/**
* ice_print_vfs_mdd_event - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
* Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
*/
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int i;
/* check that there are pending MDD events to print */
if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
return;
/* VF MDD event logs are rate limited to one second intervals */
if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
return;
pf->last_printed_mdd_jiffies = jiffies;
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
/* only print Rx MDD event message if there are new events */
if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
vf->mdd_rx_events.last_printed =
vf->mdd_rx_events.count;
dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
vf->mdd_rx_events.count, hw->pf_id, i,
vf->dflt_lan_addr.addr,
test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
? "on" : "off");
}
/* only print Tx MDD event message if there are new events */
if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
vf->mdd_tx_events.last_printed =
vf->mdd_tx_events.count;
dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
vf->mdd_tx_events.count, hw->pf_id, i,
vf->dflt_lan_addr.addr);
}
}
}
...@@ -55,6 +55,13 @@ enum ice_virtchnl_cap { ...@@ -55,6 +55,13 @@ enum ice_virtchnl_cap {
ICE_VIRTCHNL_VF_CAP_PRIVILEGE, ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
}; };
/* VF MDD events print structure */
struct ice_mdd_vf_events {
u16 count; /* total count of Rx|Tx events */
/* count number of the last printed event */
u16 last_printed;
};
/* VF information structure */ /* VF information structure */
struct ice_vf { struct ice_vf {
struct ice_pf *pf; struct ice_pf *pf;
...@@ -83,13 +90,14 @@ struct ice_vf { ...@@ -83,13 +90,14 @@ struct ice_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */ u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */ u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* VF's adv. capabilities */ unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */ u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac; u16 num_mac;
u16 num_vf_qs; /* num of queue configured per VF */ u16 num_vf_qs; /* num of queue configured per VF */
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
}; };
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
...@@ -104,6 +112,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); ...@@ -104,6 +112,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
int int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
...@@ -123,7 +132,7 @@ ice_get_vf_stats(struct net_device *netdev, int vf_id, ...@@ -123,7 +132,7 @@ ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats); struct ifla_vf_stats *vf_stats);
void void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
#else /* CONFIG_PCI_IOV */ #else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0) #define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0)
...@@ -132,6 +141,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); ...@@ -132,6 +141,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
#define ice_vc_notify_reset(pf) do {} while (0) #define ice_vc_notify_reset(pf) do {} while (0)
#define ice_set_vf_state_qs_dis(vf) do {} while (0) #define ice_set_vf_state_qs_dis(vf) do {} while (0)
#define ice_vf_lan_overflow_event(pf, event) do {} while (0) #define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
static inline bool static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf, ice_reset_all_vfs(struct ice_pf __always_unused *pf,
...@@ -140,6 +150,12 @@ ice_reset_all_vfs(struct ice_pf __always_unused *pf, ...@@ -140,6 +150,12 @@ ice_reset_all_vfs(struct ice_pf __always_unused *pf,
return true; return true;
} }
static inline bool
ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
{
return true;
}
static inline int static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev, ice_sriov_configure(struct pci_dev __always_unused *pdev,
int __always_unused num_vfs) int __always_unused num_vfs)
......
...@@ -457,7 +457,7 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) ...@@ -457,7 +457,7 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (if_running) { if (if_running) {
ret = ice_qp_dis(vsi, qid); ret = ice_qp_dis(vsi, qid);
if (ret) { if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret); netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_umem_if_up; goto xsk_umem_if_up;
} }
} }
...@@ -471,11 +471,11 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) ...@@ -471,11 +471,11 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (!ret && umem_present) if (!ret && umem_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi); napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret) else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret); netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
} }
if (umem_failure) { if (umem_failure) {
netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d", netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
umem_present ? "en" : "dis", umem_failure); umem_present ? "en" : "dis", umem_failure);
return umem_failure; return umem_failure;
} }
...@@ -937,6 +937,15 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) ...@@ -937,6 +937,15 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit); ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
else
xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
return (int)total_rx_packets;
}
return failure ? budget : (int)total_rx_packets; return failure ? budget : (int)total_rx_packets;
} }
...@@ -988,6 +997,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) ...@@ -988,6 +997,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) { if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring); ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem); xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
} }
return budget > 0 && work_done; return budget > 0 && work_done;
...@@ -1063,6 +1074,13 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) ...@@ -1063,6 +1074,13 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames) if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames); xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
else
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes); ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK); xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment