Commit 3bbefbbd authored by Jakub Kicinski

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-08-30 (igc, e1000e, i40e)

This series contains updates to igc, e1000e, and i40e drivers.

Kurt Kanzenbach adds MQPRIO offload support and gets rid of spurious
interrupts on igc.

Sasha adds reporting of EEE (Energy Efficient Ethernet) ability and
moves a register define to a better-suited file for igc.

Vitaly stops reporting errors on shutdown and suspend as they are not
fatal for e1000e.

Alex adds reporting of EEE to i40e.

* '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  i40e: Add Energy Efficient Ethernet ability for X710 Base-T/KR/KX cards
  e1000e: avoid failing the system during pm_suspend
  igc: Move the MULTI GBT AN Control Register to _regs file
  igc: Add Energy Efficient Ethernet ability
  igc: Get rid of spurious interrupts
  igc: Add MQPRIO offload support
====================

Link: https://patch.msgid.link/20240830210451.2375215-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 7f85b112 0568ee11
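
As a quick way to inspect the EEE ability that the igc and i40e patches below now report, a minimal userspace sketch using the legacy ETHTOOL_GEEE ioctl might look as follows. This is not part of the series; the interface name "eth0" is a placeholder, and the legacy u32 bitmaps cannot express the 2500baseT mode that igc also reports.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_eee eee = { .cmd = ETHTOOL_GEEE };
	struct ifreq ifr = { 0 };
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* "eth0" is a placeholder; use the igc/i40e netdev name. */
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eee;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL(ETHTOOL_GEEE)");
		close(fd);
		return 1;
	}

	/* Legacy u32 link-mode bitmaps; in-kernel the drivers fill the
	 * ethtool_keee linkmode fields used by this series.
	 */
	printf("EEE enabled=%u active=%u advertised=0x%x lp_advertised=0x%x\n",
	       eee.eee_enabled, eee.eee_active, eee.advertised,
	       eee.lp_advertised);

	close(fd);
	return 0;
}

Running "ethtool --show-eee <iface>" exercises the same path through the drivers' get_eee() callbacks touched below.
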
@@ -6671,8 +6671,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
if (retval)
return retval;
if (retval) {
e_err("Failed to enable wakeup\n");
goto skip_phy_configurations;
}
} else {
/* enable wakeup by the MAC */
ew32(WUFC, wufc);
@@ -6693,8 +6695,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
if (retval)
return retval;
if (retval) {
e_err("Failed to enable ULP\n");
goto skip_phy_configurations;
}
}
}
@@ -6726,6 +6730,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
hw->phy.ops.release(hw);
}
skip_phy_configurations:
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -6968,15 +6973,13 @@ static int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
if (rc) {
e1000e_pm_thaw(dev);
} else {
if (!rc) {
/* Introduce S0ix implementation */
if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_entry_flow(adapter);
}
return rc;
return 0;
}
static int e1000e_pm_resume(struct device *dev)
......
@@ -4,6 +4,7 @@
#ifndef _I40E_H_
#define _I40E_H_
#include <linux/linkmode.h>
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
......
@@ -5641,6 +5641,26 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
static void i40e_eee_capability_to_kedata_supported(__le16 eee_capability_,
unsigned long *supported)
{
const int eee_capability = le16_to_cpu(eee_capability_);
static const int lut[] = {
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
};
linkmode_zero(supported);
for (unsigned int i = ARRAY_SIZE(lut); i--; )
if (eee_capability & BIT(i + 1))
linkmode_set_bit(lut[i], supported);
}
static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -5648,7 +5668,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
int status = 0;
int status;
/* Get initial PHY capabilities */
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
@@ -5661,11 +5681,18 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
i40e_eee_capability_to_kedata_supported(phy_cfg.eee_capability,
edata->supported);
linkmode_copy(edata->lp_advertised, edata->supported);
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
linkmode_zero(edata->advertised);
if (phy_cfg.eee_capability)
linkmode_copy(edata->advertised, edata->supported);
edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
@@ -5681,10 +5708,11 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_ethtool_not_used {
u32 value;
bool value;
const char *name;
} param[] = {
{edata->tx_lpi_timer, "tx-timer"},
{!!(edata->advertised[0] & ~edata->supported[0]), "advertise"},
{!!edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
int i;
@@ -5710,7 +5738,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
__le16 eee_capability;
int status = 0;
int status;
/* Deny parameters we don't support */
if (i40e_is_eee_param_supported(netdev, edata))
......
@@ -7264,6 +7264,26 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
}
#endif /* CONFIG_I40E_DCB */
static void i40e_print_link_message_eee(struct i40e_vsi *vsi,
const char *speed, const char *fc)
{
struct ethtool_keee kedata;
memzero_explicit(&kedata, sizeof(kedata));
if (vsi->netdev->ethtool_ops->get_eee)
vsi->netdev->ethtool_ops->get_eee(vsi->netdev, &kedata);
if (!linkmode_empty(kedata.supported))
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Flow Control: %s, EEE: %s\n",
speed, fc,
kedata.eee_enabled ? "Enabled" : "Disabled");
else
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
speed, fc);
}
/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
@@ -7395,9 +7415,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
"NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, req_fec, fec, an, fc);
} else {
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
speed, fc);
i40e_print_link_message_eee(vsi, speed, fc);
}
}
......
@@ -259,6 +259,10 @@ struct igc_adapter {
*/
spinlock_t qbv_tx_lock;
bool strict_priority_enable;
u8 num_tc;
u16 queue_per_tc[IGC_MAX_TX_QUEUES];
/* OS defined structs */
struct pci_dev *pdev;
/* lock for statistics */
@@ -382,9 +386,11 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
IGC_FLAG_TSN_LEGACY_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -681,6 +687,7 @@ enum igc_ring_flags_t {
IGC_RING_FLAG_TX_DETECT_HANG,
IGC_RING_FLAG_AF_XDP_ZC,
IGC_RING_FLAG_TX_HWTSTAMP,
IGC_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
......
@@ -4,6 +4,8 @@
#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_
#include <linux/bitfield.h>
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
@@ -176,7 +178,6 @@
/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
@@ -553,6 +554,15 @@
#define IGC_MAX_SR_QUEUES 2
#define IGC_TXARB_TXQ_PRIO_0_MASK GENMASK(1, 0)
#define IGC_TXARB_TXQ_PRIO_1_MASK GENMASK(3, 2)
#define IGC_TXARB_TXQ_PRIO_2_MASK GENMASK(5, 4)
#define IGC_TXARB_TXQ_PRIO_3_MASK GENMASK(7, 6)
#define IGC_TXARB_TXQ_PRIO_0(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_0_MASK, (x))
#define IGC_TXARB_TXQ_PRIO_1(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_1_MASK, (x))
#define IGC_TXARB_TXQ_PRIO_2(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_2_MASK, (x))
#define IGC_TXARB_TXQ_PRIO_3(x) FIELD_PREP(IGC_TXARB_TXQ_PRIO_3_MASK, (x))
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
@@ -641,6 +651,16 @@
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_ERROR 0x40000000
/* EEE Link Ability */
#define IGC_EEE_2500BT_MASK BIT(0)
#define IGC_EEE_1000BT_MASK BIT(2)
#define IGC_EEE_100BT_MASK BIT(1)
/* EEE Link-Partner Ability */
#define IGC_LP_EEE_2500BT_MASK BIT(0)
#define IGC_LP_EEE_1000BT_MASK BIT(2)
#define IGC_LP_EEE_100BT_MASK BIT(1)
#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
......
@@ -1540,6 +1540,10 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
/* Do not allow channel reconfiguration when mqprio is enabled */
if (adapter->strict_priority_enable)
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */
max_combined = igc_get_max_rss_queues(adapter);
if (count > max_combined)
@@ -1627,8 +1631,11 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
u32 eeer;
struct igc_phy_info *phy = &hw->phy;
u16 eee_advert, eee_lp_advert;
u32 eeer, ret_val;
/* EEE supported */
linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
edata->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
@@ -1636,6 +1643,74 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
edata->supported);
/* EEE Advertisement 1 - reg 7.60 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
IGC_ANEG_EEE_AB1,
&eee_advert);
if (ret_val) {
netdev_err(adapter->netdev,
"Failed to read IEEE 7.60 register\n");
return -EINVAL;
}
if (eee_advert & IGC_EEE_1000BT_MASK)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
edata->advertised);
if (eee_advert & IGC_EEE_100BT_MASK)
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
edata->advertised);
/* EEE Advertisement 2 - reg 7.62 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
IGC_ANEG_EEE_AB2,
&eee_advert);
if (ret_val) {
netdev_err(adapter->netdev,
"Failed to read IEEE 7.62 register\n");
return -EINVAL;
}
if (eee_advert & IGC_EEE_2500BT_MASK)
linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
edata->advertised);
/* EEE Link-Partner Ability 1 - reg 7.61 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
IGC_ANEG_EEE_LP_AB1,
&eee_lp_advert);
if (ret_val) {
netdev_err(adapter->netdev,
"Failed to read IEEE 7.61 register\n");
return -EINVAL;
}
if (eee_lp_advert & IGC_LP_EEE_1000BT_MASK)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
edata->lp_advertised);
if (eee_lp_advert & IGC_LP_EEE_100BT_MASK)
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
edata->lp_advertised);
/* EEE Link-Partner Ability 2 - reg 7.63 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
IGC_ANEG_EEE_LP_AB2,
&eee_lp_advert);
if (ret_val) {
netdev_err(adapter->netdev,
"Failed to read IEEE 7.63 register\n");
return -EINVAL;
}
if (eee_lp_advert & IGC_LP_EEE_2500BT_MASK)
linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
edata->lp_advertised);
eeer = rd32(IGC_EEER);
/* EEE status on negotiated link */
......
@@ -2191,6 +2191,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -2207,6 +2208,7 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
__free_page(page);
rx_ring->rx_stats.alloc_failed++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -2658,6 +2660,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -2738,6 +2741,7 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
skb = igc_construct_skb_zc(ring, xdp);
if (!skb) {
ring->rx_stats.alloc_failed++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
@@ -5807,11 +5811,29 @@ static void igc_watchdog_task(struct work_struct *work)
if (adapter->flags & IGC_FLAG_HAS_MSIX) {
u32 eics = 0;
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
wr32(IGC_EICS, eics);
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igc_q_vector *q_vector = adapter->q_vector[i];
struct igc_ring *rx_ring;
if (!q_vector->rx.ring)
continue;
rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
eics |= q_vector->eims_value;
clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
}
}
if (eics)
wr32(IGC_EICS, eics);
} else {
wr32(IGC_ICS, IGC_ICS_RXDMT0);
struct igc_ring *rx_ring = adapter->rx_ring[0];
if (test_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
clear_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
wr32(IGC_ICS, IGC_ICS_RXDMT0);
}
}
igc_ptp_tx_hang(adapter);
@@ -6515,6 +6537,13 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
switch (base->type) {
case TC_SETUP_QDISC_MQPRIO: {
struct tc_mqprio_caps *caps = base->caps;
caps->validate_queue_counts = true;
return 0;
}
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -6532,6 +6561,65 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
}
}
static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc,
u16 *offset)
{
int i;
adapter->strict_priority_enable = true;
adapter->num_tc = num_tc;
for (i = 0; i < num_tc; i++)
adapter->queue_per_tc[i] = offset[i];
}
static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio)
{
struct igc_hw *hw = &adapter->hw;
int i;
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
if (!mqprio->qopt.num_tc) {
adapter->strict_priority_enable = false;
goto apply;
}
/* There are as many TCs as Tx queues. */
if (mqprio->qopt.num_tc != adapter->num_tx_queues) {
NL_SET_ERR_MSG_FMT_MOD(mqprio->extack,
"Only %d traffic classes supported",
adapter->num_tx_queues);
return -EOPNOTSUPP;
}
/* Only one queue per TC is supported. */
for (i = 0; i < mqprio->qopt.num_tc; i++) {
if (mqprio->qopt.count[i] != 1) {
NL_SET_ERR_MSG_MOD(mqprio->extack,
"Only one queue per TC supported");
return -EOPNOTSUPP;
}
}
/* Preemption is not supported yet. */
if (mqprio->preemptible_tcs) {
NL_SET_ERR_MSG_MOD(mqprio->extack,
"Preemption is not supported yet");
return -EOPNOTSUPP;
}
igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
mqprio->qopt.offset);
mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
apply:
return igc_tsn_offload_apply(adapter);
}
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -6551,6 +6639,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
case TC_SETUP_QDISC_CBS:
return igc_tsn_enable_cbs(adapter, type_data);
case TC_SETUP_QDISC_MQPRIO:
return igc_tsn_enable_mqprio(adapter, type_data);
default:
return -EOPNOTSUPP;
}
......
@@ -240,7 +240,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
ANEG_MULTIGBT_AN_CTRL,
IGC_ANEG_MULTIGBT_AN_CTRL,
&aneg_multigbt_an_ctrl);
if (ret_val)
@@ -380,7 +380,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
ANEG_MULTIGBT_AN_CTRL,
IGC_ANEG_MULTIGBT_AN_CTRL,
aneg_multigbt_an_ctrl);
return ret_val;
......
@@ -238,6 +238,8 @@
#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40))
#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40))
#define IGC_TXARB 0x3354 /* Tx Arbitration Control TxARB - RW */
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */
@@ -308,6 +310,16 @@
#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */
#define IGC_EEE_SU 0x0E34 /* EEE Setup */
/* MULTI GBT AN Control Register - reg. 7.32 */
#define IGC_ANEG_MULTIGBT_AN_CTRL 0x0020
/* EEE ANeg Advertisement Register - reg 7.60 and reg 7.62 */
#define IGC_ANEG_EEE_AB1 0x003c
#define IGC_ANEG_EEE_AB2 0x003e
/* EEE ANeg Link-Partner Advertisement Register - reg 7.61 and reg 7.63 */
#define IGC_ANEG_EEE_LP_AB1 0x003d
#define IGC_ANEG_EEE_LP_AB2 0x003f
/* LTR registers */
#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
......
@@ -46,6 +46,9 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
if (adapter->strict_priority_enable)
new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED;
return new_flags;
}
@@ -102,11 +105,32 @@ bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
adapter->taprio_offload_enable;
}
static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
{
struct igc_hw *hw = &adapter->hw;
u32 txarb;
txarb = rd32(IGC_TXARB);
txarb &= ~(IGC_TXARB_TXQ_PRIO_0_MASK |
IGC_TXARB_TXQ_PRIO_1_MASK |
IGC_TXARB_TXQ_PRIO_2_MASK |
IGC_TXARB_TXQ_PRIO_3_MASK);
txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]);
txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]);
txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]);
txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]);
wr32(IGC_TXARB, txarb);
}
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
u16 queue_per_tc[4] = { 3, 2, 1, 0 };
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl;
int i;
@@ -133,7 +157,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
/* Reset mqprio TC configuration. */
netdev_reset_tc(adapter->netdev);
/* Restore the default Tx arbitration: Priority 0 has the highest
* priority and is assigned to queue 0 and so on and so forth.
*/
igc_tsn_tx_arb(adapter, queue_per_tc);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED;
return 0;
}
@@ -172,6 +205,40 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
if (adapter->strict_priority_enable) {
int err;
err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
if (err)
return err;
for (i = 0; i < adapter->num_tc; i++) {
err = netdev_set_tc_queue(adapter->netdev, i, 1,
adapter->queue_per_tc[i]);
if (err)
return err;
}
/* In case the card is configured with less than four queues. */
for (; i < IGC_MAX_TX_QUEUES; i++)
adapter->queue_per_tc[i] = i;
/* Configure queue priorities according to the user provided
* mapping.
*/
igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
/* Enable legacy TSN mode which will do strict priority without
* any other TSN features.
*/
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN;
tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV;
wr32(IGC_TQAVCTRL, tqavctrl);
return 0;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
......