Commit f89370d4 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2020-04-17

This series contains updates to e1000e and igc only.

Sasha adds partial generic segmentation offload (GSO partial) support to
the igc driver, along with support for translating taprio schedules into
i225 cycles.  He also cleans up dead code and unused defines in the igc
driver, refactors the code to avoid forward declarations where possible,
and enables the NETIF_F_HW_TC flag for igc by default.
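
The schedule translation boils down to reducing an absolute transmit
time to an offset within the current Qbv cycle.  A minimal sketch of
that arithmetic, as a hypothetical stand-alone C program (the names and
values here are illustrative only; the in-driver version is
igc_tx_launchtime() in the diff below, which works on ktime_t via
div_s64_rem()):

  #include <stdint.h>
  #include <stdio.h>

  /* Reduce an absolute tx time to an offset inside the current cycle. */
  static uint32_t launchtime_within_cycle(int64_t txtime, int64_t base_time,
                                          int64_t cycle_time)
  {
          int64_t delta = txtime - base_time;

          return (uint32_t)(delta % cycle_time);
  }

  int main(void)
  {
          /* 1 ms cycle starting at t=0: a tx time of 2.5 ms lands
           * 0.5 ms into the current cycle.
           */
          printf("%u\n", launchtime_within_cycle(2500000, 0, 1000000));
          return 0;
  }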

Vinicius adds support for ETF offloading, using an approach similar to
the one taken for taprio offload.

Kees Cook fixes a clang warning in the e1000e driver by moving declared
variables either into the switch case that uses them or up into the main
function body, to help the compiler.
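
The shape of that fix, as a hypothetical stand-alone C program (the
function and constants below are stand-ins, not the driver code): the
declaration moves into a braced block owned by the case that uses it,
instead of sitting between "switch" and the first label, where clang
warns that it is never reached:

  #include <stdio.h>

  static unsigned int pick_pull_size(int mac_type, unsigned int len)
  {
          switch (mac_type) {
          case 1: {                       /* stands in for e1000_82544 */
                  unsigned int pull_size; /* scoped to this case only */

                  pull_size = len < 64 ? len : 64;
                  return pull_size;
          }
          default:
                  return len;
          }
  }

  int main(void)
  {
          printf("%u\n", pick_pull_size(1, 128)); /* prints 64 */
          return 0;
  }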

Andre fixes register overwriting when dumping registers via ethtool for
the igc driver.  He also fixes ethtool Network Flow Classification (NFC)
queue redirection by adding the missing code needed to enable the queue
selection feature of the Receive Address High (RAH) register, and
removes the code bits meant to support tc-flower filters, since this
client part does not support them.
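
As a rough illustration of the queue selection feature, the sketch
below folds a queue number into a Receive Address High value using the
QSEL defines added by this series; rah_with_queue() is a hypothetical
helper, not the driver path (the real code is in igc_rar_set_index(),
shown in the diff below):

  #include <stdint.h>
  #include <stdio.h>

  /* Field layout taken from the defines added in this series. */
  #define IGC_RAH_QSEL_MASK   0x000C0000u
  #define IGC_RAH_QSEL_SHIFT  18
  #define IGC_RAH_QSEL_ENABLE (1u << 28)
  #define IGC_RAH_AV          0x80000000u /* address valid */

  static uint32_t rah_with_queue(uint32_t rah_high, uint8_t queue)
  {
          rah_high |= IGC_RAH_QSEL_MASK &
                      ((uint32_t)queue << IGC_RAH_QSEL_SHIFT);
          rah_high |= IGC_RAH_QSEL_ENABLE;
          return rah_high | IGC_RAH_AV;
  }

  int main(void)
  {
          printf("0x%08x\n", rah_with_queue(0, 2)); /* 0x90080000 */
          return 0;
  }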
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 123aff2a ac9156b2
@@ -3136,8 +3136,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		if (skb->data_len && hdr_len == len) {
 			switch (hw->mac_type) {
+			case e1000_82544: {
 				unsigned int pull_size;
 
-			case e1000_82544:
 				/* Make sure we have room to chop off 4 bytes,
 				 * and that the end alignment will work out to
 				 * this hardware's requirements
@@ -3158,6 +3159,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				}
 				len = skb_headlen(skb);
 				break;
+			}
 			default:
 				/* do nothing */
 				break;
...
@@ -8,4 +8,4 @@
 obj-$(CONFIG_IGC) += igc.o
 
 igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-	     igc_ethtool.o igc_ptp.o igc_dump.o
+	     igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o
@@ -19,8 +19,199 @@
 /* forward declaration */
 void igc_set_ethtool_ops(struct net_device *);
 
-struct igc_adapter;
-struct igc_ring;
+/* Transmit and receive queues */
+#define IGC_MAX_RX_QUEUES		4
+#define IGC_MAX_TX_QUEUES		4
+
+#define MAX_Q_VECTORS			8
+#define MAX_STD_JUMBO_FRAME_SIZE	9216
+
+#define MAX_ETYPE_FILTER		(4 - 1)
+#define IGC_RETA_SIZE			128
+
+struct igc_tx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 restart_queue;
+	u64 restart_queue2;
+};
+
+struct igc_rx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 drops;
+	u64 csum_err;
+	u64 alloc_failed;
+};
+
+struct igc_rx_packet_stats {
+	u64 ipv4_packets;	/* IPv4 headers processed */
+	u64 ipv4e_packets;	/* IPv4E headers with extensions processed */
+	u64 ipv6_packets;	/* IPv6 headers processed */
+	u64 ipv6e_packets;	/* IPv6E headers with extensions processed */
+	u64 tcp_packets;	/* TCP headers processed */
+	u64 udp_packets;	/* UDP headers processed */
+	u64 sctp_packets;	/* SCTP headers processed */
+	u64 nfs_packets;	/* NFS headers processed */
+	u64 other_packets;
+};
+
+struct igc_ring_container {
+	struct igc_ring *ring;		/* pointer to linked list of rings */
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 work_limit;			/* total work allowed per interrupt */
+	u8 count;			/* total number of rings in vector */
+	u8 itr;				/* current ITR setting for ring */
+};
+
+struct igc_ring {
+	struct igc_q_vector *q_vector;	/* backlink to q_vector */
+	struct net_device *netdev;	/* back pointer to net_device */
+	struct device *dev;		/* device for dma mapping */
+	union {				/* array of buffer info structs */
+		struct igc_tx_buffer *tx_buffer_info;
+		struct igc_rx_buffer *rx_buffer_info;
+	};
+	void *desc;			/* descriptor ring memory */
+	unsigned long flags;		/* ring specific flags */
+	void __iomem *tail;		/* pointer to ring tail register */
+	dma_addr_t dma;			/* phys address of the ring */
+	unsigned int size;		/* length of desc. ring in bytes */
+
+	u16 count;			/* number of desc. in the ring */
+	u8 queue_index;			/* logical index of the ring */
+	u8 reg_idx;			/* physical index of the ring */
+	bool launchtime_enable;		/* true if LaunchTime is enabled */
+
+	u32 start_time;
+	u32 end_time;
+
+	/* everything past this point is written often */
+	u16 next_to_clean;
+	u16 next_to_use;
+	u16 next_to_alloc;
+
+	union {
+		/* TX */
+		struct {
+			struct igc_tx_queue_stats tx_stats;
+			struct u64_stats_sync tx_syncp;
+			struct u64_stats_sync tx_syncp2;
+		};
+		/* RX */
+		struct {
+			struct igc_rx_queue_stats rx_stats;
+			struct igc_rx_packet_stats pkt_stats;
+			struct u64_stats_sync rx_syncp;
+			struct sk_buff *skb;
+		};
+	};
+} ____cacheline_internodealigned_in_smp;
+
+/* Board specific private data structure */
+struct igc_adapter {
+	struct net_device *netdev;
+
+	unsigned long state;
+	unsigned int flags;
+	unsigned int num_q_vectors;
+
+	struct msix_entry *msix_entries;
+
+	/* TX */
+	u16 tx_work_limit;
+	u32 tx_timeout_count;
+	int num_tx_queues;
+	struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
+
+	/* RX */
+	int num_rx_queues;
+	struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
+
+	struct timer_list watchdog_timer;
+	struct timer_list dma_err_timer;
+	struct timer_list phy_info_timer;
+
+	u32 wol;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
+
+	u8 port_num;
+
+	u8 __iomem *io_addr;
+	/* Interrupt Throttle Rate */
+	u32 rx_itr_setting;
+	u32 tx_itr_setting;
+
+	struct work_struct reset_task;
+	struct work_struct watchdog_task;
+	struct work_struct dma_err_task;
+	bool fc_autoneg;
+
+	u8 tx_timeout_factor;
+
+	int msg_enable;
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+	ktime_t base_time;
+	ktime_t cycle_time;
+
+	/* OS defined structs */
+	struct pci_dev *pdev;
+	/* lock for statistics */
+	spinlock_t stats64_lock;
+	struct rtnl_link_stats64 stats64;
+
+	/* structs defined in igc_hw.h */
+	struct igc_hw hw;
+	struct igc_hw_stats stats;
+
+	struct igc_q_vector *q_vector[MAX_Q_VECTORS];
+	u32 eims_enable_mask;
+	u32 eims_other;
+
+	u16 tx_ring_count;
+	u16 rx_ring_count;
+
+	u32 tx_hwtstamp_timeouts;
+	u32 tx_hwtstamp_skipped;
+	u32 rx_hwtstamp_cleared;
+
+	u32 rss_queues;
+	u32 rss_indir_tbl_init;
+
+	/* RX network flow classification support */
+	struct hlist_head nfc_filter_list;
+	unsigned int nfc_filter_count;
+	/* lock for RX network flow classification filter */
+	spinlock_t nfc_lock;
+	bool etype_bitmap[MAX_ETYPE_FILTER];
+
+	struct igc_mac_addr *mac_table;
+
+	u8 rss_indir_tbl[IGC_RETA_SIZE];
+
+	unsigned long link_check_timeout;
+	struct igc_info ei;
+
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_caps;
+	struct work_struct ptp_tx_work;
+	struct sk_buff *ptp_tx_skb;
+	struct hwtstamp_config tstamp_config;
+	unsigned long ptp_tx_start;
+	unsigned long last_rx_ptp_check;
+	unsigned long last_rx_timestamp;
+	unsigned int ptp_flags;
+	/* System time value lock */
+	spinlock_t tmreg_lock;
+	struct cyclecounter cc;
+	struct timecounter tc;
+};
+
 void igc_up(struct igc_adapter *adapter);
 void igc_down(struct igc_adapter *adapter);
@@ -50,14 +241,10 @@ extern char igc_driver_name[];
 extern char igc_driver_version[];
 
 #define IGC_REGS_LEN		740
-#define IGC_RETA_SIZE		128
 
 /* flags controlling PTP/1588 function */
 #define IGC_PTP_ENABLED		BIT(0)
 
-/* Interrupt defines */
-#define IGC_START_ITR		648 /* ~6000 ints/sec */
-
 /* Flags definitions */
 #define IGC_FLAG_HAS_MSI	BIT(0)
 #define IGC_FLAG_QUEUE_PAIRS	BIT(3)
@@ -70,6 +257,7 @@ extern char igc_driver_version[];
 #define IGC_FLAG_HAS_MSIX		BIT(13)
 #define IGC_FLAG_VLAN_PROMISC		BIT(15)
 #define IGC_FLAG_RX_LEGACY		BIT(16)
+#define IGC_FLAG_TSN_QBV_ENABLED	BIT(17)
 
 #define IGC_FLAG_RSS_FIELD_IPV4_UDP	BIT(6)
 #define IGC_FLAG_RSS_FIELD_IPV6_UDP	BIT(7)
@@ -78,6 +266,7 @@ extern char igc_driver_version[];
 #define IGC_MRQC_RSS_FIELD_IPV4_UDP	0x00400000
 #define IGC_MRQC_RSS_FIELD_IPV6_UDP	0x00800000
 
+/* Interrupt defines */
 #define IGC_START_ITR			648 /* ~6000 ints/sec */
 #define IGC_4K_ITR			980
 #define IGC_20K_ITR			196
@@ -99,13 +288,6 @@ extern char igc_driver_version[];
 #define IGC_MIN_RXD		80
 #define IGC_MAX_RXD		4096
 
-/* Transmit and receive queues */
-#define IGC_MAX_RX_QUEUES		4
-#define IGC_MAX_TX_QUEUES		4
-
-#define MAX_Q_VECTORS			8
-#define MAX_STD_JUMBO_FRAME_SIZE	9216
-
 /* Supported Rx Buffer Sizes */
 #define IGC_RXBUFFER_256	256
 #define IGC_RXBUFFER_2048	2048
@@ -232,83 +414,6 @@ struct igc_rx_buffer {
 	__u16 pagecnt_bias;
 };
 
-struct igc_tx_queue_stats {
-	u64 packets;
-	u64 bytes;
-	u64 restart_queue;
-	u64 restart_queue2;
-};
-
-struct igc_rx_queue_stats {
-	u64 packets;
-	u64 bytes;
-	u64 drops;
-	u64 csum_err;
-	u64 alloc_failed;
-};
-
-struct igc_rx_packet_stats {
-	u64 ipv4_packets;	/* IPv4 headers processed */
-	u64 ipv4e_packets;	/* IPv4E headers with extensions processed */
-	u64 ipv6_packets;	/* IPv6 headers processed */
-	u64 ipv6e_packets;	/* IPv6E headers with extensions processed */
-	u64 tcp_packets;	/* TCP headers processed */
-	u64 udp_packets;	/* UDP headers processed */
-	u64 sctp_packets;	/* SCTP headers processed */
-	u64 nfs_packets;	/* NFS headers processed */
-	u64 other_packets;
-};
-
-struct igc_ring_container {
-	struct igc_ring *ring;		/* pointer to linked list of rings */
-	unsigned int total_bytes;	/* total bytes processed this int */
-	unsigned int total_packets;	/* total packets processed this int */
-	u16 work_limit;			/* total work allowed per interrupt */
-	u8 count;			/* total number of rings in vector */
-	u8 itr;				/* current ITR setting for ring */
-};
-
-struct igc_ring {
-	struct igc_q_vector *q_vector;	/* backlink to q_vector */
-	struct net_device *netdev;	/* back pointer to net_device */
-	struct device *dev;		/* device for dma mapping */
-	union {				/* array of buffer info structs */
-		struct igc_tx_buffer *tx_buffer_info;
-		struct igc_rx_buffer *rx_buffer_info;
-	};
-	void *desc;			/* descriptor ring memory */
-	unsigned long flags;		/* ring specific flags */
-	void __iomem *tail;		/* pointer to ring tail register */
-	dma_addr_t dma;			/* phys address of the ring */
-	unsigned int size;		/* length of desc. ring in bytes */
-
-	u16 count;			/* number of desc. in the ring */
-	u8 queue_index;			/* logical index of the ring */
-	u8 reg_idx;			/* physical index of the ring */
-	bool launchtime_enable;		/* true if LaunchTime is enabled */
-
-	/* everything past this point is written often */
-	u16 next_to_clean;
-	u16 next_to_use;
-	u16 next_to_alloc;
-
-	union {
-		/* TX */
-		struct {
-			struct igc_tx_queue_stats tx_stats;
-			struct u64_stats_sync tx_syncp;
-			struct u64_stats_sync tx_syncp2;
-		};
-		/* RX */
-		struct {
-			struct igc_rx_queue_stats rx_stats;
-			struct igc_rx_packet_stats pkt_stats;
-			struct u64_stats_sync rx_syncp;
-			struct sk_buff *skb;
-		};
-	};
-} ____cacheline_internodealigned_in_smp;
-
 struct igc_q_vector {
 	struct igc_adapter *adapter;	/* backlink */
 	void __iomem *itr_register;
@@ -329,8 +434,6 @@ struct igc_q_vector {
 	struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
 };
 
-#define MAX_ETYPE_FILTER	(4 - 1)
-
 enum igc_filter_match_flags {
 	IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
 	IGC_FILTER_FLAG_VLAN_TCI   = 0x2,
@@ -374,108 +477,6 @@ struct igc_mac_addr {
 
 #define IGC_MAX_RXNFC_FILTERS	16
 
-/* Board specific private data structure */
-struct igc_adapter {
-	struct net_device *netdev;
-
-	unsigned long state;
-	unsigned int flags;
-	unsigned int num_q_vectors;
-
-	struct msix_entry *msix_entries;
-
-	/* TX */
-	u16 tx_work_limit;
-	u32 tx_timeout_count;
-	int num_tx_queues;
-	struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
-
-	/* RX */
-	int num_rx_queues;
-	struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
-
-	struct timer_list watchdog_timer;
-	struct timer_list dma_err_timer;
-	struct timer_list phy_info_timer;
-
-	u32 wol;
-	u32 en_mng_pt;
-	u16 link_speed;
-	u16 link_duplex;
-
-	u8 port_num;
-
-	u8 __iomem *io_addr;
-	/* Interrupt Throttle Rate */
-	u32 rx_itr_setting;
-	u32 tx_itr_setting;
-
-	struct work_struct reset_task;
-	struct work_struct watchdog_task;
-	struct work_struct dma_err_task;
-	bool fc_autoneg;
-
-	u8 tx_timeout_factor;
-
-	int msg_enable;
-	u32 max_frame_size;
-	u32 min_frame_size;
-
-	/* OS defined structs */
-	struct pci_dev *pdev;
-	/* lock for statistics */
-	spinlock_t stats64_lock;
-	struct rtnl_link_stats64 stats64;
-
-	/* structs defined in igc_hw.h */
-	struct igc_hw hw;
-	struct igc_hw_stats stats;
-
-	struct igc_q_vector *q_vector[MAX_Q_VECTORS];
-	u32 eims_enable_mask;
-	u32 eims_other;
-
-	u16 tx_ring_count;
-	u16 rx_ring_count;
-
-	u32 tx_hwtstamp_timeouts;
-	u32 tx_hwtstamp_skipped;
-	u32 rx_hwtstamp_cleared;
-
-	u32 rss_queues;
-	u32 rss_indir_tbl_init;
-
-	/* RX network flow classification support */
-	struct hlist_head nfc_filter_list;
-	struct hlist_head cls_flower_list;
-	unsigned int nfc_filter_count;
-	/* lock for RX network flow classification filter */
-	spinlock_t nfc_lock;
-	bool etype_bitmap[MAX_ETYPE_FILTER];
-
-	struct igc_mac_addr *mac_table;
-
-	u8 rss_indir_tbl[IGC_RETA_SIZE];
-
-	unsigned long link_check_timeout;
-	struct igc_info ei;
-
-	struct ptp_clock *ptp_clock;
-	struct ptp_clock_info ptp_caps;
-	struct work_struct ptp_tx_work;
-	struct sk_buff *ptp_tx_skb;
-	struct hwtstamp_config tstamp_config;
-	unsigned long ptp_tx_start;
-	unsigned long last_rx_ptp_check;
-	unsigned long last_rx_timestamp;
-	unsigned int ptp_flags;
-	/* System time value lock */
-	spinlock_t tmreg_lock;
-	struct cyclecounter cc;
-	struct timecounter tc;
-};
-
 /* igc_desc_unused - calculate if we have unused descriptors */
 static inline u16 igc_desc_unused(const struct igc_ring *ring)
 {
...
@@ -44,9 +44,6 @@
 /* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
 #define IGC_WUPM_BYTES	128
 
-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK	0x00C00000
-
 /* Loop limit on how long we wait for auto-negotiation to complete */
 #define COPPER_LINK_UP_LIMIT	10
 #define PHY_AUTO_NEG_LIMIT	45
@@ -66,8 +63,11 @@
  * (RAR[15]) for our directed address used by controllers with
  * manageability enabled, allowing us room for 15 multicast addresses.
  */
+#define IGC_RAH_QSEL_MASK	0x000C0000
+#define IGC_RAH_QSEL_SHIFT	18
+#define IGC_RAH_QSEL_ENABLE	BIT(28)
 #define IGC_RAH_AV		0x80000000 /* Receive descriptor valid */
-#define IGC_RAH_POOL_1		0x00040000
 #define IGC_RAL_MAC_ADDR_LEN	4
 #define IGC_RAH_MAC_ADDR_LEN	2
@@ -94,8 +94,6 @@
 #define IGC_CTRL_RFCE	0x08000000 /* Receive Flow Control enable */
 #define IGC_CTRL_TFCE	0x10000000 /* Transmit flow control enable */
 
-#define IGC_CONNSW_AUTOSENSE_EN	0x1
-
 /* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
 #define MAX_JUMBO_FRAME_SIZE	0x2600
@@ -377,6 +375,11 @@
 #define I225_TXPBSIZE_DEFAULT	0x04000014 /* TXPBSIZE default */
 #define IGC_RXPBS_CFG_TS_EN	0x80000000 /* Timestamp in Rx buffer */
 
+#define IGC_TXPBSIZE_TSN	0x04145145 /* 5k bytes buffer for each queue */
+
+#define IGC_DTXMXPKTSZ_TSN	0x19 /* 1600 bytes of max TX DMA packet size */
+#define IGC_DTXMXPKTSZ_DEFAULT	0x98 /* 9728-byte Jumbo frames */
+
 /* Time Sync Interrupt Causes */
 #define IGC_TSICR_SYS_WRAP	BIT(0) /* SYSTIM Wrap around. */
 #define IGC_TSICR_TXTS		BIT(1) /* Transmit Timestamp. */
@@ -431,6 +434,14 @@
 #define IGC_TSYNCTXCTL_START_SYNC	0x80000000 /* initiate sync */
 #define IGC_TSYNCTXCTL_TXSYNSIG		0x00000020 /* Sample TX tstamp in PHY sop */
 
+/* Transmit Scheduling */
+#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN	0x00000001
+#define IGC_TQAVCTRL_ENHANCED_QAV	0x00000008
+
+#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT	0x00000001
+#define IGC_TXQCTL_STRICT_CYCLE		0x00000002
+#define IGC_TXQCTL_STRICT_END		0x00000004
+
 /* Receive Checksum Control */
 #define IGC_RXCSUM_CRCOFL	0x00000800 /* CRC32 offload enable */
 #define IGC_RXCSUM_PCSD		0x00002000 /* packet checksum disabled */
@@ -497,7 +508,6 @@
 #define IGC_MDIC_READY		0x10000000
 #define IGC_MDIC_INT_EN		0x20000000
 #define IGC_MDIC_ERROR		0x40000000
-#define IGC_MDIC_DEST		0x80000000
 
 #define IGC_N0_QUEUE		-1
...
@@ -153,7 +153,7 @@ static void igc_get_regs(struct net_device *netdev,
 
 	memset(p, 0, IGC_REGS_LEN * sizeof(u32));
 
-	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
+	regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id;
 
 	/* General Registers */
 	regs_buff[0] = rd32(IGC_CTRL);
@@ -306,6 +306,15 @@ static void igc_get_regs(struct net_device *netdev,
 		regs_buff[164 + i] = rd32(IGC_TDT(i));
 	for (i = 0; i < 4; i++)
 		regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
+
+	/* XXX: Due to a bug a few lines above, RAL and RAH registers are
+	 * overwritten. To preserve the ABI, we write these registers again in
+	 * regs_buff.
+	 */
+	for (i = 0; i < 16; i++)
+		regs_buff[172 + i] = rd32(IGC_RAL(i));
+	for (i = 0; i < 16; i++)
+		regs_buff[188 + i] = rd32(IGC_RAH(i));
 }
 
 static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
...
@@ -9,11 +9,13 @@
 #include <linux/udp.h>
 #include <linux/ip.h>
 #include <linux/pm_runtime.h>
+#include <net/pkt_sched.h>
 
 #include <net/ipv6.h>
 
 #include "igc.h"
 #include "igc_hw.h"
+#include "igc_tsn.h"
 
 #define DRV_VERSION	"0.0.1-k"
 #define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"
@@ -106,6 +108,9 @@ void igc_reset(struct igc_adapter *adapter)
 	/* Re-enable PTP, where applicable. */
 	igc_ptp_reset(adapter);
 
+	/* Re-enable TSN offloading, where applicable. */
+	igc_tsn_offload_apply(adapter);
+
 	igc_get_phy_info(hw);
 }
@@ -775,13 +780,18 @@ static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
 	rar_low = le32_to_cpup((__le32 *)(addr));
 	rar_high = le16_to_cpup((__le16 *)(addr + 4));
 
+	if (adapter->mac_table[index].state & IGC_MAC_STATE_QUEUE_STEERING) {
+		u8 queue = adapter->mac_table[index].queue;
+		u32 qsel = IGC_RAH_QSEL_MASK & (queue << IGC_RAH_QSEL_SHIFT);
+
+		rar_high |= qsel;
+		rar_high |= IGC_RAH_QSEL_ENABLE;
+	}
+
 	/* Indicate to hardware the Address is Valid. */
 	if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
 		if (is_valid_ether_addr(addr))
 			rar_high |= IGC_RAH_AV;
-
-		rar_high |= IGC_RAH_POOL_1 <<
-			adapter->mac_table[index].queue;
 	}
 
 	wr32(IGC_RAL(index), rar_low);
@@ -864,6 +874,23 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
 	return netdev_mc_count(netdev);
 }
 
+static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+{
+	ktime_t cycle_time = adapter->cycle_time;
+	ktime_t base_time = adapter->base_time;
+	u32 launchtime;
+
+	/* FIXME: when using ETF together with taprio, we may have a
+	 * case where 'delta' is larger than the cycle_time, this may
+	 * cause problems if we don't read the current value of
+	 * IGC_BASET, as the value written into the launchtime
+	 * descriptor field may be misinterpreted.
+	 */
+	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+
+	return cpu_to_le32(launchtime);
+}
+
 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 			    struct igc_tx_buffer *first,
 			    u32 vlan_macip_lens, u32 type_tucmd,
@@ -871,7 +898,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 {
 	struct igc_adv_tx_context_desc *context_desc;
 	u16 i = tx_ring->next_to_use;
-	struct timespec64 ts;
 
 	context_desc = IGC_TX_CTXTDESC(tx_ring, i);
@@ -893,9 +919,12 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 	 * should have been handled by the upper layers.
 	 */
 	if (tx_ring->launchtime_enable) {
-		ts = ktime_to_timespec64(first->skb->tstamp);
+		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+		ktime_t txtime = first->skb->tstamp;
+
 		first->skb->tstamp = ktime_set(0, 0);
-		context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
+		context_desc->launch_time = igc_tx_launchtime(adapter,
+							      txtime);
 	} else {
 		context_desc->launch_time = 0;
 	}
@@ -2325,7 +2354,9 @@ static void igc_configure(struct igc_adapter *adapter)
 
 	igc_setup_mrqc(adapter);
 	igc_setup_rctl(adapter);
 
+	igc_set_default_mac_filter(adapter);
 	igc_nfc_filter_restore(adapter);
+
 	igc_configure_tx(adapter);
 	igc_configure_rx(adapter);
@@ -3458,9 +3489,6 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter)
 	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
 		igc_erase_filter(adapter, rule);
 
-	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
-		igc_erase_filter(adapter, rule);
-
 	spin_unlock(&adapter->nfc_lock);
 }
@@ -4009,7 +4037,6 @@ static void igc_watchdog_task(struct work_struct *work)
 	struct igc_hw *hw = &adapter->hw;
 	struct igc_phy_info *phy = &hw->phy;
 	u16 phy_data, retry_count = 20;
-	u32 connsw;
 	u32 link;
 	int i;
@@ -4022,14 +4049,6 @@ static void igc_watchdog_task(struct work_struct *work)
 		link = false;
 	}
 
-	/* Force link down if we have fiber to swap to */
-	if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
-		if (hw->phy.media_type == igc_media_type_copper) {
-			connsw = rd32(IGC_CONNSW);
-			if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
-				link = 0;
-		}
-	}
 	if (link) {
 		/* Cancel scheduled suspend requests. */
 		pm_runtime_resume(netdev->dev.parent);
@@ -4491,6 +4510,158 @@ static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 	}
 }
 
+static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
+				      bool enable)
+{
+	struct igc_ring *ring;
+	int i;
+
+	if (queue < 0 || queue >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	ring = adapter->tx_ring[queue];
+	ring->launchtime_enable = enable;
+
+	if (adapter->base_time)
+		return 0;
+
+	adapter->cycle_time = NSEC_PER_SEC;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		ring = adapter->tx_ring[i];
+		ring->start_time = 0;
+		ring->end_time = NSEC_PER_SEC;
+	}
+
+	return 0;
+}
+
+static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+{
+	int queue_uses[IGC_MAX_TX_QUEUES] = { };
+	size_t n;
+
+	if (qopt->cycle_time_extension)
+		return false;
+
+	for (n = 0; n < qopt->num_entries; n++) {
+		const struct tc_taprio_sched_entry *e;
+		int i;
+
+		e = &qopt->entries[n];
+
+		/* i225 only supports "global" frame preemption
+		 * settings.
+		 */
+		if (e->command != TC_TAPRIO_CMD_SET_GATES)
+			return false;
+
+		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+			if (e->gate_mask & BIT(i))
+				queue_uses[i]++;
+
+			if (queue_uses[i] > 1)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
+				     struct tc_etf_qopt_offload *qopt)
+{
+	struct igc_hw *hw = &adapter->hw;
+	int err;
+
+	if (hw->mac.type != igc_i225)
+		return -EOPNOTSUPP;
+
+	err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
+	if (err)
+		return err;
+
+	return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+				 struct tc_taprio_qopt_offload *qopt)
+{
+	u32 start_time = 0, end_time = 0;
+	size_t n;
+
+	if (!qopt->enable) {
+		adapter->base_time = 0;
+		return 0;
+	}
+
+	if (adapter->base_time)
+		return -EALREADY;
+
+	if (!validate_schedule(qopt))
+		return -EINVAL;
+
+	adapter->cycle_time = qopt->cycle_time;
+	adapter->base_time = qopt->base_time;
+
+	/* FIXME: be a little smarter about cases when the gate for a
+	 * queue stays open for more than one entry.
+	 */
+	for (n = 0; n < qopt->num_entries; n++) {
+		struct tc_taprio_sched_entry *e = &qopt->entries[n];
+		int i;
+
+		end_time += e->interval;
+
+		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+			struct igc_ring *ring = adapter->tx_ring[i];
+
+			if (!(e->gate_mask & BIT(i)))
+				continue;
+
+			ring->start_time = start_time;
+			ring->end_time = end_time;
+		}
+
+		start_time += e->interval;
+	}
+
+	return 0;
+}
+
+static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
+					 struct tc_taprio_qopt_offload *qopt)
+{
+	struct igc_hw *hw = &adapter->hw;
+	int err;
+
+	if (hw->mac.type != igc_i225)
+		return -EOPNOTSUPP;
+
+	err = igc_save_qbv_schedule(adapter, qopt);
+	if (err)
+		return err;
+
+	return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
+			void *type_data)
+{
+	struct igc_adapter *adapter = netdev_priv(dev);
+
+	switch (type) {
+	case TC_SETUP_QDISC_TAPRIO:
+		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
+
+	case TC_SETUP_QDISC_ETF:
+		return igc_tsn_enable_launchtime(adapter, type_data);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static const struct net_device_ops igc_netdev_ops = {
 	.ndo_open		= igc_open,
 	.ndo_stop		= igc_close,
@@ -4503,6 +4674,7 @@ static const struct net_device_ops igc_netdev_ops = {
 	.ndo_set_features	= igc_set_features,
 	.ndo_features_check	= igc_features_check,
 	.ndo_do_ioctl		= igc_ioctl,
+	.ndo_setup_tc		= igc_setup_tc,
 };
 
 /* PCIe configuration access */
@@ -4726,6 +4898,17 @@ static int igc_probe(struct pci_dev *pdev,
 	netdev->features |= NETIF_F_RXCSUM;
 	netdev->features |= NETIF_F_HW_CSUM;
 	netdev->features |= NETIF_F_SCTP_CRC;
+	netdev->features |= NETIF_F_HW_TC;
+
+#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				  NETIF_F_GSO_GRE_CSUM | \
+				  NETIF_F_GSO_IPXIP4 | \
+				  NETIF_F_GSO_IPXIP6 | \
+				  NETIF_F_GSO_UDP_TUNNEL | \
+				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
+	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
 
 	/* setup the private structure */
 	err = igc_sw_init(adapter);
...
@@ -231,6 +231,18 @@
 
 #define IGC_RXPBS	0x02404  /* Rx Packet Buffer Size - RW */
 
+/* Transmit Scheduling Registers */
+#define IGC_TQAVCTRL		0x3570
+#define IGC_TXQCTL(_n)		(0x3344 + 0x4 * (_n))
+#define IGC_BASET_L		0x3314
+#define IGC_BASET_H		0x3318
+#define IGC_QBVCYCLET		0x331C
+#define IGC_QBVCYCLET_S		0x3320
+
+#define IGC_STQT(_n)		(0x3324 + 0x4 * (_n))
+#define IGC_ENDQT(_n)		(0x3334 + 0x4 * (_n))
+
+#define IGC_DTXMXPKTSZ		0x355C
+
 /* System Time Registers */
 #define IGC_SYSTIML	0x0B600  /* System time register Low - RO */
 #define IGC_SYSTIMH	0x0B604  /* System time register High - RO */
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
#include "igc_tsn.h"
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
if (ring->launchtime_enable)
return true;
}
return false;
}
/* Returns the TSN specific registers to their default values after
* TSN offloading is disabled.
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl;
int i;
if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
return 0;
adapter->cycle_time = 0;
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
IGC_TQAVCTRL_ENHANCED_QAV);
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
ring->start_time = 0;
ring->end_time = 0;
ring->launchtime_enable = false;
wr32(IGC_TXQCTL(i), 0);
wr32(IGC_STQT(i), 0);
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
}
wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
return 0;
}
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
int i;
if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
return 0;
cycle = adapter->cycle_time;
base_time = adapter->base_time;
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
wr32(IGC_TQAVCTRL, tqavctrl);
wr32(IGC_QBVCYCLET_S, cycle);
wr32(IGC_QBVCYCLET, cycle);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
wr32(IGC_STQT(i), ring->start_time);
wr32(IGC_ENDQT(i), ring->end_time);
if (adapter->base_time) {
/* If we have a base_time we are in "taprio"
* mode and we need to be strict about the
* cycles: only transmit a packet if it can be
* completed during that cycle.
*/
txqctl |= IGC_TXQCTL_STRICT_CYCLE |
IGC_TXQCTL_STRICT_END;
}
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
wr32(IGC_TXQCTL(i), txqctl);
}
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
systim = ktime_set(sec, nsec);
if (ktime_compare(systim, base_time) > 0) {
s64 n;
n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
}
baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
wr32(IGC_BASET_H, baset_h);
wr32(IGC_BASET_L, baset_l);
adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;
return 0;
}
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);
if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
return 0;
if (!is_any_enabled) {
int err = igc_tsn_disable_offload(adapter);
if (err < 0)
return err;
		/* The BASET registers aren't cleared when writing
		 * into them; force a reset if the interface is
		 * running.
		 */
if (netif_running(adapter->netdev))
schedule_work(&adapter->reset_task);
return 0;
}
return igc_tsn_enable_offload(adapter);
}

...
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Intel Corporation */
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
int igc_tsn_offload_apply(struct igc_adapter *adapter);
#endif /* _IGC_TSN_H_ */