Commit 2f219d5f authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to e1000, e1000e, igb, igbvf and ixgbe.
The e1000, e1000e, igb and igbvf updates are single-patch changes, and the
remaining 11 patches are all against ixgbe.

The e1000 patch is a comment cleanup to align e1000 with the code
commenting style for /drivers/net.  It also contains a few other
whitespace cleanups (e.g. fixing lines over 80 characters, removing
unnecessary blank lines, and fixing the use of tabs and spaces).

The e1000e patch from Koki (Fujitsu) adds a warning when link speed is
downgraded due to SmartSpeed.

The igb patch from Stefan (Red Hat) increases the timeout in the ethtool
offline self-test, since link auto-negotiation on some i350 adapters can
take longer than the previous 4 second timeout and cause spurious
self-test failures.

The igbvf patch from Alex addresses several races that were possible
because next_to_watch could be set to a value indicating that a descriptor
was done when it was not.  To correct that, next_to_watch becomes a pointer
that is set to NULL during cleanup and set to the eop_desc only after the
descriptor rings have been written (a runnable sketch of this ordering
follows the commit metadata below).

The remaining patches for ixgbe are a mix of fixes, added support, and
cleanup.  Most notable is the added support for displaying the number of
Tx/Rx channels via ethtool, by Alex.  Aurélien also adds the ability to
read data from SFP+ modules over i2c for diagnostic monitoring (see the
user-space usage sketch after the commit metadata below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 96b45cbd 990a3158
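To make the igbvf ordering concrete, here is a minimal, runnable user-space
analogue of the pattern the patch establishes; the names (desc, next_to_watch,
transmit, clean) are illustrative, not the driver's code, and C11 atomics
stand in for the kernel's wmb()/read_barrier_depends() pairing:

/* Illustrative analogue (not the driver code): the producer publishes a
 * pointer to the EOP descriptor only after the descriptor contents are
 * written; a NULL pointer means "no work pending", so a stale index can
 * never make a descriptor look done when it is not. */
#include <stdatomic.h>
#include <stdio.h>

struct desc { int dd; /* "descriptor done" flag */ };

static struct desc ring[4];
static _Atomic(struct desc *) next_to_watch; /* NULL = nothing pending */

static void transmit(int i)
{
        ring[i].dd = 1; /* write the descriptor first... */
        atomic_store_explicit(&next_to_watch, &ring[i],
                              memory_order_release); /* ...then publish */
}

static void clean(void)
{
        struct desc *eop = atomic_load_explicit(&next_to_watch,
                                                memory_order_acquire);
        if (!eop)
                return;  /* nothing published, no work pending */
        if (eop->dd)     /* safely ordered after the pointer load */
                atomic_store_explicit(&next_to_watch, NULL,
                                      memory_order_relaxed);
}

int main(void)
{
        transmit(0);
        clean();
        printf("cleaned: %s\n", next_to_watch ? "no" : "yes");
        return 0;
}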
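For the ixgbe additions, the new ethtool_ops callbacks are what back the
"ethtool -l"/"-L" (channels) and "ethtool -m" (module EEPROM) commands.  As a
minimal user-space sketch, assuming an interface named eth0 and a kernel
carrying these patches, channel counts can be queried through the
ETHTOOL_GCHANNELS ioctl that ixgbe_get_channels() now services:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0); /* any socket carries SIOCETHTOOL */

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed interface name */
        ifr.ifr_data = (void *)&ch;
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("combined channels: %u (max %u)\n",
                       ch.combined_count, ch.max_combined);
        close(fd);
        return 0;
}

Changing the count goes through ETHTOOL_SCHANNELS the same way, bounded by the
limits ixgbe_max_channels() reports.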
@@ -81,68 +81,69 @@ struct e1000_adapter;

#include "e1000_hw.h"

#define E1000_MAX_INTR 10

/* TX/RX descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 256
#define E1000_MIN_TXD 48
#define E1000_MAX_82544_TXD 4096

#define E1000_DEFAULT_RXD 256
#define E1000_MAX_RXD 256
#define E1000_MIN_RXD 48
#define E1000_MAX_82544_RXD 4096

#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
#define E1000_RXBUFFER_512 512
#define E1000_RXBUFFER_1024 1024
#define E1000_RXBUFFER_2048 2048
#define E1000_RXBUFFER_4096 4096
#define E1000_RXBUFFER_8192 8192
#define E1000_RXBUFFER_16384 16384

/* SmartSpeed delimiters */
#define E1000_SMARTSPEED_DOWNSHIFT 3
#define E1000_SMARTSPEED_MAX 15

/* Packet Buffer allocations */
#define E1000_PBA_BYTES_SHIFT 0xA
#define E1000_TX_HEAD_ADDR_SHIFT 7
#define E1000_PBA_TX_MASK 0xFFFF0000

/* Flow Control Watermarks */
#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */

/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */

#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
#define E1000_EEPROM_APME 0x0400

#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif

#define E1000_MNG_VLAN_NONE (-1)

 /* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct e1000_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
...
This diff is collapsed.
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
 * value exists, a default value is used.  The final value is stored
 * in a variable in the adapter structure.
 **/
-
void e1000_check_options(struct e1000_adapter *adapter)
{
        struct e1000_option opt;
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
                .def  = E1000_DEFAULT_RXD,
                .arg  = { .r = {
                        .min = E1000_MIN_RXD,
-                       .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+                       .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+                              E1000_MAX_82544_RXD
                }}
        };
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
                if (num_TxAbsIntDelay > bd) {
                        adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
                        e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
-                               adapter);
+                                             adapter);
                } else {
                        adapter->tx_abs_int_delay = opt.def;
                }
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
                if (num_RxIntDelay > bd) {
                        adapter->rx_int_delay = RxIntDelay[bd];
                        e1000_validate_option(&adapter->rx_int_delay, &opt,
-                               adapter);
+                                             adapter);
                } else {
                        adapter->rx_int_delay = opt.def;
                }
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
                if (num_RxAbsIntDelay > bd) {
                        adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
                        e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
-                               adapter);
+                                             adapter);
                } else {
                        adapter->rx_abs_int_delay = opt.def;
                }
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
                        break;
                case 4:
                        e_dev_info("%s set to simplified "
                                   "(2000-8000) ints mode\n", opt.name);
                        adapter->itr_setting = adapter->itr;
                        break;
                default:
                        e1000_validate_option(&adapter->itr, &opt,
                                              adapter);
                        /* save the setting, because the dynamic bits
                         * change itr.
                         * clear the lower two bits because they are
-                        * used as control */
+                        * used as control
+                        */
                        adapter->itr_setting = adapter->itr & ~3;
                        break;
                }
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
 *
 * Handles speed and duplex options on fiber adapters
 **/
-
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
        int bd = adapter->bd_number;
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
 *
 * Handles speed and duplex options on copper adapters
 **/
-
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
        struct e1000_option opt;
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
                e_dev_info("Using Autonegotiation at Half Duplex only\n");
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
                adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
-                       ADVERTISE_100_HALF;
+                                                ADVERTISE_100_HALF;
                break;
        case FULL_DUPLEX:
                e_dev_info("Full Duplex specified without Speed\n");
                e_dev_info("Using Autonegotiation at Full Duplex only\n");
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
                adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
-                       ADVERTISE_100_FULL |
-                       ADVERTISE_1000_FULL;
+                                                ADVERTISE_100_FULL |
+                                                ADVERTISE_1000_FULL;
                break;
        case SPEED_10:
                e_dev_info("10 Mbps Speed specified without Duplex\n");
                e_dev_info("Using Autonegotiation at 10 Mbps only\n");
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
                adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
-                       ADVERTISE_10_FULL;
+                                                ADVERTISE_10_FULL;
                break;
        case SPEED_10 + HALF_DUPLEX:
                e_dev_info("Forcing to 10 Mbps Half Duplex\n");
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
                e_dev_info("Using Autonegotiation at 100 Mbps only\n");
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
                adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
-                       ADVERTISE_100_FULL;
+                                                ADVERTISE_100_FULL;
                break;
        case SPEED_100 + HALF_DUPLEX:
                e_dev_info("Forcing to 100 Mbps Half Duplex\n");
...
@@ -4830,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);
                        e1000_print_link_info(adapter);
+
+                       /* check if SmartSpeed worked */
+                       e1000e_check_downshift(hw);
+                       if (phy->speed_downgraded)
+                               netdev_warn(netdev,
+                                           "Link Speed was downgraded by SmartSpeed\n");
+
                        /* On supported PHYs, check for duplex mismatch only
                         * if link has autonegotiated at 10/100 half
                         */
...
@@ -1891,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
        } else {
                hw->mac.ops.check_for_link(&adapter->hw);
                if (hw->mac.autoneg)
-                       msleep(4000);
+                       msleep(5000);

                if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
                        *data = 1;
...
@@ -127,8 +127,8 @@ struct igbvf_buffer {
        /* Tx */
        struct {
                unsigned long time_stamp;
+               union e1000_adv_tx_desc *next_to_watch;
                u16 length;
-               u16 next_to_watch;
                u16 mapped_as_page;
        };
        /* Rx */
...
@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
        struct sk_buff *skb;
        union e1000_adv_tx_desc *tx_desc, *eop_desc;
        unsigned int total_bytes = 0, total_packets = 0;
-       unsigned int i, eop, count = 0;
+       unsigned int i, count = 0;
        bool cleaned = false;

        i = tx_ring->next_to_clean;
-       eop = tx_ring->buffer_info[i].next_to_watch;
-       eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+       buffer_info = &tx_ring->buffer_info[i];
+       eop_desc = buffer_info->next_to_watch;
+
+       do {
+               /* if next_to_watch is not set then there is no work pending */
+               if (!eop_desc)
+                       break;
+
+               /* prevent any other reads prior to eop_desc */
+               read_barrier_depends();
+
+               /* if DD is not set pending work has not been completed */
+               if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+                       break;
+
+               /* clear next_to_watch to prevent false hangs */
+               buffer_info->next_to_watch = NULL;

-       while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
-              (count < tx_ring->count)) {
-               rmb();  /* read buffer_info after eop_desc status */
                for (cleaned = false; !cleaned; count++) {
                        tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
-                       buffer_info = &tx_ring->buffer_info[i];
-                       cleaned = (i == eop);
+                       cleaned = (tx_desc == eop_desc);
                        skb = buffer_info->skb;

                        if (skb) {
@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
+
+                       buffer_info = &tx_ring->buffer_info[i];
                }

-               eop = tx_ring->buffer_info[i].next_to_watch;
-               eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-       }
+               eop_desc = buffer_info->next_to_watch;
+       } while (count < tx_ring->count);

        tx_ring->next_to_clean = i;
@@ -1961,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
        context_desc->seqnum_seed = 0;

        buffer_info->time_stamp = jiffies;
-       buffer_info->next_to_watch = i;
        buffer_info->dma = 0;
        i++;
        if (i == tx_ring->count)
@@ -2021,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                context_desc->mss_l4len_idx = 0;

                buffer_info->time_stamp = jiffies;
-               buffer_info->next_to_watch = i;
                buffer_info->dma = 0;
                i++;
                if (i == tx_ring->count)
@@ -2061,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                   struct igbvf_ring *tx_ring,
-                                  struct sk_buff *skb,
-                                  unsigned int first)
+                                  struct sk_buff *skb)
{
        struct igbvf_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
@@ -2077,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
        buffer_info->length = len;
        /* set time_stamp *before* dma to help avoid a possible race */
        buffer_info->time_stamp = jiffies;
-       buffer_info->next_to_watch = i;
        buffer_info->mapped_as_page = false;
        buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
                                          DMA_TO_DEVICE);
@@ -2100,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
                buffer_info->length = len;
                buffer_info->time_stamp = jiffies;
-               buffer_info->next_to_watch = i;
                buffer_info->mapped_as_page = true;
                buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                                    DMA_TO_DEVICE);
@@ -2109,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
        }

        tx_ring->buffer_info[i].skb = skb;
-       tx_ring->buffer_info[first].next_to_watch = i;

        return ++count;
@@ -2120,7 +2127,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
        buffer_info->dma = 0;
        buffer_info->time_stamp = 0;
        buffer_info->length = 0;
-       buffer_info->next_to_watch = 0;
        buffer_info->mapped_as_page = false;
        if (count)
                count--;
@@ -2139,7 +2145,8 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                      struct igbvf_ring *tx_ring,
-                                     int tx_flags, int count, u32 paylen,
+                                     int tx_flags, int count,
+                                     unsigned int first, u32 paylen,
                                      u8 hdr_len)
{
        union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2189,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
         * such as IA-64). */
        wmb();

+       tx_ring->buffer_info[first].next_to_watch = tx_desc;
        tx_ring->next_to_use = i;
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
        /* we need this if more than one processor can write to our tail
@@ -2255,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
         * count reflects descriptors mapped, if 0 then mapping error
         * has occurred and we need to rewind the descriptor queue
         */
-       count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+       count = igbvf_tx_map_adv(adapter, tx_ring, skb);

        if (count) {
                igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-                                  skb->len, hdr_len);
+                                  first, skb->len, hdr_len);
                /* Make sure there is space in the ring for the next send. */
                igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
        } else {
...
@@ -156,7 +156,7 @@ struct vf_macvlans {

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
@@ -201,6 +201,7 @@ struct ixgbe_rx_queue_stats {

enum ixgbe_ring_state_t {
        __IXGBE_TX_FDIR_INIT_DONE,
+       __IXGBE_TX_XPS_INIT_DONE,
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_RX_RSC_ENABLED,
@@ -278,15 +279,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_RSS_INDICES  16
#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
+#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
-#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
-#endif /* IXGBE_FCOE */
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)

struct ixgbe_ring_feature {
        u16 limit;      /* upper limit on feature indices */
        u16 indices;    /* current value of indices */
@@ -624,6 +620,7 @@ enum ixgbe_state_t {
        __IXGBE_DOWN,
        __IXGBE_SERVICE_SCHED,
        __IXGBE_IN_SFP_INIT,
+       __IXGBE_READ_I2C,
};

struct ixgbe_cb {
@@ -704,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
...
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>

#include "ixgbe.h"
+#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16
@@ -2112,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_q_vector *q_vector;
        int i;
-       u16 tx_itr_param, rx_itr_param;
+       u16 tx_itr_param, rx_itr_param, tx_itr_prev;
        bool need_reset = false;

-       /* don't accept tx specific changes if we've got mixed RxTx vectors */
-       if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
-           && ec->tx_coalesce_usecs)
-               return -EINVAL;
+       if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
+               /* reject Tx specific changes in case of mixed RxTx vectors */
+               if (ec->tx_coalesce_usecs)
+                       return -EINVAL;
+
+               tx_itr_prev = adapter->rx_itr_setting;
+       } else {
+               tx_itr_prev = adapter->tx_itr_setting;
+       }

        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@@ -2144,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
        else
                tx_itr_param = adapter->tx_itr_setting;

+       /* mixed Rx/Tx */
+       if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+               adapter->tx_itr_setting = adapter->rx_itr_setting;
+
+#if IS_ENABLED(CONFIG_BQL)
+       /* detect ITR changes that require update of TXDCTL.WTHRESH */
+       if ((adapter->tx_itr_setting > 1) &&
+           (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+               if ((tx_itr_prev == 1) ||
+                   (tx_itr_prev > IXGBE_100K_ITR))
+                       need_reset = true;
+       } else {
+               if ((tx_itr_prev > 1) &&
+                   (tx_itr_prev < IXGBE_100K_ITR))
+                       need_reset = true;
+       }
+#endif
        /* check the old value and enable RSC if necessary */
-       need_reset = ixgbe_update_rsc(adapter);
+       need_reset |= ixgbe_update_rsc(adapter);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                q_vector = adapter->q_vector[i];
@@ -2731,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
        return 0;
}

+static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
+{
+       unsigned int max_combined;
+       u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+               /* We only support one q_vector without MSI-X */
+               max_combined = 1;
+       } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+               /* SR-IOV currently only allows one queue on the PF */
+               max_combined = 1;
+       } else if (tcs > 1) {
+               /* For DCB report channels per traffic class */
+               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+                       /* 8 TC w/ 4 queues per TC */
+                       max_combined = 4;
+               } else if (tcs > 4) {
+                       /* 8 TC w/ 8 queues per TC */
+                       max_combined = 8;
+               } else {
+                       /* 4 TC w/ 16 queues per TC */
+                       max_combined = 16;
+               }
+       } else if (adapter->atr_sample_rate) {
+               /* support up to 64 queues with ATR */
+               max_combined = IXGBE_MAX_FDIR_INDICES;
+       } else {
+               /* support up to 16 queues with RSS */
+               max_combined = IXGBE_MAX_RSS_INDICES;
+       }
+
+       return max_combined;
+}
+
+static void ixgbe_get_channels(struct net_device *dev,
+                              struct ethtool_channels *ch)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       /* report maximum channels */
+       ch->max_combined = ixgbe_max_channels(adapter);
+
+       /* report info for other vector */
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               ch->max_other = NON_Q_VECTORS;
+               ch->other_count = NON_Q_VECTORS;
+       }
+
+       /* record RSS queues */
+       ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+       /* nothing else to report if RSS is disabled */
+       if (ch->combined_count == 1)
+               return;
+
+       /* we do not support ATR queueing if SR-IOV is enabled */
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               return;
+
+       /* same thing goes for being DCB enabled */
+       if (netdev_get_num_tc(dev) > 1)
+               return;
+
+       /* if ATR is disabled we can exit */
+       if (!adapter->atr_sample_rate)
+               return;
+
+       /* report flow director queues as maximum channels */
+       ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
+}
+
+static int ixgbe_set_channels(struct net_device *dev,
+                             struct ethtool_channels *ch)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       unsigned int count = ch->combined_count;
+
+       /* verify they are not requesting separate vectors */
+       if (!count || ch->rx_count || ch->tx_count)
+               return -EINVAL;
+
+       /* verify other_count has not changed */
+       if (ch->other_count != NON_Q_VECTORS)
+               return -EINVAL;
+
+       /* verify the number of channels does not exceed hardware limits */
+       if (count > ixgbe_max_channels(adapter))
+               return -EINVAL;
+
+       /* update feature limits from largest to smallest supported values */
+       adapter->ring_feature[RING_F_FDIR].limit = count;
+
+       /* cap RSS limit at 16 */
+       if (count > IXGBE_MAX_RSS_INDICES)
+               count = IXGBE_MAX_RSS_INDICES;
+       adapter->ring_feature[RING_F_RSS].limit = count;
+
+#ifdef IXGBE_FCOE
+       /* cap FCoE limit at 8 */
+       if (count > IXGBE_FCRETA_SIZE)
+               count = IXGBE_FCRETA_SIZE;
+       adapter->ring_feature[RING_F_FCOE].limit = count;
+
+#endif
+       /* use setup TC to update any traffic class queue mapping */
+       return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+static int ixgbe_get_module_info(struct net_device *dev,
+                                struct ethtool_modinfo *modinfo)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 status;
+       u8 sff8472_rev, addr_mode;
+       int ret_val = 0;
+       bool page_swap = false;
+
+       /* avoid concurrent i2c reads */
+       while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+               msleep(100);
+
+       /* used by the service task */
+       set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+       /* Check whether we support SFF-8472 or not */
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_SFF_8472_COMP,
+                                            &sff8472_rev);
+       if (status != 0) {
+               ret_val = -EIO;
+               goto err_out;
+       }
+
+       /* addressing mode is not supported */
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_SFF_8472_SWAP,
+                                            &addr_mode);
+       if (status != 0) {
+               ret_val = -EIO;
+               goto err_out;
+       }
+
+       if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+               e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+               page_swap = true;
+       }
+
+       if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+               /* We have an SFP, but it does not support SFF-8472 */
+               modinfo->type = ETH_MODULE_SFF_8079;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+       } else {
+               /* We have an SFP which supports a revision of SFF-8472. */
+               modinfo->type = ETH_MODULE_SFF_8472;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+       }
+
+err_out:
+       clear_bit(__IXGBE_READ_I2C, &adapter->state);
+       return ret_val;
+}
+static int ixgbe_get_module_eeprom(struct net_device *dev,
+                                  struct ethtool_eeprom *ee,
+                                  u8 *data)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       u8 databyte = 0xFF;
+       int i = 0;
+       int ret_val = 0;
+
+       /* ixgbe_get_module_info is called before this function in all
+        * cases, so we do not need any checks we already do above,
+        * and can trust ee->len to be a known value.
+        */
+
+       while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+               msleep(100);
+       set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+       /* Read the first block, SFF-8079 */
+       for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
+               status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+               if (status != 0) {
+                       /* Error occurred while reading module */
+                       ret_val = -EIO;
+                       goto err_out;
+               }
+               data[i] = databyte;
+       }
+
+       /* If the second block is requested, check if SFF-8472 is supported. */
+       if (ee->len == ETH_MODULE_SFF_8472_LEN) {
+               if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
+                       return -EOPNOTSUPP;
+
+               /* Read the second block, SFF-8472 */
+               for (i = ETH_MODULE_SFF_8079_LEN;
+                    i < ETH_MODULE_SFF_8472_LEN; i++) {
+                       status = hw->phy.ops.read_i2c_sff8472(hw,
+                               i - ETH_MODULE_SFF_8079_LEN, &databyte);
+                       if (status != 0) {
+                               /* Error occurred while reading module */
+                               ret_val = -EIO;
+                               goto err_out;
+                       }
+                       data[i] = databyte;
+               }
+       }
+
+err_out:
+       clear_bit(__IXGBE_READ_I2C, &adapter->state);
+       return ret_val;
+}
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
        .get_settings = ixgbe_get_settings,
        .set_settings = ixgbe_set_settings,
@@ -2759,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
        .set_coalesce = ixgbe_set_coalesce,
        .get_rxnfc = ixgbe_get_rxnfc,
        .set_rxnfc = ixgbe_set_rxnfc,
+       .get_channels = ixgbe_get_channels,
+       .set_channels = ixgbe_set_channels,
        .get_ts_info = ixgbe_get_ts_info,
+       .get_module_info = ixgbe_get_module_info,
+       .get_module_eeprom = ixgbe_get_module_eeprom,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
...
@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
        fcoe = &adapter->ring_feature[RING_F_FCOE];

        /* limit ourselves based on feature limits */
-       fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
        fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

        if (fcoe_i) {
@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
        fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

        if (vmdq_i > 1 && fcoe_i) {
-               /* reserve no more than number of CPUs */
-               fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
-
                /* alloc queues for FCoE separately */
                fcoe->indices = fcoe_i;
                fcoe->offset = vmdq_i * rss_i;
@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
        if (rss_i > 1 && adapter->atr_sample_rate) {
                f = &adapter->ring_feature[RING_F_FDIR];

-               f->indices = min_t(u16, num_online_cpus(), f->limit);
-               rss_i = max_t(u16, rss_i, f->indices);
+               rss_i = f->indices = f->limit;

                if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
{
        struct ixgbe_q_vector *q_vector;
        struct ixgbe_ring *ring;
-       int node = -1;
+       int node = NUMA_NO_NODE;
        int cpu = -1;
        int ring_count, size;
+       u8 tcs = netdev_get_num_tc(adapter->netdev);

        ring_count = txr_count + rxr_count;
        size = sizeof(struct ixgbe_q_vector) +
               (sizeof(struct ixgbe_ring) * ring_count);

        /* customize cpu for Flow Director mapping */
-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-               if (cpu_online(v_idx)) {
-                       cpu = v_idx;
-                       node = cpu_to_node(cpu);
+       if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+               u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+               if (rss_i > 1 && adapter->atr_sample_rate) {
+                       if (cpu_online(v_idx)) {
+                               cpu = v_idx;
+                               node = cpu_to_node(cpu);
+                       }
                }
        }
...
@@ -2786,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        /*
         * set WTHRESH to encourage burst writeback, it should not be set
-        * higher than 1 when ITR is 0 as it could cause false TX hangs
+        * higher than 1 when:
+        * - ITR is 0 as it could cause false TX hangs
+        * - ITR is set to > 100k int/sec and BQL is enabled
         *
         * In order to avoid issues WTHRESH + PTHRESH should always be equal
         * to or less than the number of on chip descriptors, which is
         * currently 40.
         */
+#if IS_ENABLED(CONFIG_BQL)
+       if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
+#else
        if (!ring->q_vector || (ring->q_vector->itr < 8))
+#endif
                txdctl |= (1 << 16);    /* WTHRESH = 1 */
        else
                txdctl |= (8 << 16);    /* WTHRESH = 8 */
@@ -2813,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                ring->atr_sample_rate = 0;
        }

+       /* initialize XPS */
+       if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+               struct ixgbe_q_vector *q_vector = ring->q_vector;
+
+               if (q_vector)
+                       netif_set_xps_queue(adapter->netdev,
+                                           &q_vector->affinity_mask,
+                                           ring->queue_index);
+       }
+
        clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

        /* enable queue */
@@ -4465,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       unsigned int rss;
+       unsigned int rss, fdir;
        u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
        int j;
@@ -4485,9 +4501,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->ring_feature[RING_F_RSS].limit = rss;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-               adapter->ring_feature[RING_F_FDIR].limit = IXGBE_MAX_FDIR_INDICES;
                adapter->max_q_vectors = MAX_Q_VECTORS_82599;
                adapter->atr_sample_rate = 20;
+               fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
+               adapter->ring_feature[RING_F_FDIR].limit = fdir;
                adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef CONFIG_IXGBE_DCA
                adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
@@ -5698,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
            !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
                return;

+       /* concurrent i2c reads are not supported */
+       if (test_bit(__IXGBE_READ_I2C, &adapter->state))
+               return;
+
        /* someone else is in init, wait until next service event */
        if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
                return;
@@ -6363,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
        return __ixgbe_maybe_stop_tx(tx_ring, size);
}

+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-       int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
-              smp_processor_id();
-#ifdef IXGBE_FCOE
-       __be16 protocol = vlan_get_protocol(skb);
+       struct ixgbe_adapter *adapter;
+       struct ixgbe_ring_feature *f;
+       int txq;

-       if (((protocol == htons(ETH_P_FCOE)) ||
-           (protocol == htons(ETH_P_FIP))) &&
-           (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
-               struct ixgbe_ring_feature *f;
+       /*
+        * only execute the code below if protocol is FCoE
+        * or FIP and we have FCoE enabled on the adapter
+        */
+       switch (vlan_get_protocol(skb)) {
+       case __constant_htons(ETH_P_FCOE):
+       case __constant_htons(ETH_P_FIP):
+               adapter = netdev_priv(dev);

-               f = &adapter->ring_feature[RING_F_FCOE];
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+                       break;
+       default:
+               return __netdev_pick_tx(dev, skb);
+       }

-               while (txq >= f->indices)
-                       txq -= f->indices;
-               txq += adapter->ring_feature[RING_F_FCOE].offset;
+       f = &adapter->ring_feature[RING_F_FCOE];

-               return txq;
-       }
-#endif
+       txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+                                          smp_processor_id();

-       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-               while (unlikely(txq >= dev->real_num_tx_queues))
-                       txq -= dev->real_num_tx_queues;
-               return txq;
-       }
+       while (txq >= f->indices)
+               txq -= f->indices;

-       return skb_tx_hash(dev, skb);
+       return txq + f->offset;
}
+#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                                  struct ixgbe_adapter *adapter,
                                  struct ixgbe_ring *tx_ring)
@@ -6799,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
        }
}

+#endif /* CONFIG_IXGBE_DCB */
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 *
@@ -6824,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                ixgbe_close(dev);
        ixgbe_clear_interrupt_scheme(adapter);

+#ifdef CONFIG_IXGBE_DCB
        if (tc) {
                netdev_set_num_tc(dev, tc);
                ixgbe_set_prio_tc_map(adapter);
@@ -6846,31 +6871,24 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
                adapter->dcb_cfg.pfc_mode_enable = false;
        }

-       ixgbe_init_interrupt_scheme(adapter);
        ixgbe_validate_rtr(adapter, tc);
+
+#endif /* CONFIG_IXGBE_DCB */
+       ixgbe_init_interrupt_scheme(adapter);

        if (netif_running(dev))
-               ixgbe_open(dev);
+               return ixgbe_open(dev);

        return 0;
}
-#endif /* CONFIG_IXGBE_DCB */

#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        rtnl_lock();
-#ifdef CONFIG_IXGBE_DCB
        ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
-#else
-       if (netif_running(netdev))
-               ixgbe_close(netdev);
-       ixgbe_clear_interrupt_scheme(adapter);
-       ixgbe_init_interrupt_scheme(adapter);
-       if (netif_running(netdev))
-               ixgbe_open(netdev);
-#endif
        rtnl_unlock();
}
@@ -7118,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open = ixgbe_open,
        .ndo_stop = ixgbe_close,
        .ndo_start_xmit = ixgbe_xmit_frame,
+#ifdef IXGBE_FCOE
        .ndo_select_queue = ixgbe_select_queue,
+#endif
        .ndo_set_rx_mode = ixgbe_set_rx_mode,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_mac_address = ixgbe_set_mac,
@@ -7230,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        static int cards_found;
        int i, err, pci_using_dac;
+       unsigned int indices = MAX_TX_QUEUES;
        u8 part_str[IXGBE_PBANUM_LENGTH];
-       unsigned int indices = num_possible_cpus();
-       unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
        u16 device_caps;
#endif
@@ -7281,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_master(pdev);
        pci_save_state(pdev);

+       if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
-       if (ii->mac == ixgbe_mac_82598EB)
-               dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
-                               IXGBE_MAX_RSS_INDICES);
-       else
-               dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
-                               IXGBE_MAX_FDIR_INDICES);
+               /* 8 TC w/ 4 queues per TC */
+               indices = 4 * MAX_TRAFFIC_CLASS;
+#else
+               indices = IXGBE_MAX_RSS_INDICES;
#endif
+       }

-       if (ii->mac == ixgbe_mac_82598EB)
-               indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
-       else
-               indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-#ifdef IXGBE_FCOE
-       indices += min_t(unsigned int, num_possible_cpus(),
-                        IXGBE_MAX_FCOE_INDICES);
-#endif
-       indices = max_t(unsigned int, dcb_max, indices);
        netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
        if (!netdev) {
                err = -ENOMEM;
@@ -7454,13 +7463,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

#ifdef IXGBE_FCOE
        if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+               unsigned int fcoe_l;
+
                if (hw->mac.ops.get_device_caps) {
                        hw->mac.ops.get_device_caps(hw, &device_caps);
                        if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
                                adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
                }

-               adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+               fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
+               adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;

                netdev->features |= NETIF_F_FSO |
                                    NETIF_F_FCOE_CRC;
...
@@ -852,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                status = hw->phy.ops.read_i2c_eeprom(hw,
                                                     IXGBE_SFF_IDENTIFIER,
                                                     &identifier);

-               if (status == IXGBE_ERR_SWFW_SYNC ||
-                   status == IXGBE_ERR_I2C ||
-                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+               if (status != 0)
                        goto err_read_i2c_eeprom;

                /* LAN ID is needed for sfp_type determination */
@@ -870,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                                     IXGBE_SFF_1GBE_COMP_CODES,
                                                     &comp_codes_1g);

-               if (status == IXGBE_ERR_SWFW_SYNC ||
-                   status == IXGBE_ERR_I2C ||
-                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+               if (status != 0)
                        goto err_read_i2c_eeprom;

                status = hw->phy.ops.read_i2c_eeprom(hw,
                                                     IXGBE_SFF_10GBE_COMP_CODES,
                                                     &comp_codes_10g);

-               if (status == IXGBE_ERR_SWFW_SYNC ||
-                   status == IXGBE_ERR_I2C ||
-                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+               if (status != 0)
                        goto err_read_i2c_eeprom;

                status = hw->phy.ops.read_i2c_eeprom(hw,
                                                     IXGBE_SFF_CABLE_TECHNOLOGY,
                                                     &cable_tech);

-               if (status == IXGBE_ERR_SWFW_SYNC ||
-                   status == IXGBE_ERR_I2C ||
-                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+               if (status != 0)
                        goto err_read_i2c_eeprom;

                /* ID Module
@@ -984,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                if (hw->phy.type != ixgbe_phy_nl) {
                        hw->phy.id = identifier;
                        status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE0,
                                                    &oui_bytes[0]);

-                       if (status == IXGBE_ERR_SWFW_SYNC ||
-                           status == IXGBE_ERR_I2C ||
-                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       if (status != 0)
                                goto err_read_i2c_eeprom;

                        status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE1,
                                                    &oui_bytes[1]);

-                       if (status == IXGBE_ERR_SWFW_SYNC ||
-                           status == IXGBE_ERR_I2C ||
-                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       if (status != 0)
                                goto err_read_i2c_eeprom;

                        status = hw->phy.ops.read_i2c_eeprom(hw,
                                                    IXGBE_SFF_VENDOR_OUI_BYTE2,
                                                    &oui_bytes[2]);

-                       if (status == IXGBE_ERR_SWFW_SYNC ||
-                           status == IXGBE_ERR_I2C ||
-                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       if (status != 0)
                                goto err_read_i2c_eeprom;

                        vendor_oui =
@@ -1307,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                        break;

fail:
+               ixgbe_i2c_bus_clear(hw);
                hw->mac.ops.release_swfw_sync(hw, swfw_mask);
                msleep(100);
-               ixgbe_i2c_bus_clear(hw);
                retry++;
                if (retry < max_retry)
                        hw_dbg(hw, "I2C byte read error - Retrying.\n");
...