Commit 2f219d5f authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to e1000, e1000e, igb, igbvf and ixgbe.
The e1000, e1000e, igb and igbvf are single patch changes and the
remaining 11 patches are all against ixgbe.

The e1000 patch is a comment cleanup to align e1000 with the code
commenting style for /drivers/net.  It also contains a few other white
space cleanups (i.e. fix lines over 80 char, remove unnecessary blank
lines and fix the use of tabs/spaces).

The e1000e patch from Koki (Fujitsu) adds a warning when link speed is
downgraded due to SmartSpeed.

The igb patch from Stefan (Red Hat) increases the timeout in the ethtool
offline self-test because some i350 adapters would sometimes fail the
self-test because link auto negotiation may take longer than the current
4 second timeout.

The igbvf patch from Alex is meant to address several race issues that
become possible because next_to_watch could possibly be set to a value
that shows that the descriptor is done when it is not.  In order to correct
that we instead make next_to_watch a pointer that is set to NULL during
cleanup, and set to the eop_desc after the descriptor rings have been written.

The remaining patches for ixgbe are a mix of fixes and added support as well
as some cleanup.  Most notably is the added support for displaying the
number of Tx/Rx channels via ethtool by Alex.  Also Aurélien adds the
ability for reading data from SFP+ modules over i2c for diagnostic
monitoring.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 96b45cbd 990a3158
...@@ -81,68 +81,69 @@ struct e1000_adapter; ...@@ -81,68 +81,69 @@ struct e1000_adapter;
#include "e1000_hw.h" #include "e1000_hw.h"
#define E1000_MAX_INTR 10 #define E1000_MAX_INTR 10
/* TX/RX descriptor defines */ /* TX/RX descriptor defines */
#define E1000_DEFAULT_TXD 256 #define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 256 #define E1000_MAX_TXD 256
#define E1000_MIN_TXD 48 #define E1000_MIN_TXD 48
#define E1000_MAX_82544_TXD 4096 #define E1000_MAX_82544_TXD 4096
#define E1000_DEFAULT_RXD 256 #define E1000_DEFAULT_RXD 256
#define E1000_MAX_RXD 256 #define E1000_MAX_RXD 256
#define E1000_MIN_RXD 48 #define E1000_MIN_RXD 48
#define E1000_MAX_82544_RXD 4096 #define E1000_MAX_82544_RXD 4096
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ #define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ #define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* this is the size past which hardware will drop packets when setting LPE=0 */ /* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */ #define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */ #define E1000_RXBUFFER_256 256 /* Used for packet split */
#define E1000_RXBUFFER_512 512 #define E1000_RXBUFFER_512 512
#define E1000_RXBUFFER_1024 1024 #define E1000_RXBUFFER_1024 1024
#define E1000_RXBUFFER_2048 2048 #define E1000_RXBUFFER_2048 2048
#define E1000_RXBUFFER_4096 4096 #define E1000_RXBUFFER_4096 4096
#define E1000_RXBUFFER_8192 8192 #define E1000_RXBUFFER_8192 8192
#define E1000_RXBUFFER_16384 16384 #define E1000_RXBUFFER_16384 16384
/* SmartSpeed delimiters */ /* SmartSpeed delimiters */
#define E1000_SMARTSPEED_DOWNSHIFT 3 #define E1000_SMARTSPEED_DOWNSHIFT 3
#define E1000_SMARTSPEED_MAX 15 #define E1000_SMARTSPEED_MAX 15
/* Packet Buffer allocations */ /* Packet Buffer allocations */
#define E1000_PBA_BYTES_SHIFT 0xA #define E1000_PBA_BYTES_SHIFT 0xA
#define E1000_TX_HEAD_ADDR_SHIFT 7 #define E1000_TX_HEAD_ADDR_SHIFT 7
#define E1000_PBA_TX_MASK 0xFFFF0000 #define E1000_PBA_TX_MASK 0xFFFF0000
/* Flow Control Watermarks */ /* Flow Control Watermarks */
#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ #define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ #define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ #define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */ /* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16 #define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */ /* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ #define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define AUTO_ALL_MODES 0 #define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004 #define E1000_EEPROM_82544_APM 0x0004
#define E1000_EEPROM_APME 0x0400 #define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE #ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */ /* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default #define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif #endif
#define E1000_MNG_VLAN_NONE (-1) #define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer, /* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */ * so a DMA handle can be stored along with the buffer
*/
struct e1000_buffer { struct e1000_buffer {
struct sk_buff *skb; struct sk_buff *skb;
dma_addr_t dma; dma_addr_t dma;
......
...@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev, ...@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev,
if (hw->media_type == e1000_media_type_copper) { if (hw->media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half | ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full| SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg | SUPPORTED_Autoneg |
SUPPORTED_TP); SUPPORTED_TP);
ecmd->advertising = ADVERTISED_TP; ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) { if (hw->autoneg == 1) {
...@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev, ...@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, adapter->link_speed); ethtool_cmd_speed_set(ecmd, adapter->link_speed);
/* unfortunately FULL_DUPLEX != DUPLEX_FULL /* unfortunately FULL_DUPLEX != DUPLEX_FULL
* and HALF_DUPLEX != DUPLEX_HALF */ * and HALF_DUPLEX != DUPLEX_HALF
*/
if (adapter->link_duplex == FULL_DUPLEX) if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL; ecmd->duplex = DUPLEX_FULL;
else else
...@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev, ...@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
if ((hw->media_type == e1000_media_type_copper) && if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev)) netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
ETH_TP_MDI_X : ETH_TP_MDI_X : ETH_TP_MDI);
ETH_TP_MDI);
else else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
...@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev, ...@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
/* /* MDI setting is only allowed when autoneg enabled because
* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or * some hardware doesn't allow MDI setting when speed or
* duplex is forced. * duplex is forced.
*/ */
...@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev, ...@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev,
ADVERTISED_Autoneg; ADVERTISED_Autoneg;
else else
hw->autoneg_advertised = ecmd->advertising | hw->autoneg_advertised = ecmd->advertising |
ADVERTISED_TP | ADVERTISED_TP |
ADVERTISED_Autoneg; ADVERTISED_Autoneg;
ecmd->advertising = hw->autoneg_advertised; ecmd->advertising = hw->autoneg_advertised;
} else { } else {
u32 speed = ethtool_cmd_speed(ecmd); u32 speed = ethtool_cmd_speed(ecmd);
...@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev) ...@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
/* /* If the link is not reported up to netdev, interrupts are disabled,
* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last * and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link * looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly * state is interrogated, rather than pulling a cached and possibly
...@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev, ...@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
le16_to_cpus(&eeprom_buff[i]); le16_to_cpus(&eeprom_buff[i]);
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
eeprom->len); eeprom->len);
kfree(eeprom_buff); kfree(eeprom_buff);
return ret_val; return ret_val;
...@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev, ...@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff; ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) { if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */ /* need read/modify/write of first changed EEPROM word
/* only the second byte of the word is being modified */ * only the second byte of the word is being modified
*/
ret_val = e1000_read_eeprom(hw, first_word, 1, ret_val = e1000_read_eeprom(hw, first_word, 1,
&eeprom_buff[0]); &eeprom_buff[0]);
ptr++; ptr++;
} }
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
/* need read/modify/write of last changed EEPROM word */ /* need read/modify/write of last changed EEPROM word
/* only the first byte of the word is being modified */ * only the first byte of the word is being modified
*/
ret_val = e1000_read_eeprom(hw, last_word, 1, ret_val = e1000_read_eeprom(hw, last_word, 1,
&eeprom_buff[last_word - first_word]); &eeprom_buff[last_word - first_word]);
} }
...@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev, ...@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring; rx_old = adapter->rx_ring;
err = -ENOMEM; err = -ENOMEM;
txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL); txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
GFP_KERNEL);
if (!txdr) if (!txdr)
goto err_alloc_tx; goto err_alloc_tx;
rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL); rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
GFP_KERNEL);
if (!rxdr) if (!rxdr)
goto err_alloc_rx; goto err_alloc_rx;
...@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev, ...@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
E1000_MAX_RXD : E1000_MAX_82544_RXD)); E1000_MAX_RXD : E1000_MAX_82544_RXD));
rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD)); E1000_MAX_TXD : E1000_MAX_82544_TXD));
txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_tx_queues; i++) for (i = 0; i < adapter->num_tx_queues; i++)
...@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev, ...@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
goto err_setup_tx; goto err_setup_tx;
/* save the new, restore the old in order to free it, /* save the new, restore the old in order to free it,
* then restore the new back again */ * then restore the new back again
*/
adapter->rx_ring = rx_old; adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old; adapter->tx_ring = tx_old;
...@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) ...@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (hw->mac_type >= e1000_82543) { if (hw->mac_type >= e1000_82543) {
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
...@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) ...@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF); 0xFFFFFFFF);
} }
} else { } else {
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
} }
value = E1000_MC_TBL_SIZE; value = E1000_MC_TBL_SIZE;
...@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) ...@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 0; *data = 0;
/* NOTE: we don't test MSI interrupts here, yet */ /* NOTE: we don't test MSI interrupts here, yet
/* Hook up test interrupt handler just for this test */ * Hook up test interrupt handler just for this test
*/
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
netdev)) netdev))
shared_int = false; shared_int = false;
else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
netdev->name, netdev)) { netdev->name, netdev)) {
*data = 1; *data = 1;
return -1; return -1;
} }
...@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) ...@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
E1000_CTRL_FD); /* Force Duplex to FULL */ E1000_CTRL_FD); /* Force Duplex to FULL */
if (hw->media_type == e1000_media_type_copper && if (hw->media_type == e1000_media_type_copper &&
hw->phy_type == e1000_phy_m88) hw->phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else { else {
/* Set the ILOS bit on the fiber Nic is half /* Set the ILOS bit on the fiber Nic is half
* duplex link is detected. */ * duplex link is detected.
*/
stat_reg = er32(STATUS); stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0) if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
...@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) ...@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = e1000_check_lbtest_frame( ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb, rxdr->buffer_info[l].skb,
1024); 1024);
if (!ret_val) if (!ret_val)
good_cnt++; good_cnt++;
if (unlikely(++l == rxdr->count)) l = 0; if (unlikely(++l == rxdr->count)) l = 0;
...@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) ...@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
hw->serdes_has_link = false; hw->serdes_has_link = false;
/* On some blade server designs, link establishment /* On some blade server designs, link establishment
* could take as long as 2-3 minutes */ * could take as long as 2-3 minutes
*/
do { do {
e1000_check_for_link(hw); e1000_check_for_link(hw);
if (hw->serdes_has_link) if (hw->serdes_has_link)
...@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev, ...@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
e_info(hw, "offline testing starting\n"); e_info(hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't /* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */ * interfere with test result
*/
if (e1000_link_test(adapter, &data[4])) if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_FAILED;
...@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, ...@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
default: default:
/* dual port cards only support WoL on port A from now on /* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B * unless it was enabled in the eeprom for port B
* so exclude FUNC_1 ports from having WoL enabled */ * so exclude FUNC_1 ports from having WoL enabled
*/
if (er32(STATUS) & E1000_STATUS_FUNC_1 && if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) { !adapter->eeprom_wol) {
wol->supported = 0; wol->supported = 0;
...@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev, ...@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts = 0; wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not /* this function will set ->supported = 0 and return 1 if wol is not
* supported by this hardware */ * supported by this hardware
*/
if (e1000_wol_exclusion(adapter, wol) || if (e1000_wol_exclusion(adapter, wol) ||
!device_can_wakeup(&adapter->pdev->dev)) !device_can_wakeup(&adapter->pdev->dev))
return; return;
...@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, ...@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
data[i] = (e1000_gstrings_stats[i].sizeof_stat == data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p; sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
} }
/* BUG_ON(i != E1000_STATS_LEN); */ /* BUG_ON(i != E1000_STATS_LEN); */
} }
static void e1000_get_strings(struct net_device *netdev, u32 stringset, static void e1000_get_strings(struct net_device *netdev, u32 stringset,
...@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, ...@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN); ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break; break;
} }
} }
static const struct ethtool_ops e1000_ethtool_ops = { static const struct ethtool_ops e1000_ethtool_ops = {
.get_settings = e1000_get_settings, .get_settings = e1000_get_settings,
.set_settings = e1000_set_settings, .set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo, .get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len, .get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs, .get_regs = e1000_get_regs,
.get_wol = e1000_get_wol, .get_wol = e1000_get_wol,
.set_wol = e1000_set_wol, .set_wol = e1000_set_wol,
.get_msglevel = e1000_get_msglevel, .get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel, .set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset, .nway_reset = e1000_nway_reset,
.get_link = e1000_get_link, .get_link = e1000_get_link,
.get_eeprom_len = e1000_get_eeprom_len, .get_eeprom_len = e1000_get_eeprom_len,
.get_eeprom = e1000_get_eeprom, .get_eeprom = e1000_get_eeprom,
.set_eeprom = e1000_set_eeprom, .set_eeprom = e1000_set_eeprom,
.get_ringparam = e1000_get_ringparam, .get_ringparam = e1000_get_ringparam,
.set_ringparam = e1000_set_ringparam, .set_ringparam = e1000_set_ringparam,
.get_pauseparam = e1000_get_pauseparam, .get_pauseparam = e1000_get_pauseparam,
.set_pauseparam = e1000_set_pauseparam, .set_pauseparam = e1000_set_pauseparam,
.self_test = e1000_diag_test, .self_test = e1000_diag_test,
.get_strings = e1000_get_strings, .get_strings = e1000_get_strings,
.set_phys_id = e1000_set_phys_id, .set_phys_id = e1000_set_phys_id,
.get_ethtool_stats = e1000_get_ethtool_stats, .get_ethtool_stats = e1000_get_ethtool_stats,
.get_sset_count = e1000_get_sset_count, .get_sset_count = e1000_get_sset_count,
.get_coalesce = e1000_get_coalesce, .get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce, .set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info, .get_ts_info = ethtool_op_get_ts_info,
}; };
......
...@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw) ...@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
if (hw->phy_init_script) { if (hw->phy_init_script) {
msleep(20); msleep(20);
/* Save off the current value of register 0x2F5B to be restored at /* Save off the current value of register 0x2F5B to be restored
* the end of this routine. */ * at the end of this routine.
*/
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */ /* Disabled the PHY transmitter */
...@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw) ...@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
case e1000_82541: case e1000_82541:
case e1000_82541_rev_2: case e1000_82541_rev_2:
/* These controllers can't ack the 64-bit write when issuing the /* These controllers can't ack the 64-bit write when issuing the
* reset, so use IO-mapping as a workaround to issue the reset */ * reset, so use IO-mapping as a workaround to issue the reset
*/
E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
break; break;
case e1000_82545_rev_3: case e1000_82545_rev_3:
...@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw) ...@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
break; break;
} }
/* After MAC reset, force reload of EEPROM to restore power-on settings to /* After MAC reset, force reload of EEPROM to restore power-on settings
* device. Later controllers reload the EEPROM automatically, so just wait * to device. Later controllers reload the EEPROM automatically, so
* for reload to complete. * just wait for reload to complete.
*/ */
switch (hw->mac_type) { switch (hw->mac_type) {
case e1000_82542_rev2_0: case e1000_82542_rev2_0:
...@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) ...@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
msleep(5); msleep(5);
} }
/* Setup the receive address. This involves initializing all of the Receive /* Setup the receive address. This involves initializing all of the
* Address Registers (RARs 0 - 15). * Receive Address Registers (RARs 0 - 15).
*/ */
e1000_init_rx_addrs(hw); e1000_init_rx_addrs(hw);
...@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) ...@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
for (i = 0; i < mta_size; i++) { for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* use write flush to prevent Memory Write Block (MWB) from /* use write flush to prevent Memory Write Block (MWB) from
* occurring when accessing our register space */ * occurring when accessing our register space
*/
E1000_WRITE_FLUSH(); E1000_WRITE_FLUSH();
} }
...@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw) ...@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw)
case e1000_82546_rev_3: case e1000_82546_rev_3:
break; break;
default: default:
/* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ /* Workaround for PCI-X problem when BIOS sets MMRBC
* incorrectly.
*/
if (hw->bus_type == e1000_bus_type_pcix if (hw->bus_type == e1000_bus_type_pcix
&& e1000_pcix_get_mmrbc(hw) > 2048) && e1000_pcix_get_mmrbc(hw) > 2048)
e1000_pcix_set_mmrbc(hw, 2048); e1000_pcix_set_mmrbc(hw, 2048);
...@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) ...@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
ctrl_ext = er32(CTRL_EXT); ctrl_ext = er32(CTRL_EXT);
/* Relaxed ordering must be disabled to avoid a parity /* Relaxed ordering must be disabled to avoid a parity
* error crash in a PCI slot. */ * error crash in a PCI slot.
*/
ctrl_ext |= E1000_CTRL_EXT_RO_DIS; ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext); ew32(CTRL_EXT, ctrl_ext);
} }
...@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw) ...@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ew32(FCRTL, 0); ew32(FCRTL, 0);
ew32(FCRTH, 0); ew32(FCRTH, 0);
} else { } else {
/* We need to set up the Receive Threshold high and low water marks /* We need to set up the Receive Threshold high and low water
* as well as (optionally) enabling the transmission of XON frames. * marks as well as (optionally) enabling the transmission of
* XON frames.
*/ */
if (hw->fc_send_xon) { if (hw->fc_send_xon) {
ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
...@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) ...@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e1000_config_collision_dist(hw); e1000_config_collision_dist(hw);
/* Check for a software override of the flow control settings, and setup /* Check for a software override of the flow control settings, and setup
* the device accordingly. If auto-negotiation is enabled, then software * the device accordingly. If auto-negotiation is enabled, then
* will have to set the "PAUSE" bits to the correct value in the Tranmsit * software will have to set the "PAUSE" bits to the correct value in
* Config Word Register (TXCW) and re-start auto-negotiation. However, if * the Tranmsit Config Word Register (TXCW) and re-start
* auto-negotiation is disabled, then software will have to manually * auto-negotiation. However, if auto-negotiation is disabled, then
* configure the two flow control enable bits in the CTRL register. * software will have to manually configure the two flow control enable
* bits in the CTRL register.
* *
* The possible values of the "fc" parameter are: * The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled * 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause frames, but * 1: Rx flow control is enabled (we can receive pause frames, but
* not send pause frames). * not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames but we do * 2: Tx flow control is enabled (we can send pause frames but we do
* not support receiving pause frames). * not support receiving pause frames).
* 3: Both Rx and TX flow control (symmetric) are enabled. * 3: Both Rx and TX flow control (symmetric) are enabled.
*/ */
switch (hw->fc) { switch (hw->fc) {
case E1000_FC_NONE: case E1000_FC_NONE:
/* Flow control is completely disabled by a software over-ride. */ /* Flow ctrl is completely disabled by a software over-ride */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break; break;
case E1000_FC_RX_PAUSE: case E1000_FC_RX_PAUSE:
/* RX Flow control is enabled and TX Flow control is disabled by a /* Rx Flow control is enabled and Tx Flow control is disabled by
* software over-ride. Since there really isn't a way to advertise * a software over-ride. Since there really isn't a way to
* that we are capable of RX Pause ONLY, we will advertise that we * advertise that we are capable of Rx Pause ONLY, we will
* support both symmetric and asymmetric RX PAUSE. Later, we will * advertise that we support both symmetric and asymmetric Rx
* disable the adapter's ability to send PAUSE frames. * PAUSE. Later, we will disable the adapter's ability to send
* PAUSE frames.
*/ */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break; break;
case E1000_FC_TX_PAUSE: case E1000_FC_TX_PAUSE:
/* TX Flow control is enabled, and RX Flow control is disabled, by a /* Tx Flow control is enabled, and Rx Flow control is disabled,
* software over-ride. * by a software over-ride.
*/ */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break; break;
case E1000_FC_FULL: case E1000_FC_FULL:
/* Flow control (both RX and TX) is enabled by a software over-ride. */ /* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break; break;
default: default:
...@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) ...@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break; break;
} }
/* Since auto-negotiation is enabled, take the link out of reset (the link /* Since auto-negotiation is enabled, take the link out of reset (the
* will be in reset, because we previously reset the chip). This will * link will be in reset, because we previously reset the chip). This
* restart auto-negotiation. If auto-negotiation is successful then the * will restart auto-negotiation. If auto-negotiation is successful
* link-up status bit will be set and the flow control enable bits (RFCE * then the link-up status bit will be set and the flow control enable
* and TFCE) will be set according to their negotiated value. * bits (RFCE and TFCE) will be set according to their negotiated value.
*/ */
e_dbg("Auto-negotiation enabled\n"); e_dbg("Auto-negotiation enabled\n");
...@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) ...@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
hw->txcw = txcw; hw->txcw = txcw;
msleep(1); msleep(1);
/* If we have a signal (the cable is plugged in) then poll for a "Link-Up" /* If we have a signal (the cable is plugged in) then poll for a
* indication in the Device Status Register. Time-out if a link isn't * "Link-Up" indication in the Device Status Register. Time-out if a
* seen in 500 milliseconds seconds (Auto-negotiation should complete in * link isn't seen in 500 milliseconds seconds (Auto-negotiation should
* less than 500 milliseconds even if the other end is doing it in SW). * complete in less than 500 milliseconds even if the other end is doing
* For internal serdes, we just assume a signal is present, then poll. * it in SW). For internal serdes, we just assume a signal is present,
* then poll.
*/ */
if (hw->media_type == e1000_media_type_internal_serdes || if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
...@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) ...@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e_dbg("Never got a valid link from auto-neg!!!\n"); e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1; hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call /* AutoNeg failed to achieve a link, so we'll call
* e1000_check_for_link. This routine will force the link up if * e1000_check_for_link. This routine will force the
* we detect a signal. This will allow us to communicate with * link up if we detect a signal. This will allow us to
* non-autonegotiating link partners. * communicate with non-autonegotiating link partners.
*/ */
ret_val = e1000_check_for_link(hw); ret_val = e1000_check_for_link(hw);
if (ret_val) { if (ret_val) {
...@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) ...@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
e_dbg("e1000_copper_link_preconfig"); e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL); ctrl = er32(CTRL);
/* With 82543, we need to force speed and duplex on the MAC equal to what /* With 82543, we need to force speed and duplex on the MAC equal to
* the PHY speed and duplex configuration is. In addition, we need to * what the PHY speed and duplex configuration is. In addition, we need
* perform a hardware reset on the PHY to take it out of reset. * to perform a hardware reset on the PHY to take it out of reset.
*/ */
if (hw->mac_type > e1000_82543) { if (hw->mac_type > e1000_82543) {
ctrl |= E1000_CTRL_SLU; ctrl |= E1000_CTRL_SLU;
...@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) ...@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* when autonegotiation advertisement is only 1000Mbps then we /* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave * should disable SmartSpeed and enable Auto MasterSlave
* resolution as hardware default. */ * resolution as hardware default.
*/
if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */ /* Disable SmartSpeed */
ret_val = ret_val =
...@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw) ...@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (hw->autoneg) { if (hw->autoneg) {
/* Setup autoneg and flow control advertisement /* Setup autoneg and flow control advertisement
* and perform autonegotiation */ * and perform autonegotiation
*/
ret_val = e1000_copper_link_autoneg(hw); ret_val = e1000_copper_link_autoneg(hw);
if (ret_val) if (ret_val)
return ret_val; return ret_val;
} else { } else {
/* PHY will be set to 10H, 10F, 100H,or 100F /* PHY will be set to 10H, 10F, 100H,or 100F
* depending on value from forced_speed_duplex. */ * depending on value from forced_speed_duplex.
*/
e_dbg("Forcing speed and duplex\n"); e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw); ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) { if (ret_val) {
...@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) ...@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* setup the PHY advertisement registers accordingly. If * setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the * auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation * "PAUSE" bits to the correct value in the Auto-Negotiation
* Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation. * Advertisement Register (PHY_AUTONEG_ADV) and re-start
* auto-negotiation.
* *
* The possible values of the "fc" parameter are: * The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled * 0: Flow control is completely disabled
...@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) ...@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* capable of RX Pause ONLY, we will advertise that we * capable of RX Pause ONLY, we will advertise that we
* support both symmetric and asymmetric RX PAUSE. Later * support both symmetric and asymmetric RX PAUSE. Later
* (in e1000_config_fc_after_link_up) we will disable the * (in e1000_config_fc_after_link_up) we will disable the
*hw's ability to send PAUSE frames. * hw's ability to send PAUSE frames.
*/ */
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break; break;
...@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) ...@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Are we forcing Full or Half Duplex? */ /* Are we forcing Full or Half Duplex? */
if (hw->forced_speed_duplex == e1000_100_full || if (hw->forced_speed_duplex == e1000_100_full ||
hw->forced_speed_duplex == e1000_10_full) { hw->forced_speed_duplex == e1000_10_full) {
/* We want to force full duplex so we SET the full duplex bits in the /* We want to force full duplex so we SET the full duplex bits
* Device and MII Control Registers. * in the Device and MII Control Registers.
*/ */
ctrl |= E1000_CTRL_FD; ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX; mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
e_dbg("Full Duplex\n"); e_dbg("Full Duplex\n");
} else { } else {
/* We want to force half duplex so we CLEAR the full duplex bits in /* We want to force half duplex so we CLEAR the full duplex bits
* the Device and MII Control Registers. * in the Device and MII Control Registers.
*/ */
ctrl &= ~E1000_CTRL_FD; ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
...@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) ...@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val) if (ret_val)
return ret_val; return ret_val;
/* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI /* Clear Auto-Crossover to force MDI manually. M88E1000 requires
* forced whenever speed are duplex are forced. * MDI forced whenever speed are duplex are forced.
*/ */
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
ret_val = ret_val =
...@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) ...@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link.\n"); e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0; mii_status_reg = 0;
/* We will wait for autoneg to complete or 4.5 seconds to expire. */ /* Wait for autoneg to complete or 4.5 seconds to expire */
for (i = PHY_FORCE_TIME; i > 0; i--) { for (i = PHY_FORCE_TIME; i > 0; i--) {
/* Read the MII Status Register and wait for Auto-Neg Complete bit /* Read the MII Status Register and wait for Auto-Neg
* to be set. * Complete bit to be set.
*/ */
ret_val = ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
...@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) ...@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
msleep(100); msleep(100);
} }
if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
/* We didn't get link. Reset the DSP and wait again for link. */ /* We didn't get link. Reset the DSP and wait again
* for link.
*/
ret_val = e1000_phy_reset_dsp(hw); ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) { if (ret_val) {
e_dbg("Error Resetting PHY DSP\n"); e_dbg("Error Resetting PHY DSP\n");
return ret_val; return ret_val;
} }
} }
/* This loop will early-out if the link condition has been met. */ /* This loop will early-out if the link condition has been
* met
*/
for (i = PHY_FORCE_TIME; i > 0; i--) { for (i = PHY_FORCE_TIME; i > 0; i--) {
if (mii_status_reg & MII_SR_LINK_STATUS) if (mii_status_reg & MII_SR_LINK_STATUS)
break; break;
msleep(100); msleep(100);
/* Read the MII Status Register and wait for Auto-Neg Complete bit /* Read the MII Status Register and wait for Auto-Neg
* to be set. * Complete bit to be set.
*/ */
ret_val = ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
...@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) ...@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
} }
if (hw->phy_type == e1000_phy_m88) { if (hw->phy_type == e1000_phy_m88) {
/* Because we reset the PHY above, we need to re-force TX_CLK in the /* Because we reset the PHY above, we need to re-force TX_CLK in
* Extended PHY Specific Control Register to 25MHz clock. This value * the Extended PHY Specific Control Register to 25MHz clock.
* defaults back to a 2.5MHz clock when the PHY is reset. * This value defaults back to a 2.5MHz clock when the PHY is
* reset.
*/ */
ret_val = ret_val =
e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
...@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) ...@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val) if (ret_val)
return ret_val; return ret_val;
/* In addition, because of the s/w reset above, we need to enable CRS on /* In addition, because of the s/w reset above, we need to
* TX. This must be set for both full and half duplex operation. * enable CRS on Tx. This must be set for both full and half
* duplex operation.
*/ */
ret_val = ret_val =
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
...@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) ...@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
e_dbg("e1000_config_mac_to_phy"); e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of /* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/ * MAC speed/duplex configuration.
*/
if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS; return E1000_SUCCESS;
...@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) ...@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
* registers depending on negotiated values. * registers depending on negotiated values.
*/ */
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
&phy_data); &phy_data);
if (ret_val) if (ret_val)
return ret_val; return ret_val;
...@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) ...@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
ctrl |= E1000_CTRL_SPD_1000; ctrl |= E1000_CTRL_SPD_1000;
else if ((phy_data & M88E1000_PSSR_SPEED) == else if ((phy_data & M88E1000_PSSR_SPEED) ==
M88E1000_PSSR_100MBS) M88E1000_PSSR_100MBS)
ctrl |= E1000_CTRL_SPD_100; ctrl |= E1000_CTRL_SPD_100;
} }
...@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) ...@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
/* The AutoNeg process has completed, so we now need to /* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement Register * read both the Auto Negotiation Advertisement Register
* (Address 4) and the Auto_Negotiation Base Page Ability * (Address 4) and the Auto_Negotiation Base Page
* Register (Address 5) to determine how flow control was * Ability Register (Address 5) to determine how flow
* negotiated. * control was negotiated.
*/ */
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg); &mii_nway_adv_reg);
...@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) ...@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val) if (ret_val)
return ret_val; return ret_val;
/* Two bits in the Auto Negotiation Advertisement Register /* Two bits in the Auto Negotiation Advertisement
* (Address 4) and two bits in the Auto Negotiation Base * Register (Address 4) and two bits in the Auto
* Page Ability Register (Address 5) determine flow control * Negotiation Base Page Ability Register (Address 5)
* for both the PHY and the link partner. The following * determine flow control for both the PHY and the link
* table, taken out of the IEEE 802.3ab/D6.0 dated March 25, * partner. The following table, taken out of the IEEE
* 1999, describes these PAUSE resolution bits and how flow * 802.3ab/D6.0 dated March 25, 1999, describes these
* control is determined based upon these settings. * PAUSE resolution bits and how flow control is
* determined based upon these settings.
* NOTE: DC = Don't Care * NOTE: DC = Don't Care
* *
* LOCAL DEVICE | LINK PARTNER * LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
*-------|---------|-------|---------|-------------------- *-------|---------|-------|---------|------------------
* 0 | 0 | DC | DC | E1000_FC_NONE * 0 | 0 | DC | DC | E1000_FC_NONE
* 0 | 1 | 0 | DC | E1000_FC_NONE * 0 | 1 | 0 | DC | E1000_FC_NONE
* 0 | 1 | 1 | 0 | E1000_FC_NONE * 0 | 1 | 1 | 0 | E1000_FC_NONE
...@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) ...@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
* *
* LOCAL DEVICE | LINK PARTNER * LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|-------------------- *-------|---------|-------|---------|------------------
* 1 | DC | 1 | DC | E1000_FC_FULL * 1 | DC | 1 | DC | E1000_FC_FULL
* *
*/ */
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
/* Now we need to check if the user selected RX ONLY /* Now we need to check if the user selected Rx
* of pause frames. In this case, we had to advertise * ONLY of pause frames. In this case, we had
* FULL flow control because we could not advertise RX * to advertise FULL flow control because we
* ONLY. Hence, we must now check to see if we need to * could not advertise Rx ONLY. Hence, we must
* turn OFF the TRANSMISSION of PAUSE frames. * now check to see if we need to turn OFF the
* TRANSMISSION of PAUSE frames.
*/ */
if (hw->original_fc == E1000_FC_FULL) { if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL; hw->fc = E1000_FC_FULL;
...@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) ...@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
* *
* LOCAL DEVICE | LINK PARTNER * LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|-------------------- *-------|---------|-------|---------|------------------
* 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
* *
*/ */
...@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) ...@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
* *
* LOCAL DEVICE | LINK PARTNER * LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|-------------------- *-------|---------|-------|---------|------------------
* 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
* *
*/ */
...@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) ...@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
e_dbg e_dbg
("Flow Control = RX PAUSE frames only.\n"); ("Flow Control = RX PAUSE frames only.\n");
} }
/* Per the IEEE spec, at this point flow control should be /* Per the IEEE spec, at this point flow control should
* disabled. However, we want to consider that we could * be disabled. However, we want to consider that we
* be connected to a legacy switch that doesn't advertise * could be connected to a legacy switch that doesn't
* desired flow control, but can be forced on the link * advertise desired flow control, but can be forced on
* partner. So if we advertised no flow control, that is * the link partner. So if we advertised no flow
* what we will resolve to. If we advertised some kind of * control, that is what we will resolve to. If we
* receive capability (Rx Pause Only or Full Flow Control) * advertised some kind of receive capability (Rx Pause
* and the link partner advertised none, we will configure * Only or Full Flow Control) and the link partner
* ourselves to enable Rx Flow Control only. We can do * advertised none, we will configure ourselves to
* this safely for two reasons: If the link partner really * enable Rx Flow Control only. We can do this safely
* didn't want flow control enabled, and we enable Rx, no * for two reasons: If the link partner really
* harm done since we won't be receiving any PAUSE frames * didn't want flow control enabled, and we enable Rx,
* anyway. If the intent on the link partner was to have * no harm done since we won't be receiving any PAUSE
* flow control enabled, then by us enabling RX only, we * frames anyway. If the intent on the link partner was
* can at least receive pause frames and process them. * to have flow control enabled, then by us enabling Rx
* This is a good idea because in most cases, since we are * only, we can at least receive pause frames and
* predominantly a server NIC, more times than not we will * process them. This is a good idea because in most
* be asked to delay transmission of packets than asking * cases, since we are predominantly a server NIC, more
* our link partner to pause transmission of frames. * times than not we will be asked to delay transmission
* of packets than asking our link partner to pause
* transmission of frames.
*/ */
else if ((hw->original_fc == E1000_FC_NONE || else if ((hw->original_fc == E1000_FC_NONE ||
hw->original_fc == E1000_FC_TX_PAUSE) || hw->original_fc == E1000_FC_TX_PAUSE) ||
...@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) ...@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
status = er32(STATUS); status = er32(STATUS);
rxcw = er32(RXCW); rxcw = er32(RXCW);
/* /* If we don't have link (auto-negotiation failed or link partner
* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to * cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data), * auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation * we need to force link up. We also need to give auto-negotiation
...@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) ...@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
goto out; goto out;
} }
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
/* /* If we are forcing link and we are receiving /C/ ordered
* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register * sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register * and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner. * in an attempt to auto-negotiate with our link partner.
...@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) ...@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->serdes_has_link = true; hw->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) { } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
/* /* If we force link for non-auto-negotiation switch, check
* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal * link status based on MAC synchronization for internal
* serdes media type. * serdes media type.
*/ */
...@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw) ...@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
if (phy_data & MII_SR_LINK_STATUS) { if (phy_data & MII_SR_LINK_STATUS) {
hw->get_link_status = false; hw->get_link_status = false;
/* Check if there was DownShift, must be checked immediately after /* Check if there was DownShift, must be checked
* link-up */ * immediately after link-up
*/
e1000_check_downshift(hw); e1000_check_downshift(hw);
/* If we are on 82544 or 82543 silicon and speed/duplex /* If we are on 82544 or 82543 silicon and speed/duplex
* are forced to 10H or 10F, then we will implement the polarity * are forced to 10H or 10F, then we will implement the
* reversal workaround. We disable interrupts first, and upon * polarity reversal workaround. We disable interrupts
* returning, place the devices interrupt state to its previous * first, and upon returning, place the devices
* value except for the link status change interrupt which will * interrupt state to its previous value except for the
* link status change interrupt which will
* happen due to the execution of this workaround. * happen due to the execution of this workaround.
*/ */
...@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw) ...@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
} }
} }
/* Configure Flow Control now that Auto-Neg has completed. First, we /* Configure Flow Control now that Auto-Neg has completed.
* need to restore the desired flow control settings because we may * First, we need to restore the desired flow control settings
* have had to re-autoneg with a different link partner. * because we may have had to re-autoneg with a different link
* partner.
*/ */
ret_val = e1000_config_fc_after_link_up(hw); ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) { if (ret_val) {
...@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw) ...@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
} }
/* At this point we know that we are on copper and we have /* At this point we know that we are on copper and we have
* auto-negotiated link. These are conditions for checking the link * auto-negotiated link. These are conditions for checking the
* partner capability register. We use the link speed to determine if * link partner capability register. We use the link speed to
* TBI compatibility needs to be turned on or off. If the link is not * determine if TBI compatibility needs to be turned on or off.
* at gigabit speed, then TBI compatibility is not needed. If we are * If the link is not at gigabit speed, then TBI compatibility
* at gigabit speed, we turn on TBI compatibility. * is not needed. If we are at gigabit speed, we turn on TBI
* compatibility.
*/ */
if (hw->tbi_compatibility_en) { if (hw->tbi_compatibility_en) {
u16 speed, duplex; u16 speed, duplex;
...@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw) ...@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
return ret_val; return ret_val;
} }
if (speed != SPEED_1000) { if (speed != SPEED_1000) {
/* If link speed is not set to gigabit speed, we do not need /* If link speed is not set to gigabit speed, we
* to enable TBI compatibility. * do not need to enable TBI compatibility.
*/ */
if (hw->tbi_compatibility_on) { if (hw->tbi_compatibility_on) {
/* If we previously were in the mode, turn it off. */ /* If we previously were in the mode,
* turn it off.
*/
rctl = er32(RCTL); rctl = er32(RCTL);
rctl &= ~E1000_RCTL_SBP; rctl &= ~E1000_RCTL_SBP;
ew32(RCTL, rctl); ew32(RCTL, rctl);
hw->tbi_compatibility_on = false; hw->tbi_compatibility_on = false;
} }
} else { } else {
/* If TBI compatibility is was previously off, turn it on. For /* If TBI compatibility is was previously off,
* compatibility with a TBI link partner, we will store bad * turn it on. For compatibility with a TBI link
* packets. Some frames have an additional byte on the end and * partner, we will store bad packets. Some
* frames have an additional byte on the end and
* will look like CRC errors to to the hardware. * will look like CRC errors to to the hardware.
*/ */
if (!hw->tbi_compatibility_on) { if (!hw->tbi_compatibility_on) {
...@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) ...@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
*duplex = FULL_DUPLEX; *duplex = FULL_DUPLEX;
} }
/* IGP01 PHY may advertise full duplex operation after speed downgrade even /* IGP01 PHY may advertise full duplex operation after speed downgrade
* if it is operating at half duplex. Here we set the duplex settings to * even if it is operating at half duplex. Here we set the duplex
* match the duplex in the link partner's capabilities. * settings to match the duplex in the link partner's capabilities.
*/ */
if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
...@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) ...@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
*/ */
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{ {
/* Raise the clock input to the Management Data Clock (by setting the MDC /* Raise the clock input to the Management Data Clock (by setting the
* bit), and then delay 10 microseconds. * MDC bit), and then delay 10 microseconds.
*/ */
ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
E1000_WRITE_FLUSH(); E1000_WRITE_FLUSH();
...@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) ...@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
*/ */
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{ {
/* Lower the clock input to the Management Data Clock (by clearing the MDC /* Lower the clock input to the Management Data Clock (by clearing the
* bit), and then delay 10 microseconds. * MDC bit), and then delay 10 microseconds.
*/ */
ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
E1000_WRITE_FLUSH(); E1000_WRITE_FLUSH();
...@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) ...@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
while (mask) { while (mask) {
/* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and /* A "1" is shifted out to the PHY by setting the MDIO bit to
* then raising and lowering the Management Data Clock. A "0" is * "1" and then raising and lowering the Management Data Clock.
* shifted out to the PHY by setting the MDIO bit to "0" and then * A "0" is shifted out to the PHY by setting the MDIO bit to
* raising and lowering the clock. * "0" and then raising and lowering the clock.
*/ */
if (data & mask) if (data & mask)
ctrl |= E1000_CTRL_MDIO; ctrl |= E1000_CTRL_MDIO;
...@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) ...@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
u8 i; u8 i;
/* In order to read a register from the PHY, we need to shift in a total /* In order to read a register from the PHY, we need to shift in a total
* of 18 bits from the PHY. The first two bit (turnaround) times are used * of 18 bits from the PHY. The first two bit (turnaround) times are
* to avoid contention on the MDIO pin when a read operation is performed. * used to avoid contention on the MDIO pin when a read operation is
* These two bits are ignored by us and thrown away. Bits are "shifted in" * performed. These two bits are ignored by us and thrown away. Bits are
* by raising the input to the Management Data Clock (setting the MDC bit), * "shifted in" by raising the input to the Management Data Clock
* and then reading the value of the MDIO bit. * (setting the MDC bit), and then reading the value of the MDIO bit.
*/ */
ctrl = er32(CTRL); ctrl = er32(CTRL);
/* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
* input.
*/
ctrl &= ~E1000_CTRL_MDIO_DIR; ctrl &= ~E1000_CTRL_MDIO_DIR;
ctrl &= ~E1000_CTRL_MDIO; ctrl &= ~E1000_CTRL_MDIO;
ew32(CTRL, ctrl); ew32(CTRL, ctrl);
E1000_WRITE_FLUSH(); E1000_WRITE_FLUSH();
/* Raise and Lower the clock before reading in the data. This accounts for /* Raise and Lower the clock before reading in the data. This accounts
* the turnaround bits. The first clock occurred when we clocked out the * for the turnaround bits. The first clock occurred when we clocked out
* last bit of the Register Address. * the last bit of the Register Address.
*/ */
e1000_raise_mdi_clk(hw, &ctrl); e1000_raise_mdi_clk(hw, &ctrl);
e1000_lower_mdi_clk(hw, &ctrl); e1000_lower_mdi_clk(hw, &ctrl);
...@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, ...@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
if (hw->mac_type > e1000_82543) { if (hw->mac_type > e1000_82543) {
/* Set up Op-code, Phy Address, and register address in the MDI /* Set up Op-code, Phy Address, and register address in the MDI
* Control register. The MAC will take care of interfacing with the * Control register. The MAC will take care of interfacing with
* PHY to retrieve the desired data. * the PHY to retrieve the desired data.
*/ */
if (hw->mac_type == e1000_ce4100) { if (hw->mac_type == e1000_ce4100) {
mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
...@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, ...@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*phy_data = (u16) mdic; *phy_data = (u16) mdic;
} }
} else { } else {
/* We must first send a preamble through the MDIO pin to signal the /* We must first send a preamble through the MDIO pin to signal
* beginning of an MII instruction. This is done by sending 32 * the beginning of an MII instruction. This is done by sending
* consecutive "1" bits. * 32 consecutive "1" bits.
*/ */
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
/* Now combine the next few fields that are required for a read /* Now combine the next few fields that are required for a read
* operation. We use this method instead of calling the * operation. We use this method instead of calling the
* e1000_shift_out_mdi_bits routine five different times. The format of * e1000_shift_out_mdi_bits routine five different times. The
* a MII read instruction consists of a shift out of 14 bits and is * format of a MII read instruction consists of a shift out of
* defined as follows: * 14 bits and is defined as follows:
* <Preamble><SOF><Op Code><Phy Addr><Reg Addr> * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
* followed by a shift in of 18 bits. This first two bits shifted in * followed by a shift in of 18 bits. This first two bits
* are TurnAround bits used to avoid contention on the MDIO pin when a * shifted in are TurnAround bits used to avoid contention on
* READ operation is performed. These two bits are thrown away * the MDIO pin when a READ operation is performed. These two
* followed by a shift in of 16 bits which contains the desired data. * bits are thrown away followed by a shift in of 16 bits which
* contains the desired data.
*/ */
mdic = ((reg_addr) | (phy_addr << 5) | mdic = ((reg_addr) | (phy_addr << 5) |
(PHY_OP_READ << 10) | (PHY_SOF << 12)); (PHY_OP_READ << 10) | (PHY_SOF << 12));
e1000_shift_out_mdi_bits(hw, mdic, 14); e1000_shift_out_mdi_bits(hw, mdic, 14);
/* Now that we've shifted out the read command to the MII, we need to /* Now that we've shifted out the read command to the MII, we
* "shift in" the 16-bit value (18 total bits) of the requested PHY * need to "shift in" the 16-bit value (18 total bits) of the
* register address. * requested PHY register address.
*/ */
*phy_data = e1000_shift_in_mdi_bits(hw); *phy_data = e1000_shift_in_mdi_bits(hw);
} }
...@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, ...@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
} }
} }
} else { } else {
/* We'll need to use the SW defined pins to shift the write command /* We'll need to use the SW defined pins to shift the write
* out to the PHY. We first send a preamble to the PHY to signal the * command out to the PHY. We first send a preamble to the PHY
* beginning of the MII instruction. This is done by sending 32 * to signal the beginning of the MII instruction. This is done
* consecutive "1" bits. * by sending 32 consecutive "1" bits.
*/ */
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
/* Now combine the remaining required fields that will indicate a /* Now combine the remaining required fields that will indicate
* write operation. We use this method instead of calling the * a write operation. We use this method instead of calling the
* e1000_shift_out_mdi_bits routine for each field in the command. The * e1000_shift_out_mdi_bits routine for each field in the
* format of a MII write instruction is as follows: * command. The format of a MII write instruction is as follows:
* <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>. * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>.
*/ */
mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
(PHY_OP_WRITE << 12) | (PHY_SOF << 14)); (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
...@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) ...@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
e_dbg("Resetting Phy...\n"); e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) { if (hw->mac_type > e1000_82543) {
/* Read the device control register and assert the E1000_CTRL_PHY_RST /* Read the device control register and assert the
* bit. Then, take it out of reset. * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
* For e1000 hardware, we delay for 10ms between the assert * For e1000 hardware, we delay for 10ms between the assert
* and deassert. * and de-assert.
*/ */
ctrl = er32(CTRL); ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
...@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) ...@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
E1000_WRITE_FLUSH(); E1000_WRITE_FLUSH();
} else { } else {
/* Read the Extended Device Control Register, assert the PHY_RESET_DIR /* Read the Extended Device Control Register, assert the
* bit to put the PHY into reset. Then, take it out of reset. * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
* out of reset.
*/ */
ctrl_ext = er32(CTRL_EXT); ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
...@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, ...@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_igp_get_info"); e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established, /* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */ * and it stored in the hw->speed_downgraded parameter.
*/
phy_info->downshift = (e1000_downshift) hw->speed_downgraded; phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
/* IGP01E1000 does not need to support it. */ /* IGP01E1000 does not need to support it. */
...@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, ...@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) { IGP01E1000_PSSR_SPEED_1000MBPS) {
/* Local/Remote Receiver Information are only valid at 1000 Mbps */ /* Local/Remote Receiver Information are only valid @ 1000
* Mbps
*/
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val) if (ret_val)
return ret_val; return ret_val;
...@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, ...@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_m88_get_info"); e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established, /* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */ * and it stored in the hw->speed_downgraded parameter.
*/
phy_info->downshift = (e1000_downshift) hw->speed_downgraded; phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
...@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw) ...@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
} }
if (eeprom->type == e1000_eeprom_spi) { if (eeprom->type == e1000_eeprom_spi) {
/* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
* 32KB (incremented by powers of 2). * 128B to 32KB (incremented by powers of 2).
*/ */
/* Set to default value for initial eeprom read. */ /* Set to default value for initial eeprom read. */
eeprom->word_size = 64; eeprom->word_size = 64;
...@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw) ...@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom_size = eeprom_size =
(eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
/* 256B eeprom size was not supported in earlier hardware, so we /* 256B eeprom size was not supported in earlier hardware, so we
* bump eeprom_size up one to ensure that "1" (which maps to 256B) * bump eeprom_size up one to ensure that "1" (which maps to
* is never the result used in the shifting logic below. */ * 256B) is never the result used in the shifting logic below.
*/
if (eeprom_size) if (eeprom_size)
eeprom_size++; eeprom_size++;
...@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) ...@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
*/ */
static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
{ {
/* Lower the clock input to the EEPROM (by clearing the SK bit), and then /* Lower the clock input to the EEPROM (by clearing the SK bit), and
* wait 50 microseconds. * then wait 50 microseconds.
*/ */
*eecd = *eecd & ~E1000_EECD_SK; *eecd = *eecd & ~E1000_EECD_SK;
ew32(EECD, *eecd); ew32(EECD, *eecd);
...@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) ...@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
eecd |= E1000_EECD_DO; eecd |= E1000_EECD_DO;
} }
do { do {
/* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
* and then raising and then lowering the clock (the SK bit controls * "1", and then raising and then lowering the clock (the SK bit
* the clock input to the EEPROM). A "0" is shifted out to the EEPROM * controls the clock input to the EEPROM). A "0" is shifted
* by setting "DI" to "0" and then raising and then lowering the clock. * out to the EEPROM by setting "DI" to "0" and then raising and
* then lowering the clock.
*/ */
eecd &= ~E1000_EECD_DI; eecd &= ~E1000_EECD_DI;
...@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) ...@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
/* In order to read a register from the EEPROM, we need to shift 'count' /* In order to read a register from the EEPROM, we need to shift 'count'
* bits in from the EEPROM. Bits are "shifted in" by raising the clock * bits in from the EEPROM. Bits are "shifted in" by raising the clock
* input to the EEPROM (setting the SK bit), and then reading the value of * input to the EEPROM (setting the SK bit), and then reading the value
* the "DO" bit. During this "shifting in" process the "DI" bit should * of the "DO" bit. During this "shifting in" process the "DI" bit
* always be clear. * should always be clear.
*/ */
eecd = er32(EECD); eecd = er32(EECD);
...@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, ...@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0) if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw); e1000_init_eeprom_params(hw);
/* A check for invalid values: offset too large, too many words, and not /* A check for invalid values: offset too large, too many words, and
* enough words. * not enough words.
*/ */
if ((offset >= eeprom->word_size) if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) { || (words > eeprom->word_size - offset) || (words == 0)) {
...@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, ...@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_EEPROM; return -E1000_ERR_EEPROM;
/* Set up the SPI or Microwire EEPROM for bit-bang reading. We have /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
* acquired the EEPROM at this point, so any returns should release it */ * acquired the EEPROM at this point, so any returns should release it
*/
if (eeprom->type == e1000_eeprom_spi) { if (eeprom->type == e1000_eeprom_spi) {
u16 word_in; u16 word_in;
u8 read_opcode = EEPROM_READ_OPCODE_SPI; u8 read_opcode = EEPROM_READ_OPCODE_SPI;
...@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, ...@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw); e1000_standby_eeprom(hw);
/* Some SPI eeproms use the 8th address bit embedded in the opcode */ /* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((eeprom->address_bits == 8) && (offset >= 128)) if ((eeprom->address_bits == 8) && (offset >= 128))
read_opcode |= EEPROM_A8_OPCODE_SPI; read_opcode |= EEPROM_A8_OPCODE_SPI;
...@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, ...@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset * 2), e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
eeprom->address_bits); eeprom->address_bits);
/* Read the data. The address of the eeprom internally increments with /* Read the data. The address of the eeprom internally
* each byte (spi) being read, saving on the overhead of eeprom setup * increments with each byte (spi) being read, saving on the
* and tear-down. The address counter will roll over if reading beyond * overhead of eeprom setup and tear-down. The address counter
* the size of the eeprom, thus allowing the entire memory to be read * will roll over if reading beyond the size of the eeprom, thus
* starting from any offset. */ * allowing the entire memory to be read starting from any
* offset.
*/
for (i = 0; i < words; i++) { for (i = 0; i < words; i++) {
word_in = e1000_shift_in_ee_bits(hw, 16); word_in = e1000_shift_in_ee_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8); data[i] = (word_in >> 8) | (word_in << 8);
...@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, ...@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset + i), e1000_shift_out_ee_bits(hw, (u16) (offset + i),
eeprom->address_bits); eeprom->address_bits);
/* Read the data. For microwire, each word requires the overhead /* Read the data. For microwire, each word requires the
* of eeprom setup and tear-down. */ * overhead of eeprom setup and tear-down.
*/
data[i] = e1000_shift_in_ee_bits(hw, 16); data[i] = e1000_shift_in_ee_bits(hw, 16);
e1000_standby_eeprom(hw); e1000_standby_eeprom(hw);
} }
...@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, ...@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0) if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw); e1000_init_eeprom_params(hw);
/* A check for invalid values: offset too large, too many words, and not /* A check for invalid values: offset too large, too many words, and
* enough words. * not enough words.
*/ */
if ((offset >= eeprom->word_size) if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) { || (words > eeprom->word_size - offset) || (words == 0)) {
...@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, ...@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw); e1000_standby_eeprom(hw);
/* Some SPI eeproms use the 8th address bit embedded in the opcode */ /* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((eeprom->address_bits == 8) && (offset >= 128)) if ((eeprom->address_bits == 8) && (offset >= 128))
write_opcode |= EEPROM_A8_OPCODE_SPI; write_opcode |= EEPROM_A8_OPCODE_SPI;
...@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, ...@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
/* Send the data */ /* Send the data */
/* Loop to allow for up to whole page write (32 bytes) of eeprom */ /* Loop to allow for up to whole page write (32 bytes) of
* eeprom
*/
while (widx < words) { while (widx < words) {
u16 word_out = data[widx]; u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8); word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_ee_bits(hw, word_out, 16); e1000_shift_out_ee_bits(hw, word_out, 16);
widx++; widx++;
/* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE /* Some larger eeprom sizes are capable of a 32-byte
* operation, while the smaller eeproms are capable of an 8-byte * PAGE WRITE operation, while the smaller eeproms are
* PAGE WRITE operation. Break the inner loop to pass new address * capable of an 8-byte PAGE WRITE operation. Break the
* inner loop to pass new address
*/ */
if ((((offset + widx) * 2) % eeprom->page_size) == 0) { if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
e1000_standby_eeprom(hw); e1000_standby_eeprom(hw);
...@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, ...@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
/* Send the data */ /* Send the data */
e1000_shift_out_ee_bits(hw, data[words_written], 16); e1000_shift_out_ee_bits(hw, data[words_written], 16);
/* Toggle the CS line. This in effect tells the EEPROM to execute /* Toggle the CS line. This in effect tells the EEPROM to
* the previous command. * execute the previous command.
*/ */
e1000_standby_eeprom(hw); e1000_standby_eeprom(hw);
/* Read DO repeatedly until it is high (equal to '1'). The EEPROM will /* Read DO repeatedly until it is high (equal to '1'). The
* signal that the command has been completed by raising the DO signal. * EEPROM will signal that the command has been completed by
* If DO does not go high in 10 milliseconds, then error out. * raising the DO signal. If DO does not go high in 10
* milliseconds, then error out.
*/ */
for (i = 0; i < 200; i++) { for (i = 0; i < 200; i++) {
eecd = er32(EECD); eecd = er32(EECD);
...@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw) ...@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw)
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/* If the offset we want to clear is the same offset of the /* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of the * manageability VLAN ID, then clear all bits except that of the
* manageability unit */ * manageability unit
*/
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
E1000_WRITE_FLUSH(); E1000_WRITE_FLUSH();
...@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, ...@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* counters overcount this packet as a CRC error and undercount * counters overcount this packet as a CRC error and undercount
* the packet as a good packet * the packet as a good packet
*/ */
/* This packet should not be counted as a CRC error. */ /* This packet should not be counted as a CRC error. */
stats->crcerrs--; stats->crcerrs--;
/* This packet does count as a Good Packet Received. */ /* This packet does count as a Good Packet Received. */
stats->gprc++; stats->gprc++;
/* Adjust the Good Octets received counters */ /* Adjust the Good Octets received counters */
carry_bit = 0x80000000 & stats->gorcl; carry_bit = 0x80000000 & stats->gorcl;
stats->gorcl += frame_len; stats->gorcl += frame_len;
/* If the high bit of Gorcl (the low 32 bits of the Good Octets /* If the high bit of Gorcl (the low 32 bits of the Good Octets
...@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, ...@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
if (ret_val) if (ret_val)
return ret_val; return ret_val;
/* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to /* If speed is 1000 Mbps, must read the
* find the polarity status */ * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status
*/
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) { IGP01E1000_PSSR_SPEED_1000MBPS) {
...@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, ...@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
e1000_rev_polarity_reversed : e1000_rev_polarity_reversed :
e1000_rev_polarity_normal; e1000_rev_polarity_normal;
} else { } else {
/* For 10 Mbps, read the polarity bit in the status register. (for /* For 10 Mbps, read the polarity bit in the status
* 100 Mbps this bit is always 0) */ * register. (for 100 Mbps this bit is always 0)
*/
*polarity = *polarity =
(phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
e1000_rev_polarity_reversed : e1000_rev_polarity_reversed :
...@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) ...@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
} }
} else { } else {
if (hw->dsp_config_state == e1000_dsp_config_activated) { if (hw->dsp_config_state == e1000_dsp_config_activated) {
/* Save off the current value of register 0x2F5B to be restored at /* Save off the current value of register 0x2F5B to be
* the end of the routines. */ * restored at the end of the routines.
*/
ret_val = ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
...@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) ...@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20); msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000, ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_FORCE_GIGA); IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val) if (ret_val)
return ret_val; return ret_val;
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
...@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) ...@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
} }
ret_val = e1000_write_phy_reg(hw, 0x0000, ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_RESTART_AUTONEG); IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val) if (ret_val)
return ret_val; return ret_val;
...@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) ...@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
} }
if (hw->ffe_config_state == e1000_ffe_config_active) { if (hw->ffe_config_state == e1000_ffe_config_active) {
/* Save off the current value of register 0x2F5B to be restored at /* Save off the current value of register 0x2F5B to be
* the end of the routines. */ * restored at the end of the routines.
*/
ret_val = ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
...@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) ...@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20); msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000, ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_FORCE_GIGA); IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val) if (ret_val)
return ret_val; return ret_val;
ret_val = ret_val =
...@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) ...@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
return ret_val; return ret_val;
ret_val = e1000_write_phy_reg(hw, 0x0000, ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_RESTART_AUTONEG); IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val) if (ret_val)
return ret_val; return ret_val;
...@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) ...@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return E1000_SUCCESS; return E1000_SUCCESS;
/* During driver activity LPLU should not be used or it will attain link /* During driver activity LPLU should not be used or it will attain link
* from the lowest speeds starting from 10Mbps. The capability is used for * from the lowest speeds starting from 10Mbps. The capability is used
* Dx transitions and states */ * for Dx transitions and states
*/
if (hw->mac_type == e1000_82541_rev_2 if (hw->mac_type == e1000_82541_rev_2
|| hw->mac_type == e1000_82547_rev_2) { || hw->mac_type == e1000_82547_rev_2) {
ret_val = ret_val =
...@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) ...@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return ret_val; return ret_val;
} }
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used during /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* Dx states where the power conservation is most important. During * during Dx states where the power conservation is most
* driver activity we should enable SmartSpeed, so performance is * important. During driver activity we should enable
* maintained. */ * SmartSpeed, so performance is maintained.
*/
if (hw->smart_speed == e1000_smart_speed_on) { if (hw->smart_speed == e1000_smart_speed_on) {
ret_val = ret_val =
e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
......
...@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) ...@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
* e1000_init_module is the first routine called when the driver is * e1000_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem. * loaded. All it does is register with the PCI subsystem.
**/ **/
static int __init e1000_init_module(void) static int __init e1000_init_module(void)
{ {
int ret; int ret;
...@@ -266,7 +265,6 @@ module_init(e1000_init_module); ...@@ -266,7 +265,6 @@ module_init(e1000_init_module);
* e1000_exit_module is called just before the driver is removed * e1000_exit_module is called just before the driver is removed
* from memory. * from memory.
**/ **/
static void __exit e1000_exit_module(void) static void __exit e1000_exit_module(void)
{ {
pci_unregister_driver(&e1000_driver); pci_unregister_driver(&e1000_driver);
...@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter) ...@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
* e1000_irq_disable - Mask off interrupt generation on the NIC * e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure * @adapter: board private structure
**/ **/
static void e1000_irq_disable(struct e1000_adapter *adapter) static void e1000_irq_disable(struct e1000_adapter *adapter)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
...@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) ...@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
* e1000_irq_enable - Enable default interrupt generation settings * e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure * @adapter: board private structure
**/ **/
static void e1000_irq_enable(struct e1000_adapter *adapter) static void e1000_irq_enable(struct e1000_adapter *adapter)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
...@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter) ...@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_configure_rx(adapter); e1000_configure_rx(adapter);
/* call E1000_DESC_UNUSED which always leaves /* call E1000_DESC_UNUSED which always leaves
* at least 1 descriptor unused to make sure * at least 1 descriptor unused to make sure
* next_to_use != next_to_clean */ * next_to_use != next_to_clean
*/
for (i = 0; i < adapter->num_rx_queues; i++) { for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i]; struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring, adapter->alloc_rx_buf(adapter, ring,
E1000_DESC_UNUSED(ring)); E1000_DESC_UNUSED(ring));
} }
} }
...@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter) ...@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
* The phy may be powered down to save power and turn off link when the * The phy may be powered down to save power and turn off link when the
* driver is unloaded and wake on lan is not enabled (among others) * driver is unloaded and wake on lan is not enabled (among others)
* *** this routine MUST be followed by a call to e1000_reset *** * *** this routine MUST be followed by a call to e1000_reset ***
*
**/ **/
void e1000_power_up_phy(struct e1000_adapter *adapter) void e1000_power_up_phy(struct e1000_adapter *adapter)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
...@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter) ...@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
/* Just clear the power down bit to wake the phy back up */ /* Just clear the power down bit to wake the phy back up */
if (hw->media_type == e1000_media_type_copper) { if (hw->media_type == e1000_media_type_copper) {
/* according to the manual, the phy will retain its /* according to the manual, the phy will retain its
* settings across a power-down/up cycle */ * settings across a power-down/up cycle
*/
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN; mii_reg &= ~MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
...@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) ...@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
* The PHY cannot be powered down if any of the following is true * * The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled * (a) WoL is enabled
* (b) AMT is active * (b) AMT is active
* (c) SoL/IDER session is active */ * (c) SoL/IDER session is active
*/
if (!adapter->wol && hw->mac_type >= e1000_82540 && if (!adapter->wol && hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) { hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0; u16 mii_reg = 0;
...@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter) ...@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter); e1000_irq_disable(adapter);
/* /* Setting DOWN must be after irq_disable to prevent
* Setting DOWN must be after irq_disable to prevent
* a screaming interrupt. Setting DOWN also prevents * a screaming interrupt. Setting DOWN also prevents
* tasks from rescheduling. * tasks from rescheduling.
*/ */
...@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter) ...@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
* rounded up to the next 1KB and expressed in KB. Likewise, * rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least * the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and * one full receive packet and is similarly rounded up and
* expressed in KB. */ * expressed in KB.
*/
pba = er32(PBA); pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */ /* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16; tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */ /* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff; pba &= 0xffff;
/* /* the Tx fifo also stores 16 bytes of information about the Tx
* the tx fifo also stores 16 bytes of information about the tx
* but don't include ethernet FCS because hardware appends it * but don't include ethernet FCS because hardware appends it
*/ */
min_tx_space = (hw->max_frame_size + min_tx_space = (hw->max_frame_size +
...@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter) ...@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size, /* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO * and the min Tx FIFO size is less than the current Rx FIFO
* allocation, take space away from current Rx allocation */ * allocation, take space away from current Rx allocation
*/
if (tx_space < min_tx_space && if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) { ((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space); pba = pba - (min_tx_space - tx_space);
...@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter) ...@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
break; break;
} }
/* if short on rx space, rx wins and must trump tx /* if short on Rx space, Rx wins and must trump Tx
* adjustment or use Early Receive if available */ * adjustment or use Early Receive if available
*/
if (pba < min_rx_space) if (pba < min_rx_space)
pba = min_rx_space; pba = min_rx_space;
} }
...@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter) ...@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(PBA, pba); ew32(PBA, pba);
/* /* flow control settings:
* flow control settings:
* The high water mark must be low enough to fit one full frame * The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO. * (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of: * Set it to the lower of:
...@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter) ...@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL); u32 ctrl = er32(CTRL);
/* clear phy power management bit if we are in gig only mode, /* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which * which if enabled will attempt negotiation to 100Mb, which
* can cause a loss of link at power off or driver unload */ * can cause a loss of link at power off or driver unload
*/
ctrl &= ~E1000_CTRL_SWDPIN3; ctrl &= ~E1000_CTRL_SWDPIN3;
ew32(CTRL, ctrl); ew32(CTRL, ctrl);
} }
...@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev) ...@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
static netdev_features_t e1000_fix_features(struct net_device *netdev, static netdev_features_t e1000_fix_features(struct net_device *netdev,
netdev_features_t features) netdev_features_t features)
{ {
/* /* Since there is no support for separate Rx/Tx vlan accel
* Since there is no support for separate rx/tx vlan accel * enable/disable make sure Tx flag is always in same state as Rx.
* enable/disable make sure tx flag is always in same state as rx.
*/ */
if (features & NETIF_F_HW_VLAN_RX) if (features & NETIF_F_HW_VLAN_RX)
features |= NETIF_F_HW_VLAN_TX; features |= NETIF_F_HW_VLAN_TX;
...@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) if (err)
goto err_sw_init; goto err_sw_init;
/* /* there is a workaround being applied below that limits
* there is a workaround being applied below that limits
* 64-bit DMA addresses to 64-bit hardware. There are some * 64-bit DMA addresses to 64-bit hardware. There are some
* 32-bit adapters that Tx hang when given 64-bit DMA addresses * 32-bit adapters that Tx hang when given 64-bit DMA addresses
*/ */
pci_using_dac = 0; pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) && if ((hw->bus_type == e1000_bus_type_pcix) &&
!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
/* /* according to DMA-API-HOWTO, coherent calls will always
* according to DMA-API-HOWTO, coherent calls will always
* succeed if the set call did * succeed if the set call did
*/ */
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
...@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} }
/* before reading the EEPROM, reset the controller to /* before reading the EEPROM, reset the controller to
* put the device in a known good starting state */ * put the device in a known good starting state
*/
e1000_reset_hw(hw); e1000_reset_hw(hw);
...@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (e1000_validate_eeprom_checksum(hw) < 0) { if (e1000_validate_eeprom_checksum(hw) < 0) {
e_err(probe, "The EEPROM Checksum Is Not Valid\n"); e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter); e1000_dump_eeprom(adapter);
/* /* set MAC address to all zeroes to invalidate and temporary
* set MAC address to all zeroes to invalidate and temporary
* disable this device for the user. This blocks regular * disable this device for the user. This blocks regular
* traffic while still permitting ethtool ioctls from reaching * traffic while still permitting ethtool ioctls from reaching
* the hardware as well as allowing the user to run the * the hardware as well as allowing the user to run the
...@@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* now that we have the eeprom settings, apply the special cases /* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support * where the eeprom may be wrong or the board simply won't support
* wake on lan on a particular port */ * wake on lan on a particular port
*/
switch (pdev->device) { switch (pdev->device) {
case E1000_DEV_ID_82546GB_PCIE: case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0; adapter->eeprom_wol = 0;
...@@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER: case E1000_DEV_ID_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber /* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */ * regardless of eeprom setting
*/
if (er32(STATUS) & E1000_STATUS_FUNC_1) if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0; adapter->eeprom_wol = 0;
break; break;
...@@ -1270,7 +1267,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -1270,7 +1267,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* Hot-Plug event, or because the driver is going to be removed from * Hot-Plug event, or because the driver is going to be removed from
* memory. * memory.
**/ **/
static void e1000_remove(struct pci_dev *pdev) static void e1000_remove(struct pci_dev *pdev)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
...@@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev) ...@@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
* e1000_sw_init initializes the Adapter private data structure. * e1000_sw_init initializes the Adapter private data structure.
* e1000_init_hw_struct MUST be called before this function * e1000_init_hw_struct MUST be called before this function
**/ **/
static int e1000_sw_init(struct e1000_adapter *adapter) static int e1000_sw_init(struct e1000_adapter *adapter)
{ {
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
...@@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter) ...@@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* We allocate one ring per queue at run-time since we don't know the * We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time. * number of queues at compile-time.
**/ **/
static int e1000_alloc_queues(struct e1000_adapter *adapter) static int e1000_alloc_queues(struct e1000_adapter *adapter)
{ {
adapter->tx_ring = kcalloc(adapter->num_tx_queues, adapter->tx_ring = kcalloc(adapter->num_tx_queues,
...@@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter) ...@@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started, * handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready. * and the stack is notified that the interface is ready.
**/ **/
static int e1000_open(struct net_device *netdev) static int e1000_open(struct net_device *netdev)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
...@@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev) ...@@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
/* before we allocate an interrupt, we must be ready to handle it. /* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our * as soon as we call pci_request_irq, so we have to setup our
* clean_rx handler before we do so. */ * clean_rx handler before we do so.
*/
e1000_configure(adapter); e1000_configure(adapter);
err = e1000_request_irq(adapter); err = e1000_request_irq(adapter);
...@@ -1444,7 +1438,6 @@ static int e1000_open(struct net_device *netdev) ...@@ -1444,7 +1438,6 @@ static int e1000_open(struct net_device *netdev)
* needs to be disabled. A global MAC reset is issued to stop the * needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed. * hardware, and all transmit and receive resources are freed.
**/ **/
static int e1000_close(struct net_device *netdev) static int e1000_close(struct net_device *netdev)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
...@@ -1459,10 +1452,11 @@ static int e1000_close(struct net_device *netdev) ...@@ -1459,10 +1452,11 @@ static int e1000_close(struct net_device *netdev)
e1000_free_all_rx_resources(adapter); e1000_free_all_rx_resources(adapter);
/* kill manageability vlan ID if supported, but not if a vlan with /* kill manageability vlan ID if supported, but not if a vlan with
* the same ID is registered on the host OS (let 8021q kill it) */ * the same ID is registered on the host OS (let 8021q kill it)
*/
if ((hw->mng_cookie.status & if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
} }
...@@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, ...@@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
unsigned long end = begin + len; unsigned long end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory /* First rev 82545 and 82546 need to not allow any memory
* write location to cross 64k boundary due to errata 23 */ * write location to cross 64k boundary due to errata 23
*/
if (hw->mac_type == e1000_82545 || if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 || hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) { hw->mac_type == e1000_82546) {
...@@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, ...@@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
* *
* Return 0 on success, negative on failure * Return 0 on success, negative on failure
**/ **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter, static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *txdr) struct e1000_tx_ring *txdr)
{ {
...@@ -1574,7 +1568,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, ...@@ -1574,7 +1568,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
* *
* Return 0 on success, negative on failure * Return 0 on success, negative on failure
**/ **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{ {
int i, err = 0; int i, err = 0;
...@@ -1599,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) ...@@ -1599,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
* *
* Configure the Tx unit of the MAC after a reset. * Configure the Tx unit of the MAC after a reset.
**/ **/
static void e1000_configure_tx(struct e1000_adapter *adapter) static void e1000_configure_tx(struct e1000_adapter *adapter)
{ {
u64 tdba; u64 tdba;
...@@ -1620,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ...@@ -1620,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
ew32(TDT, 0); ew32(TDT, 0);
ew32(TDH, 0); ew32(TDH, 0);
adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); E1000_TDH : E1000_82542_TDH);
adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
E1000_TDT : E1000_82542_TDT);
break; break;
} }
...@@ -1676,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ...@@ -1676,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
adapter->txd_cmd |= E1000_TXD_CMD_RS; adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll /* Cache if we're 82544 running in PCI-X because we'll
* need this to apply a workaround later in the send path. */ * need this to apply a workaround later in the send path.
*/
if (hw->mac_type == e1000_82544 && if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix) hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = true; adapter->pcix_82544 = true;
...@@ -1692,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ...@@ -1692,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* *
* Returns 0 on success, negative on failure * Returns 0 on success, negative on failure
**/ **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter, static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rxdr) struct e1000_rx_ring *rxdr)
{ {
...@@ -1771,7 +1765,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, ...@@ -1771,7 +1765,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
* *
* Return 0 on success, negative on failure * Return 0 on success, negative on failure
**/ **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{ {
int i, err = 0; int i, err = 0;
...@@ -1840,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) ...@@ -1840,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */ /* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) { if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic /* UPE and MPE will be handled by normal PROMISC logic
* in e1000e_set_rx_mode */ * in e1000e_set_rx_mode
*/
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */ E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
...@@ -1862,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) ...@@ -1862,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
* *
* Configure the Rx unit of the MAC after a reset. * Configure the Rx unit of the MAC after a reset.
**/ **/
static void e1000_configure_rx(struct e1000_adapter *adapter) static void e1000_configure_rx(struct e1000_adapter *adapter)
{ {
u64 rdba; u64 rdba;
...@@ -1895,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ...@@ -1895,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
} }
/* Setup the HW Rx Head and Tail Descriptor Pointers and /* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */ * the Base and Length of the Rx Descriptor Ring
*/
switch (adapter->num_rx_queues) { switch (adapter->num_rx_queues) {
case 1: case 1:
default: default:
...@@ -1905,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ...@@ -1905,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
ew32(RDT, 0); ew32(RDT, 0);
ew32(RDH, 0); ew32(RDH, 0);
adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); E1000_RDH : E1000_82542_RDH);
adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
E1000_RDT : E1000_82542_RDT);
break; break;
} }
...@@ -1932,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ...@@ -1932,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* *
* Free all transmit software resources * Free all transmit software resources
**/ **/
static void e1000_free_tx_resources(struct e1000_adapter *adapter, static void e1000_free_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring) struct e1000_tx_ring *tx_ring)
{ {
...@@ -1955,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter, ...@@ -1955,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
* *
* Free all transmit software resources * Free all transmit software resources
**/ **/
void e1000_free_all_tx_resources(struct e1000_adapter *adapter) void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{ {
int i; int i;
...@@ -1990,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, ...@@ -1990,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
* @adapter: board private structure * @adapter: board private structure
* @tx_ring: ring to be cleaned * @tx_ring: ring to be cleaned
**/ **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter, static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring) struct e1000_tx_ring *tx_ring)
{ {
...@@ -2026,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, ...@@ -2026,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_tx_rings - Free Tx Buffers for all queues * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure * @adapter: board private structure
**/ **/
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{ {
int i; int i;
...@@ -2042,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) ...@@ -2042,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
* *
* Free all receive software resources * Free all receive software resources
**/ **/
static void e1000_free_rx_resources(struct e1000_adapter *adapter, static void e1000_free_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring) struct e1000_rx_ring *rx_ring)
{ {
...@@ -2065,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter, ...@@ -2065,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
* *
* Free all receive software resources * Free all receive software resources
**/ **/
void e1000_free_all_rx_resources(struct e1000_adapter *adapter) void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{ {
int i; int i;
...@@ -2079,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter) ...@@ -2079,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
* @adapter: board private structure * @adapter: board private structure
* @rx_ring: ring to free buffers from * @rx_ring: ring to free buffers from
**/ **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter, static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring) struct e1000_rx_ring *rx_ring)
{ {
...@@ -2138,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, ...@@ -2138,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_rx_rings - Free Rx Buffers for all queues * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
* @adapter: board private structure * @adapter: board private structure
**/ **/
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{ {
int i; int i;
...@@ -2198,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter) ...@@ -2198,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
* *
* Returns 0 on success, negative on failure * Returns 0 on success, negative on failure
**/ **/
static int e1000_set_mac(struct net_device *netdev, void *p) static int e1000_set_mac(struct net_device *netdev, void *p)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
...@@ -2233,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p) ...@@ -2233,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
* responsible for configuring the hardware for proper unicast, multicast, * responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior. * promiscuous mode, and all-multi behavior.
**/ **/
static void e1000_set_rx_mode(struct net_device *netdev) static void e1000_set_rx_mode(struct net_device *netdev)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
...@@ -2317,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev) ...@@ -2317,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
} }
/* write the hash table completely, write from bottom to avoid /* write the hash table completely, write from bottom to avoid
* both stupid write combining chipsets, and flushing each write */ * both stupid write combining chipsets, and flushing each write
*/
for (i = mta_reg_count - 1; i >= 0 ; i--) { for (i = mta_reg_count - 1; i >= 0 ; i--) {
/* /* If we are on an 82544 has an errata where writing odd
* If we are on an 82544 has an errata where writing odd
* offsets overwrites the previous even offset, but writing * offsets overwrites the previous even offset, but writing
* backwards over the range solves the issue by always * backwards over the range solves the issue by always
* writing the odd offset first * writing the odd offset first
...@@ -2458,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work) ...@@ -2458,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work)
bool txb2b = true; bool txb2b = true;
/* update snapshot of PHY registers on LSC */ /* update snapshot of PHY registers on LSC */
e1000_get_speed_and_duplex(hw, e1000_get_speed_and_duplex(hw,
&adapter->link_speed, &adapter->link_speed,
&adapter->link_duplex); &adapter->link_duplex);
ctrl = er32(CTRL); ctrl = er32(CTRL);
pr_info("%s NIC Link is Up %d Mbps %s, " pr_info("%s NIC Link is Up %d Mbps %s, "
...@@ -2533,7 +2519,8 @@ static void e1000_watchdog(struct work_struct *work) ...@@ -2533,7 +2519,8 @@ static void e1000_watchdog(struct work_struct *work)
/* We've lost link, so the controller stops DMA, /* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going * but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx. * to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context). */ * (Do the reset outside of interrupt context).
*/
adapter->tx_timeout_count++; adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task); schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */ /* exit immediately since reset is imminent */
...@@ -2543,8 +2530,7 @@ static void e1000_watchdog(struct work_struct *work) ...@@ -2543,8 +2530,7 @@ static void e1000_watchdog(struct work_struct *work)
/* Simple mode for Interrupt Throttle Rate (ITR) */ /* Simple mode for Interrupt Throttle Rate (ITR) */
if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
/* /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000; * Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000. * everyone else is between 2000-8000.
*/ */
...@@ -2659,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter) ...@@ -2659,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now; goto set_itr_now;
} }
adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
adapter->tx_itr, adapter->total_tx_packets,
adapter->total_tx_packets, adapter->total_tx_bytes);
adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */ /* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency; adapter->tx_itr = low_latency;
adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
adapter->rx_itr, adapter->total_rx_packets,
adapter->total_rx_packets, adapter->total_rx_bytes);
adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */ /* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency; adapter->rx_itr = low_latency;
...@@ -2696,10 +2680,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter) ...@@ -2696,10 +2680,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
if (new_itr != adapter->itr) { if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk /* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is * by adding intermediate steps when interrupt rate is
* increasing */ * increasing
*/
new_itr = new_itr > adapter->itr ? new_itr = new_itr > adapter->itr ?
min(adapter->itr + (new_itr >> 2), new_itr) : min(adapter->itr + (new_itr >> 2), new_itr) :
new_itr; new_itr;
adapter->itr = new_itr; adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256)); ew32(ITR, 1000000000 / (new_itr * 256));
} }
...@@ -2861,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, ...@@ -2861,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* Workaround for Controller erratum -- /* Workaround for Controller erratum --
* descriptor for non-tso packet in a linear SKB that follows a * descriptor for non-tso packet in a linear SKB that follows a
* tso gets written back prematurely before the data is fully * tso gets written back prematurely before the data is fully
* DMA'd to the controller */ * DMA'd to the controller
*/
if (!skb->data_len && tx_ring->last_tx_tso && if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) { !skb_is_gso(skb)) {
tx_ring->last_tx_tso = false; tx_ring->last_tx_tso = false;
...@@ -2869,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, ...@@ -2869,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
} }
/* Workaround for premature desc write-backs /* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */ * in TSO mode. Append 4-byte sentinel desc
*/
if (unlikely(mss && !nr_frags && size == len && size > 8)) if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4; size -= 4;
/* work-around for errata 10 and it applies /* work-around for errata 10 and it applies
...@@ -2882,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, ...@@ -2882,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size = 2015; size = 2015;
/* Workaround for potential 82544 hang in PCI-X. Avoid /* Workaround for potential 82544 hang in PCI-X. Avoid
* terminating buffers within evenly-aligned dwords. */ * terminating buffers within evenly-aligned dwords.
*/
if (unlikely(adapter->pcix_82544 && if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) && !((unsigned long)(skb->data + offset + size - 1) & 4) &&
size > 4)) size > 4))
...@@ -2894,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, ...@@ -2894,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->mapped_as_page = false; buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev, buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset, skb->data + offset,
size, DMA_TO_DEVICE); size, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error; goto dma_error;
buffer_info->next_to_watch = i; buffer_info->next_to_watch = i;
...@@ -2925,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter, ...@@ -2925,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i]; buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd); size = min(len, max_per_txd);
/* Workaround for premature desc write-backs /* Workaround for premature desc write-backs
* in TSO mode. Append 4-byte sentinel desc */ * in TSO mode. Append 4-byte sentinel desc
if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) */
if (unlikely(mss && f == (nr_frags-1) &&
size == len && size > 8))
size -= 4; size -= 4;
/* Workaround for potential 82544 hang in PCI-X. /* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned * Avoid terminating buffers within evenly-aligned
* dwords. */ * dwords.
*/
bufend = (unsigned long) bufend = (unsigned long)
page_to_phys(skb_frag_page(frag)); page_to_phys(skb_frag_page(frag));
bufend += offset + size - 1; bufend += offset + size - 1;
...@@ -2994,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, ...@@ -2994,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
E1000_TXD_CMD_TSE; E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8; txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
...@@ -3035,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, ...@@ -3035,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, * applicable for weak-ordered memory model archs,
* such as IA-64). */ * such as IA-64).
*/
wmb(); wmb();
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
writel(i, hw->hw_addr + tx_ring->tdt); writel(i, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to our tail /* we need this if more than one processor can write to our tail
* at a time, it syncronizes IO on IA64/Altix systems */ * at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb(); mmiowb();
} }
...@@ -3090,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) ...@@ -3090,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
netif_stop_queue(netdev); netif_stop_queue(netdev);
/* Herbert's original patch had: /* Herbert's original patch had:
* smp_mb__after_netif_stop_queue(); * smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */ * but since that doesn't exist yet, just open code it.
*/
smp_mb(); smp_mb();
/* We need to check again in a case another CPU has just /* We need to check again in a case another CPU has just
* made room available. */ * made room available.
*/
if (likely(E1000_DESC_UNUSED(tx_ring) < size)) if (likely(E1000_DESC_UNUSED(tx_ring) < size))
return -EBUSY; return -EBUSY;
...@@ -3105,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) ...@@ -3105,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
} }
static int e1000_maybe_stop_tx(struct net_device *netdev, static int e1000_maybe_stop_tx(struct net_device *netdev,
struct e1000_tx_ring *tx_ring, int size) struct e1000_tx_ring *tx_ring, int size)
{ {
if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
return 0; return 0;
...@@ -3129,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, ...@@ -3129,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int tso; int tso;
unsigned int f; unsigned int f;
/* This goes back to the question of how to logically map a tx queue /* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively * to a flow. Right now, performance is impacted slightly negatively
* if using multiple tx queues. If the stack breaks away from a * if using multiple Tx queues. If the stack breaks away from a
* single qdisc implementation, we can look at this again. */ * single qdisc implementation, we can look at this again.
*/
tx_ring = adapter->tx_ring; tx_ring = adapter->tx_ring;
if (unlikely(skb->len <= 0)) { if (unlikely(skb->len <= 0)) {
...@@ -3157,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, ...@@ -3157,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* initiating the DMA for each buffer. The calc is: * initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't * 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss * overrun the FIFO, adjust the max buffer len if mss
* drops. */ * drops.
*/
if (mss) { if (mss) {
u8 hdr_len; u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd); max_per_txd = min(mss << 2, max_per_txd);
...@@ -3173,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, ...@@ -3173,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* this hardware's requirements * this hardware's requirements
* NOTE: this is a TSO only workaround * NOTE: this is a TSO only workaround
* if end byte alignment not correct move us * if end byte alignment not correct move us
* into the next dword */ * into the next dword
if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) */
if ((unsigned long)(skb_tail_pointer(skb) - 1)
& 4)
break; break;
/* fall through */ /* fall through */
pull_size = min((unsigned int)4, skb->data_len); pull_size = min((unsigned int)4, skb->data_len);
...@@ -3222,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, ...@@ -3222,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count += nr_frags; count += nr_frags;
/* need: count + 2 desc gap to keep tail from touching /* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */ * head, otherwise try next time
*/
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
...@@ -3261,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, ...@@ -3261,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS; tx_flags |= E1000_TX_FLAGS_NO_FCS;
count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
nr_frags, mss); nr_frags, mss);
if (count) { if (count) {
netdev_sent_queue(netdev, skb->len); netdev_sent_queue(netdev, skb->len);
...@@ -3363,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter) ...@@ -3363,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
/* Print Registers */ /* Print Registers */
e1000_regdump(adapter); e1000_regdump(adapter);
/* /* transmit dump */
* transmit dump
*/
pr_info("TX Desc ring0 dump\n"); pr_info("TX Desc ring0 dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
...@@ -3426,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter) ...@@ -3426,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
} }
rx_ring_summary: rx_ring_summary:
/* /* receive dump */
* receive dump
*/
pr_info("\nRX Desc ring dump\n"); pr_info("\nRX Desc ring dump\n");
/* Legacy Receive Descriptor Format /* Legacy Receive Descriptor Format
...@@ -3493,7 +3489,6 @@ static void e1000_dump(struct e1000_adapter *adapter) ...@@ -3493,7 +3489,6 @@ static void e1000_dump(struct e1000_adapter *adapter)
* e1000_tx_timeout - Respond to a Tx Hang * e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure * @netdev: network interface device structure
**/ **/
static void e1000_tx_timeout(struct net_device *netdev) static void e1000_tx_timeout(struct net_device *netdev)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
...@@ -3521,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work) ...@@ -3521,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure. * Returns the address of the device statistics structure.
* The statistics are actually updated from the watchdog. * The statistics are actually updated from the watchdog.
**/ **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev) static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{ {
/* only return the current stats */ /* only return the current stats */
...@@ -3535,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev) ...@@ -3535,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
* *
* Returns 0 on success, negative on failure * Returns 0 on success, negative on failure
**/ **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu) static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
...@@ -3572,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -3572,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* means we reserve 2 more, this pushes us to allocate from the next * means we reserve 2 more, this pushes us to allocate from the next
* larger slab size. * larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab * i.e. RXBUFFER_2048 --> size-4096 slab
* however with the new *_jumbo_rx* routines, jumbo receives will use * however with the new *_jumbo_rx* routines, jumbo receives will use
* fragmented skbs */ * fragmented skbs
*/
if (max_frame <= E1000_RXBUFFER_2048) if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048; adapter->rx_buffer_len = E1000_RXBUFFER_2048;
...@@ -3608,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -3608,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* e1000_update_stats - Update the board statistics counters * e1000_update_stats - Update the board statistics counters
* @adapter: board private structure * @adapter: board private structure
**/ **/
void e1000_update_stats(struct e1000_adapter *adapter) void e1000_update_stats(struct e1000_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
...@@ -3619,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter) ...@@ -3619,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
/* /* Prevent stats update while adapter is being reset, or if the pci
* Prevent stats update while adapter is being reset, or if the pci
* connection is down. * connection is down.
*/ */
if (adapter->link_speed == 0) if (adapter->link_speed == 0)
...@@ -3710,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter) ...@@ -3710,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */ /* Rx Errors */
/* RLEC on some newer hardware can be incorrect so build /* RLEC on some newer hardware can be incorrect so build
* our own version based on RUC and ROC */ * our own version based on RUC and ROC
*/
netdev->stats.rx_errors = adapter->stats.rxerrc + netdev->stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc + adapter->stats.ruc + adapter->stats.roc +
...@@ -3764,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter) ...@@ -3764,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
* @irq: interrupt number * @irq: interrupt number
* @data: pointer to a network interface device structure * @data: pointer to a network interface device structure
**/ **/
static irqreturn_t e1000_intr(int irq, void *data) static irqreturn_t e1000_intr(int irq, void *data)
{ {
struct net_device *netdev = data; struct net_device *netdev = data;
...@@ -3775,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data) ...@@ -3775,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (unlikely((!icr))) if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */ return IRQ_NONE; /* Not our interrupt */
/* /* we might have caused the interrupt, but the above
* we might have caused the interrupt, but the above
* read cleared it, and just in case the driver is * read cleared it, and just in case the driver is
* down there is nothing to do so return handled * down there is nothing to do so return handled
*/ */
...@@ -3802,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data) ...@@ -3802,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
__napi_schedule(&adapter->napi); __napi_schedule(&adapter->napi);
} else { } else {
/* this really should not happen! if it does it is basically a /* this really should not happen! if it does it is basically a
* bug, but not a hard error, so enable ints and continue */ * bug, but not a hard error, so enable ints and continue
*/
if (!test_bit(__E1000_DOWN, &adapter->flags)) if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter); e1000_irq_enable(adapter);
} }
...@@ -3816,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data) ...@@ -3816,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
**/ **/
static int e1000_clean(struct napi_struct *napi, int budget) static int e1000_clean(struct napi_struct *napi, int budget)
{ {
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
napi);
int tx_clean_complete = 0, work_done = 0; int tx_clean_complete = 0, work_done = 0;
tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
...@@ -3907,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, ...@@ -3907,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (adapter->detect_tx_hung) { if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the /* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */ * check with the clearing of time_stamp and movement of i
*/
adapter->detect_tx_hung = false; adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].time_stamp && if (tx_ring->buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
(adapter->tx_timeout_factor * HZ)) && (adapter->tx_timeout_factor * HZ)) &&
!(er32(STATUS) & E1000_STATUS_TXOFF)) { !(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */ /* detected Tx unit hang */
...@@ -3954,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, ...@@ -3954,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @csum: receive descriptor csum field * @csum: receive descriptor csum field
* @sk_buff: socket buffer with received data * @sk_buff: socket buffer with received data
**/ **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb) u32 csum, struct sk_buff *skb)
{ {
...@@ -3990,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, ...@@ -3990,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
* e1000_consume_page - helper function * e1000_consume_page - helper function
**/ **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
u16 length) u16 length)
{ {
bi->page = NULL; bi->page = NULL;
skb->len += length; skb->len += length;
...@@ -4086,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, ...@@ -4086,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (TBI_ACCEPT(hw, status, rx_desc->errors, length, if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) { last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, spin_lock_irqsave(&adapter->stats_lock,
irq_flags); irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats, e1000_tbi_adjust_stats(hw, &adapter->stats,
length, mapped); length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock, spin_unlock_irqrestore(&adapter->stats_lock,
irq_flags); irq_flags);
length--; length--;
} else { } else {
if (netdev->features & NETIF_F_RXALL) if (netdev->features & NETIF_F_RXALL)
...@@ -4098,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, ...@@ -4098,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* recycle both page and skb */ /* recycle both page and skb */
buffer_info->skb = skb; buffer_info->skb = skb;
/* an error means any chain goes out the window /* an error means any chain goes out the window
* too */ * too
*/
if (rx_ring->rx_skb_top) if (rx_ring->rx_skb_top)
dev_kfree_skb(rx_ring->rx_skb_top); dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL; rx_ring->rx_skb_top = NULL;
...@@ -4114,7 +4108,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, ...@@ -4114,7 +4108,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* this is the beginning of a chain */ /* this is the beginning of a chain */
rxtop = skb; rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page, skb_fill_page_desc(rxtop, 0, buffer_info->page,
0, length); 0, length);
} else { } else {
/* this is the middle of a chain */ /* this is the middle of a chain */
skb_fill_page_desc(rxtop, skb_fill_page_desc(rxtop,
...@@ -4132,38 +4126,42 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, ...@@ -4132,38 +4126,42 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
skb_shinfo(rxtop)->nr_frags, skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length); buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the /* re-use the current skb, we only consumed the
* page */ * page
*/
buffer_info->skb = skb; buffer_info->skb = skb;
skb = rxtop; skb = rxtop;
rxtop = NULL; rxtop = NULL;
e1000_consume_page(buffer_info, skb, length); e1000_consume_page(buffer_info, skb, length);
} else { } else {
/* no chain, got EOP, this buf is the packet /* no chain, got EOP, this buf is the packet
* copybreak to save the put_page/alloc_page */ * copybreak to save the put_page/alloc_page
*/
if (length <= copybreak && if (length <= copybreak &&
skb_tailroom(skb) >= length) { skb_tailroom(skb) >= length) {
u8 *vaddr; u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page); vaddr = kmap_atomic(buffer_info->page);
memcpy(skb_tail_pointer(skb), vaddr, length); memcpy(skb_tail_pointer(skb), vaddr,
length);
kunmap_atomic(vaddr); kunmap_atomic(vaddr);
/* re-use the page, so don't erase /* re-use the page, so don't erase
* buffer_info->page */ * buffer_info->page
*/
skb_put(skb, length); skb_put(skb, length);
} else { } else {
skb_fill_page_desc(skb, 0, skb_fill_page_desc(skb, 0,
buffer_info->page, 0, buffer_info->page, 0,
length); length);
e1000_consume_page(buffer_info, skb, e1000_consume_page(buffer_info, skb,
length); length);
} }
} }
} }
/* Receive Checksum Offload XXX recompute due to CRC strip? */ /* Receive Checksum Offload XXX recompute due to CRC strip? */
e1000_rx_checksum(adapter, e1000_rx_checksum(adapter,
(u32)(status) | (u32)(status) |
((u32)(rx_desc->errors) << 24), ((u32)(rx_desc->errors) << 24),
le16_to_cpu(rx_desc->csum), skb); le16_to_cpu(rx_desc->csum), skb);
total_rx_bytes += (skb->len - 4); /* don't count FCS */ total_rx_bytes += (skb->len - 4); /* don't count FCS */
if (likely(!(netdev->features & NETIF_F_RXFCS))) if (likely(!(netdev->features & NETIF_F_RXFCS)))
...@@ -4205,8 +4203,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, ...@@ -4205,8 +4203,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
return cleaned; return cleaned;
} }
/* /* this should improve performance for small packets with large amounts
* this should improve performance for small packets with large amounts
* of reassembly being done in the stack * of reassembly being done in the stack
*/ */
static void e1000_check_copybreak(struct net_device *netdev, static void e1000_check_copybreak(struct net_device *netdev,
...@@ -4310,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, ...@@ -4310,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
last_byte)) { last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags); spin_lock_irqsave(&adapter->stats_lock, flags);
e1000_tbi_adjust_stats(hw, &adapter->stats, e1000_tbi_adjust_stats(hw, &adapter->stats,
length, skb->data); length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock, spin_unlock_irqrestore(&adapter->stats_lock,
flags); flags);
length--; length--;
} else { } else {
if (netdev->features & NETIF_F_RXALL) if (netdev->features & NETIF_F_RXALL)
...@@ -4377,10 +4374,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, ...@@ -4377,10 +4374,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
* @rx_ring: pointer to receive ring structure * @rx_ring: pointer to receive ring structure
* @cleaned_count: number of buffers to allocate this pass * @cleaned_count: number of buffers to allocate this pass
**/ **/
static void static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring, int cleaned_count) struct e1000_rx_ring *rx_ring, int cleaned_count)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev; struct pci_dev *pdev = adapter->pdev;
...@@ -4421,7 +4417,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, ...@@ -4421,7 +4417,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
if (!buffer_info->dma) { if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev, buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0, buffer_info->page, 0,
buffer_info->length, buffer_info->length,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
...@@ -4451,7 +4447,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, ...@@ -4451,7 +4447,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, * applicable for weak-ordered memory model archs,
* such as IA-64). */ * such as IA-64).
*/
wmb(); wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt); writel(i, adapter->hw.hw_addr + rx_ring->rdt);
} }
...@@ -4461,7 +4458,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, ...@@ -4461,7 +4458,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure * @adapter: address of board private structure
**/ **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring, struct e1000_rx_ring *rx_ring,
int cleaned_count) int cleaned_count)
...@@ -4532,8 +4528,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, ...@@ -4532,8 +4528,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
break; /* while !buffer_info->skb */ break; /* while !buffer_info->skb */
} }
/* /* XXX if it was allocated cleanly it will never map to a
* XXX if it was allocated cleanly it will never map to a
* boundary crossing * boundary crossing
*/ */
...@@ -4571,7 +4566,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, ...@@ -4571,7 +4566,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs, * applicable for weak-ordered memory model archs,
* such as IA-64). */ * such as IA-64).
*/
wmb(); wmb();
writel(i, hw->hw_addr + rx_ring->rdt); writel(i, hw->hw_addr + rx_ring->rdt);
} }
...@@ -4581,7 +4577,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, ...@@ -4581,7 +4577,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
* @adapter: * @adapter:
**/ **/
static void e1000_smartspeed(struct e1000_adapter *adapter) static void e1000_smartspeed(struct e1000_adapter *adapter)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
...@@ -4594,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) ...@@ -4594,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
if (adapter->smartspeed == 0) { if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice, /* If Master/Slave config fault is asserted twice,
* we assume back-to-back */ * we assume back-to-back
*/
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
...@@ -4607,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) ...@@ -4607,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
adapter->smartspeed++; adapter->smartspeed++;
if (!e1000_phy_setup_autoneg(hw) && if (!e1000_phy_setup_autoneg(hw) &&
!e1000_read_phy_reg(hw, PHY_CTRL, !e1000_read_phy_reg(hw, PHY_CTRL,
&phy_ctrl)) { &phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN | phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG); MII_CR_RESTART_AUTO_NEG);
e1000_write_phy_reg(hw, PHY_CTRL, e1000_write_phy_reg(hw, PHY_CTRL,
...@@ -4638,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) ...@@ -4638,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
* @ifreq: * @ifreq:
* @cmd: * @cmd:
**/ **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{ {
switch (cmd) { switch (cmd) {
...@@ -4657,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ...@@ -4657,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @ifreq: * @ifreq:
* @cmd: * @cmd:
**/ **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd) int cmd)
{ {
...@@ -4919,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) ...@@ -4919,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
hw->autoneg = 0; hw->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set /* Make sure dplx is at most 1 bit and lsb of speed is not set
* for the switch() below to work */ * for the switch() below to work
*/
if ((spd & 1) || (dplx & ~1)) if ((spd & 1) || (dplx & ~1))
goto err_inval; goto err_inval;
...@@ -5122,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev) ...@@ -5122,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
} }
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
/* /* Polling 'interrupt' - used by things like netconsole to send skbs
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while * without having to re-enable interrupts. It's not called while
* the interrupt routine is executing. * the interrupt routine is executing.
*/ */
......
...@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter); ...@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* value exists, a default value is used. The final value is stored * value exists, a default value is used. The final value is stored
* in a variable in the adapter structure. * in a variable in the adapter structure.
**/ **/
void e1000_check_options(struct e1000_adapter *adapter) void e1000_check_options(struct e1000_adapter *adapter)
{ {
struct e1000_option opt; struct e1000_option opt;
...@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter) ...@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_RXD, .def = E1000_DEFAULT_RXD,
.arg = { .r = { .arg = { .r = {
.min = E1000_MIN_RXD, .min = E1000_MIN_RXD,
.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
E1000_MAX_82544_RXD
}} }}
}; };
...@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter) ...@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_TxAbsIntDelay > bd) { if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt, e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter); adapter);
} else { } else {
adapter->tx_abs_int_delay = opt.def; adapter->tx_abs_int_delay = opt.def;
} }
...@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter) ...@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxIntDelay > bd) { if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd]; adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt, e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter); adapter);
} else { } else {
adapter->rx_int_delay = opt.def; adapter->rx_int_delay = opt.def;
} }
...@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter) ...@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxAbsIntDelay > bd) { if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt, e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter); adapter);
} else { } else {
adapter->rx_abs_int_delay = opt.def; adapter->rx_abs_int_delay = opt.def;
} }
...@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter) ...@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
break; break;
case 4: case 4:
e_dev_info("%s set to simplified " e_dev_info("%s set to simplified "
"(2000-8000) ints mode\n", opt.name); "(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr; adapter->itr_setting = adapter->itr;
break; break;
default: default:
e1000_validate_option(&adapter->itr, &opt, e1000_validate_option(&adapter->itr, &opt,
adapter); adapter);
/* save the setting, because the dynamic bits /* save the setting, because the dynamic bits
* change itr. * change itr.
* clear the lower two bits because they are * clear the lower two bits because they are
* used as control */ * used as control
*/
adapter->itr_setting = adapter->itr & ~3; adapter->itr_setting = adapter->itr & ~3;
break; break;
} }
...@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter) ...@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
* *
* Handles speed and duplex options on fiber adapters * Handles speed and duplex options on fiber adapters
**/ **/
static void e1000_check_fiber_options(struct e1000_adapter *adapter) static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{ {
int bd = adapter->bd_number; int bd = adapter->bd_number;
...@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter) ...@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
* *
* Handles speed and duplex options on copper adapters * Handles speed and duplex options on copper adapters
**/ **/
static void e1000_check_copper_options(struct e1000_adapter *adapter) static void e1000_check_copper_options(struct e1000_adapter *adapter)
{ {
struct e1000_option opt; struct e1000_option opt;
...@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter) ...@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at Half Duplex only\n"); e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF; ADVERTISE_100_HALF;
break; break;
case FULL_DUPLEX: case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n"); e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n"); e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL | ADVERTISE_100_FULL |
ADVERTISE_1000_FULL; ADVERTISE_1000_FULL;
break; break;
case SPEED_10: case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n"); e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n"); e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL; ADVERTISE_10_FULL;
break; break;
case SPEED_10 + HALF_DUPLEX: case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n"); e_dev_info("Forcing to 10 Mbps Half Duplex\n");
...@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter) ...@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at 100 Mbps only\n"); e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL; ADVERTISE_100_FULL;
break; break;
case SPEED_100 + HALF_DUPLEX: case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n"); e_dev_info("Forcing to 100 Mbps Half Duplex\n");
......
...@@ -4830,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work) ...@@ -4830,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed, &adapter->link_speed,
&adapter->link_duplex); &adapter->link_duplex);
e1000_print_link_info(adapter); e1000_print_link_info(adapter);
/* check if SmartSpeed worked */
e1000e_check_downshift(hw);
if (phy->speed_downgraded)
netdev_warn(netdev,
"Link Speed was downgraded by SmartSpeed\n");
/* On supported PHYs, check for duplex mismatch only /* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half * if link has autonegotiated at 10/100 half
*/ */
......
...@@ -1891,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data) ...@@ -1891,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
} else { } else {
hw->mac.ops.check_for_link(&adapter->hw); hw->mac.ops.check_for_link(&adapter->hw);
if (hw->mac.autoneg) if (hw->mac.autoneg)
msleep(4000); msleep(5000);
if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
*data = 1; *data = 1;
......
...@@ -127,8 +127,8 @@ struct igbvf_buffer { ...@@ -127,8 +127,8 @@ struct igbvf_buffer {
/* Tx */ /* Tx */
struct { struct {
unsigned long time_stamp; unsigned long time_stamp;
union e1000_adv_tx_desc *next_to_watch;
u16 length; u16 length;
u16 next_to_watch;
u16 mapped_as_page; u16 mapped_as_page;
}; };
/* Rx */ /* Rx */
......
...@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) ...@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
struct sk_buff *skb; struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc; union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0; unsigned int total_bytes = 0, total_packets = 0;
unsigned int i, eop, count = 0; unsigned int i, count = 0;
bool cleaned = false; bool cleaned = false;
i = tx_ring->next_to_clean; i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch; buffer_info = &tx_ring->buffer_info[i];
eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); eop_desc = buffer_info->next_to_watch;
do {
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
break;
/* clear next_to_watch to prevent false hangs */
buffer_info->next_to_watch = NULL;
while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
rmb(); /* read buffer_info after eop_desc status */
for (cleaned = false; !cleaned; count++) { for (cleaned = false; !cleaned; count++) {
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i]; cleaned = (tx_desc == eop_desc);
cleaned = (i == eop);
skb = buffer_info->skb; skb = buffer_info->skb;
if (skb) { if (skb) {
...@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) ...@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
i++; i++;
if (i == tx_ring->count) if (i == tx_ring->count)
i = 0; i = 0;
buffer_info = &tx_ring->buffer_info[i];
} }
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); eop_desc = buffer_info->next_to_watch;
} } while (count < tx_ring->count);
tx_ring->next_to_clean = i; tx_ring->next_to_clean = i;
...@@ -1961,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter, ...@@ -1961,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
context_desc->seqnum_seed = 0; context_desc->seqnum_seed = 0;
buffer_info->time_stamp = jiffies; buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = 0; buffer_info->dma = 0;
i++; i++;
if (i == tx_ring->count) if (i == tx_ring->count)
...@@ -2021,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, ...@@ -2021,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
context_desc->mss_l4len_idx = 0; context_desc->mss_l4len_idx = 0;
buffer_info->time_stamp = jiffies; buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = 0; buffer_info->dma = 0;
i++; i++;
if (i == tx_ring->count) if (i == tx_ring->count)
...@@ -2061,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) ...@@ -2061,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring, struct igbvf_ring *tx_ring,
struct sk_buff *skb, struct sk_buff *skb)
unsigned int first)
{ {
struct igbvf_buffer *buffer_info; struct igbvf_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev; struct pci_dev *pdev = adapter->pdev;
...@@ -2077,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, ...@@ -2077,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->length = len; buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */ /* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies; buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false; buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
...@@ -2100,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, ...@@ -2100,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
buffer_info->length = len; buffer_info->length = len;
buffer_info->time_stamp = jiffies; buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true; buffer_info->mapped_as_page = true;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
...@@ -2109,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, ...@@ -2109,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
} }
tx_ring->buffer_info[i].skb = skb; tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
return ++count; return ++count;
...@@ -2120,7 +2127,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, ...@@ -2120,7 +2127,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->dma = 0; buffer_info->dma = 0;
buffer_info->time_stamp = 0; buffer_info->time_stamp = 0;
buffer_info->length = 0; buffer_info->length = 0;
buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false; buffer_info->mapped_as_page = false;
if (count) if (count)
count--; count--;
...@@ -2139,7 +2145,8 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, ...@@ -2139,7 +2145,8 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring, struct igbvf_ring *tx_ring,
int tx_flags, int count, u32 paylen, int tx_flags, int count,
unsigned int first, u32 paylen,
u8 hdr_len) u8 hdr_len)
{ {
union e1000_adv_tx_desc *tx_desc = NULL; union e1000_adv_tx_desc *tx_desc = NULL;
...@@ -2189,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, ...@@ -2189,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
* such as IA-64). */ * such as IA-64). */
wmb(); wmb();
tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail); writel(i, adapter->hw.hw_addr + tx_ring->tail);
/* we need this if more than one processor can write to our tail /* we need this if more than one processor can write to our tail
...@@ -2255,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, ...@@ -2255,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
* count reflects descriptors mapped, if 0 then mapping error * count reflects descriptors mapped, if 0 then mapping error
* has occurred and we need to rewind the descriptor queue * has occurred and we need to rewind the descriptor queue
*/ */
count = igbvf_tx_map_adv(adapter, tx_ring, skb, first); count = igbvf_tx_map_adv(adapter, tx_ring, skb);
if (count) { if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
skb->len, hdr_len); first, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */ /* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else { } else {
......
...@@ -156,7 +156,7 @@ struct vf_macvlans { ...@@ -156,7 +156,7 @@ struct vf_macvlans {
/* Tx Descriptors needed, worst case */ /* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer, /* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */ * so a DMA handle can be stored along with the buffer */
...@@ -201,6 +201,7 @@ struct ixgbe_rx_queue_stats { ...@@ -201,6 +201,7 @@ struct ixgbe_rx_queue_stats {
enum ixgbe_ring_state_t { enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_FDIR_INIT_DONE,
__IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG, __IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED, __IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED, __IXGBE_RX_RSC_ENABLED,
...@@ -278,15 +279,10 @@ enum ixgbe_ring_f_enum { ...@@ -278,15 +279,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_RSS_INDICES 16 #define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64 #define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64 #define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8 #define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#else
#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature { struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */ u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */ u16 indices; /* current value of indices */
...@@ -624,6 +620,7 @@ enum ixgbe_state_t { ...@@ -624,6 +620,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN, __IXGBE_DOWN,
__IXGBE_SERVICE_SCHED, __IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT, __IXGBE_IN_SFP_INIT,
__IXGBE_READ_I2C,
}; };
struct ixgbe_cb { struct ixgbe_cb {
...@@ -704,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); ...@@ -704,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev); extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB #ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif #endif
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev); extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON #ifdef CONFIG_IXGBE_HWMON
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include "ixgbe.h" #include "ixgbe.h"
#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16 #define IXGBE_ALL_RAR_ENTRIES 16
...@@ -2112,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev, ...@@ -2112,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector; struct ixgbe_q_vector *q_vector;
int i; int i;
u16 tx_itr_param, rx_itr_param; u16 tx_itr_param, rx_itr_param, tx_itr_prev;
bool need_reset = false; bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count /* reject Tx specific changes in case of mixed RxTx vectors */
&& ec->tx_coalesce_usecs) if (ec->tx_coalesce_usecs)
return -EINVAL; return -EINVAL;
tx_itr_prev = adapter->rx_itr_setting;
} else {
tx_itr_prev = adapter->tx_itr_setting;
}
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
...@@ -2144,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev, ...@@ -2144,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else else
tx_itr_param = adapter->tx_itr_setting; tx_itr_param = adapter->tx_itr_setting;
/* mixed Rx/Tx */
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
adapter->tx_itr_setting = adapter->rx_itr_setting;
#if IS_ENABLED(CONFIG_BQL)
/* detect ITR changes that require update of TXDCTL.WTHRESH */
if ((adapter->tx_itr_setting > 1) &&
(adapter->tx_itr_setting < IXGBE_100K_ITR)) {
if ((tx_itr_prev == 1) ||
(tx_itr_prev > IXGBE_100K_ITR))
need_reset = true;
} else {
if ((tx_itr_prev > 1) &&
(tx_itr_prev < IXGBE_100K_ITR))
need_reset = true;
}
#endif
/* check the old value and enable RSC if necessary */ /* check the old value and enable RSC if necessary */
need_reset = ixgbe_update_rsc(adapter); need_reset |= ixgbe_update_rsc(adapter);
for (i = 0; i < adapter->num_q_vectors; i++) { for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i]; q_vector = adapter->q_vector[i];
...@@ -2731,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev, ...@@ -2731,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
return 0; return 0;
} }
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
unsigned int max_combined;
u8 tcs = netdev_get_num_tc(adapter->netdev);
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/* We only support one q_vector without MSI-X */
max_combined = 1;
} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
/* SR-IOV currently only allows one queue on the PF */
max_combined = 1;
} else if (tcs > 1) {
/* For DCB report channels per traffic class */
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* 8 TC w/ 4 queues per TC */
max_combined = 4;
} else if (tcs > 4) {
/* 8 TC w/ 8 queues per TC */
max_combined = 8;
} else {
/* 4 TC w/ 16 queues per TC */
max_combined = 16;
}
} else if (adapter->atr_sample_rate) {
/* support up to 64 queues with ATR */
max_combined = IXGBE_MAX_FDIR_INDICES;
} else {
/* support up to 16 queues with RSS */
max_combined = IXGBE_MAX_RSS_INDICES;
}
return max_combined;
}
static void ixgbe_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
/* report maximum channels */
ch->max_combined = ixgbe_max_channels(adapter);
/* report info for other vector */
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
ch->max_other = NON_Q_VECTORS;
ch->other_count = NON_Q_VECTORS;
}
/* record RSS queues */
ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
/* nothing else to report if RSS is disabled */
if (ch->combined_count == 1)
return;
/* we do not support ATR queueing if SR-IOV is enabled */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
return;
/* same thing goes for being DCB enabled */
if (netdev_get_num_tc(dev) > 1)
return;
/* if ATR is disabled we can exit */
if (!adapter->atr_sample_rate)
return;
/* report flow director queues as maximum channels */
ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
unsigned int count = ch->combined_count;
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
return -EINVAL;
/* verify other_count has not changed */
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
if (count > ixgbe_max_channels(adapter))
return -EINVAL;
/* update feature limits from largest to smallest supported values */
adapter->ring_feature[RING_F_FDIR].limit = count;
/* cap RSS limit at 16 */
if (count > IXGBE_MAX_RSS_INDICES)
count = IXGBE_MAX_RSS_INDICES;
adapter->ring_feature[RING_F_RSS].limit = count;
#ifdef IXGBE_FCOE
/* cap FCoE limit at 8 */
if (count > IXGBE_FCRETA_SIZE)
count = IXGBE_FCRETA_SIZE;
adapter->ring_feature[RING_F_FCOE].limit = count;
#endif
/* use setup TC to update any traffic class queue mapping */
return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
static int ixgbe_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 status;
u8 sff8472_rev, addr_mode;
int ret_val = 0;
bool page_swap = false;
/* avoid concurent i2c reads */
while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
msleep(100);
/* used by the service task */
set_bit(__IXGBE_READ_I2C, &adapter->state);
/* Check whether we support SFF-8472 or not */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
&sff8472_rev);
if (status != 0) {
ret_val = -EIO;
goto err_out;
}
/* addressing mode is not supported */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_SWAP,
&addr_mode);
if (status != 0) {
ret_val = -EIO;
goto err_out;
}
if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
page_swap = true;
}
if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
/* We have a SFP, but it does not support SFF-8472 */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
/* We have a SFP which supports a revision of SFF-8472. */
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
err_out:
clear_bit(__IXGBE_READ_I2C, &adapter->state);
return ret_val;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u8 databyte = 0xFF;
int i = 0;
int ret_val = 0;
/* ixgbe_get_module_info is called before this function in all
* cases, so we do not need any checks we already do above,
* and can trust ee->len to be a known value.
*/
while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
msleep(100);
set_bit(__IXGBE_READ_I2C, &adapter->state);
/* Read the first block, SFF-8079 */
for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
if (status != 0) {
/* Error occured while reading module */
ret_val = -EIO;
goto err_out;
}
data[i] = databyte;
}
/* If the second block is requested, check if SFF-8472 is supported. */
if (ee->len == ETH_MODULE_SFF_8472_LEN) {
if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
return -EOPNOTSUPP;
/* Read the second block, SFF-8472 */
for (i = ETH_MODULE_SFF_8079_LEN;
i < ETH_MODULE_SFF_8472_LEN; i++) {
status = hw->phy.ops.read_i2c_sff8472(hw,
i - ETH_MODULE_SFF_8079_LEN, &databyte);
if (status != 0) {
/* Error occured while reading module */
ret_val = -EIO;
goto err_out;
}
data[i] = databyte;
}
}
err_out:
clear_bit(__IXGBE_READ_I2C, &adapter->state);
return ret_val;
}
static const struct ethtool_ops ixgbe_ethtool_ops = { static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings, .get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings, .set_settings = ixgbe_set_settings,
...@@ -2759,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { ...@@ -2759,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce, .set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc, .get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc, .set_rxnfc = ixgbe_set_rxnfc,
.get_channels = ixgbe_get_channels,
.set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info, .get_ts_info = ixgbe_get_ts_info,
.get_module_info = ixgbe_get_module_info,
.get_module_eeprom = ixgbe_get_module_eeprom,
}; };
void ixgbe_set_ethtool_ops(struct net_device *netdev) void ixgbe_set_ethtool_ops(struct net_device *netdev)
......
...@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) ...@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
fcoe = &adapter->ring_feature[RING_F_FCOE]; fcoe = &adapter->ring_feature[RING_F_FCOE];
/* limit ourselves based on feature limits */ /* limit ourselves based on feature limits */
fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
fcoe_i = min_t(u16, fcoe_i, fcoe->limit); fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (fcoe_i) { if (fcoe_i) {
...@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) ...@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
fcoe_i = min_t(u16, fcoe_i, fcoe->limit); fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (vmdq_i > 1 && fcoe_i) { if (vmdq_i > 1 && fcoe_i) {
/* reserve no more than number of CPUs */
fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
/* alloc queues for FCoE separately */ /* alloc queues for FCoE separately */
fcoe->indices = fcoe_i; fcoe->indices = fcoe_i;
fcoe->offset = vmdq_i * rss_i; fcoe->offset = vmdq_i * rss_i;
...@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) ...@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
if (rss_i > 1 && adapter->atr_sample_rate) { if (rss_i > 1 && adapter->atr_sample_rate) {
f = &adapter->ring_feature[RING_F_FDIR]; f = &adapter->ring_feature[RING_F_FDIR];
f->indices = min_t(u16, num_online_cpus(), f->limit); rss_i = f->indices = f->limit;
rss_i = max_t(u16, rss_i, f->indices);
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
...@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ...@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
{ {
struct ixgbe_q_vector *q_vector; struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring; struct ixgbe_ring *ring;
int node = -1; int node = NUMA_NO_NODE;
int cpu = -1; int cpu = -1;
int ring_count, size; int ring_count, size;
u8 tcs = netdev_get_num_tc(adapter->netdev);
ring_count = txr_count + rxr_count; ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) + size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count); (sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */ /* customize cpu for Flow Director mapping */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
if (cpu_online(v_idx)) { u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
cpu = v_idx; if (rss_i > 1 && adapter->atr_sample_rate) {
node = cpu_to_node(cpu); if (cpu_online(v_idx)) {
cpu = v_idx;
node = cpu_to_node(cpu);
}
} }
} }
......
...@@ -2786,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, ...@@ -2786,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/* /*
* set WTHRESH to encourage burst writeback, it should not be set * set WTHRESH to encourage burst writeback, it should not be set
* higher than 1 when ITR is 0 as it could cause false TX hangs * higher than 1 when:
* - ITR is 0 as it could cause false TX hangs
* - ITR is set to > 100k int/sec and BQL is enabled
* *
* In order to avoid issues WTHRESH + PTHRESH should always be equal * In order to avoid issues WTHRESH + PTHRESH should always be equal
* to or less than the number of on chip descriptors, which is * to or less than the number of on chip descriptors, which is
* currently 40. * currently 40.
*/ */
#if IS_ENABLED(CONFIG_BQL)
if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
#else
if (!ring->q_vector || (ring->q_vector->itr < 8)) if (!ring->q_vector || (ring->q_vector->itr < 8))
#endif
txdctl |= (1 << 16); /* WTHRESH = 1 */ txdctl |= (1 << 16); /* WTHRESH = 1 */
else else
txdctl |= (8 << 16); /* WTHRESH = 8 */ txdctl |= (8 << 16); /* WTHRESH = 8 */
...@@ -2813,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, ...@@ -2813,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->atr_sample_rate = 0; ring->atr_sample_rate = 0;
} }
/* initialize XPS */
if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
struct ixgbe_q_vector *q_vector = ring->q_vector;
if (q_vector)
netif_set_xps_queue(adapter->netdev,
&q_vector->affinity_mask,
ring->queue_index);
}
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */ /* enable queue */
...@@ -4465,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) ...@@ -4465,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{ {
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev; struct pci_dev *pdev = adapter->pdev;
unsigned int rss; unsigned int rss, fdir;
u32 fwsm; u32 fwsm;
#ifdef CONFIG_IXGBE_DCB #ifdef CONFIG_IXGBE_DCB
int j; int j;
...@@ -4485,9 +4501,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) ...@@ -4485,9 +4501,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].limit = rss; adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
adapter->ring_feature[RING_F_FDIR].limit = IXGBE_MAX_FDIR_INDICES;
adapter->max_q_vectors = MAX_Q_VECTORS_82599; adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->atr_sample_rate = 20; adapter->atr_sample_rate = 20;
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_FDIR].limit = fdir;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef CONFIG_IXGBE_DCA #ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
...@@ -5698,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) ...@@ -5698,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return; return;
/* concurent i2c reads are not supported */
if (test_bit(__IXGBE_READ_I2C, &adapter->state))
return;
/* someone else is in init, wait until next service event */ /* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return; return;
...@@ -6363,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) ...@@ -6363,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size); return __ixgbe_maybe_stop_tx(tx_ring, size);
} }
#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{ {
struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_adapter *adapter;
int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : struct ixgbe_ring_feature *f;
smp_processor_id(); int txq;
#ifdef IXGBE_FCOE
__be16 protocol = vlan_get_protocol(skb);
if (((protocol == htons(ETH_P_FCOE)) || /*
(protocol == htons(ETH_P_FIP))) && * only execute the code below if protocol is FCoE
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { * or FIP and we have FCoE enabled on the adapter
struct ixgbe_ring_feature *f; */
switch (vlan_get_protocol(skb)) {
case __constant_htons(ETH_P_FCOE):
case __constant_htons(ETH_P_FIP):
adapter = netdev_priv(dev);
f = &adapter->ring_feature[RING_F_FCOE]; if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
default:
return __netdev_pick_tx(dev, skb);
}
while (txq >= f->indices) f = &adapter->ring_feature[RING_F_FCOE];
txq -= f->indices;
txq += adapter->ring_feature[RING_F_FCOE].offset;
return txq; txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
} smp_processor_id();
#endif
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { while (txq >= f->indices)
while (unlikely(txq >= dev->real_num_tx_queues)) txq -= f->indices;
txq -= dev->real_num_tx_queues;
return txq;
}
return skb_tx_hash(dev, skb); return txq + f->offset;
} }
#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter, struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring) struct ixgbe_ring *tx_ring)
...@@ -6799,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) ...@@ -6799,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
} }
} }
#endif /* CONFIG_IXGBE_DCB */
/** /**
* ixgbe_setup_tc - configure net_device for multiple traffic classes * ixgbe_setup_tc - configure net_device for multiple traffic classes
* *
...@@ -6824,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ...@@ -6824,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_close(dev); ixgbe_close(dev);
ixgbe_clear_interrupt_scheme(adapter); ixgbe_clear_interrupt_scheme(adapter);
#ifdef CONFIG_IXGBE_DCB
if (tc) { if (tc) {
netdev_set_num_tc(dev, tc); netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter); ixgbe_set_prio_tc_map(adapter);
...@@ -6846,31 +6871,24 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ...@@ -6846,31 +6871,24 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false;
} }
ixgbe_init_interrupt_scheme(adapter);
ixgbe_validate_rtr(adapter, tc); ixgbe_validate_rtr(adapter, tc);
#endif /* CONFIG_IXGBE_DCB */
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(dev)) if (netif_running(dev))
ixgbe_open(dev); return ixgbe_open(dev);
return 0; return 0;
} }
#endif /* CONFIG_IXGBE_DCB */
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
rtnl_lock(); rtnl_lock();
#ifdef CONFIG_IXGBE_DCB
ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
#else
if (netif_running(netdev))
ixgbe_close(netdev);
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
ixgbe_open(netdev);
#endif
rtnl_unlock(); rtnl_unlock();
} }
...@@ -7118,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = { ...@@ -7118,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open, .ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close, .ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame, .ndo_start_xmit = ixgbe_xmit_frame,
#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue, .ndo_select_queue = ixgbe_select_queue,
#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac, .ndo_set_mac_address = ixgbe_set_mac,
...@@ -7230,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -7230,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found; static int cards_found;
int i, err, pci_using_dac; int i, err, pci_using_dac;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH]; u8 part_str[IXGBE_PBANUM_LENGTH];
unsigned int indices = num_possible_cpus();
unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
u16 device_caps; u16 device_caps;
#endif #endif
...@@ -7281,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -7281,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev); pci_set_master(pdev);
pci_save_state(pdev); pci_save_state(pdev);
if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB #ifdef CONFIG_IXGBE_DCB
if (ii->mac == ixgbe_mac_82598EB) /* 8 TC w/ 4 queues per TC */
dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, indices = 4 * MAX_TRAFFIC_CLASS;
IXGBE_MAX_RSS_INDICES); #else
else indices = IXGBE_MAX_RSS_INDICES;
dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
IXGBE_MAX_FDIR_INDICES);
#endif #endif
}
if (ii->mac == ixgbe_mac_82598EB)
indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
else
indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
#ifdef IXGBE_FCOE
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) { if (!netdev) {
err = -ENOMEM; err = -ENOMEM;
...@@ -7454,13 +7463,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -7454,13 +7463,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
unsigned int fcoe_l;
if (hw->mac.ops.get_device_caps) { if (hw->mac.ops.get_device_caps) {
hw->mac.ops.get_device_caps(hw, &device_caps); hw->mac.ops.get_device_caps(hw, &device_caps);
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
} }
adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
netdev->features |= NETIF_F_FSO | netdev->features |= NETIF_F_FSO |
NETIF_F_FCOE_CRC; NETIF_F_FCOE_CRC;
......
...@@ -852,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) ...@@ -852,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw, status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER, IXGBE_SFF_IDENTIFIER,
&identifier); &identifier);
if (status == IXGBE_ERR_SWFW_SYNC || if (status != 0)
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
goto err_read_i2c_eeprom; goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */ /* LAN ID is needed for sfp_type determination */
...@@ -870,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) ...@@ -870,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES, IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g); &comp_codes_1g);
if (status == IXGBE_ERR_SWFW_SYNC || if (status != 0)
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
goto err_read_i2c_eeprom; goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw, status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES, IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g); &comp_codes_10g);
if (status == IXGBE_ERR_SWFW_SYNC || if (status != 0)
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
goto err_read_i2c_eeprom; goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw, status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY, IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech); &cable_tech);
if (status == IXGBE_ERR_SWFW_SYNC || if (status != 0)
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
goto err_read_i2c_eeprom; goto err_read_i2c_eeprom;
/* ID Module /* ID Module
...@@ -984,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) ...@@ -984,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->phy.type != ixgbe_phy_nl) { if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier; hw->phy.id = identifier;
status = hw->phy.ops.read_i2c_eeprom(hw, status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE0, IXGBE_SFF_VENDOR_OUI_BYTE0,
&oui_bytes[0]); &oui_bytes[0]);
if (status == IXGBE_ERR_SWFW_SYNC || if (status != 0)
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
goto err_read_i2c_eeprom; goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw, status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1, IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]); &oui_bytes[1]);
if (status == IXGBE_ERR_SWFW_SYNC || if (status != 0)
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
goto err_read_i2c_eeprom; goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw, status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2, IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]); &oui_bytes[2]);
if (status == IXGBE_ERR_SWFW_SYNC || if (status != 0)
status == IXGBE_ERR_I2C ||
status == IXGBE_ERR_SFP_NOT_PRESENT)
goto err_read_i2c_eeprom; goto err_read_i2c_eeprom;
vendor_oui = vendor_oui =
...@@ -1307,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, ...@@ -1307,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break; break;
fail: fail:
ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask); hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msleep(100); msleep(100);
ixgbe_i2c_bus_clear(hw);
retry++; retry++;
if (retry < max_retry) if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n"); hw_dbg(hw, "I2C byte read error - Retrying.\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment