Commit 64e771bb authored by David S. Miller
parents 3a7c1ee4 9cd9130d
@@ -944,6 +944,14 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
         else
                 reg |= (1 << 28);
         ew32(TARC(1), reg);
+
+        /*
+         * Disable IPv6 extension header parsing because some malformed
+         * IPv6 headers can hang the Rx.
+         */
+        reg = er32(RFCTL);
+        reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+        ew32(RFCTL, reg);
 }

 /**
...
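The added block above is the driver's usual read-modify-write sequence on RFCTL: read the register, OR in the two IPv6 extension-header disable bits, and write the value back. Below is a minimal standalone sketch of that pattern, not the driver's er32()/ew32() macros; the register index and bit positions are illustrative stand-ins only.

#include <stdint.h>
#include <stdio.h>

/* Stand-in "register file"; a real driver reads/writes MMIO instead. */
static uint32_t regfile[64];

#define RFCTL_IDX                5            /* hypothetical register index */
#define RFCTL_IPV6_EX_DIS        (1u << 16)   /* illustrative bit positions  */
#define RFCTL_NEW_IPV6_EXT_DIS   (1u << 17)

static uint32_t rd32(unsigned int idx)             { return regfile[idx]; }
static void     wr32(unsigned int idx, uint32_t v) { regfile[idx] = v; }

int main(void)
{
        /* Read-modify-write: set only the two disable bits, keep the rest. */
        uint32_t reg = rd32(RFCTL_IDX);

        reg |= RFCTL_IPV6_EX_DIS | RFCTL_NEW_IPV6_EXT_DIS;
        wr32(RFCTL_IDX, reg);

        printf("RFCTL is now 0x%08x\n", (unsigned int)rd32(RFCTL_IDX));
        return 0;
}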
@@ -1279,6 +1279,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
                 ew32(CTRL_EXT, reg);
         }

+        /*
+         * Disable IPv6 extension header parsing because some malformed
+         * IPv6 headers can hang the Rx.
+         */
+        if (hw->mac.type <= e1000_82573) {
+                reg = er32(RFCTL);
+                reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+                ew32(RFCTL, reg);
+        }
+
         /* PCI-Ex Control Registers */
         switch (hw->mac.type) {
         case e1000_82574:

@@ -2062,8 +2072,9 @@ const struct e1000_info e1000_82574_info = {
                                   | FLAG_HAS_SMART_POWER_DOWN
                                   | FLAG_HAS_AMT
                                   | FLAG_HAS_CTRLEXT_ON_LOAD,
         .flags2                 = FLAG2_CHECK_PHY_HANG
                                   | FLAG2_DISABLE_ASPM_L0S
+                                  | FLAG2_DISABLE_ASPM_L1
                                   | FLAG2_NO_DISABLE_RX
                                   | FLAG2_DMA_BURST,
         .pba                    = 32,
...
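For 82574, FLAG2_DISABLE_ASPM_L1 is now set statically in the board info rather than being toggled at MTU-change time (the runtime toggle is removed from e1000_change_mtu() further down). Below is a hedged sketch of how such an OR-combined feature mask is typically consumed; the flag values and the translation to link states are hypothetical, not the driver's actual probe code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag bits, mirroring the FLAG2_* style used in the hunk. */
#define FLAG2_DISABLE_ASPM_L0S  (1u << 0)
#define FLAG2_DISABLE_ASPM_L1   (1u << 1)

/* Hypothetical PCIe link-state bits to accumulate into a disable mask. */
#define LINK_STATE_L0S          (1u << 0)
#define LINK_STATE_L1           (1u << 1)

int main(void)
{
        /* The board info ORs the feature flags together once, at definition time. */
        uint32_t flags2 = FLAG2_DISABLE_ASPM_L0S | FLAG2_DISABLE_ASPM_L1;
        uint32_t aspm_disable_mask = 0;

        /* Consumers test individual bits and build the state mask. */
        if (flags2 & FLAG2_DISABLE_ASPM_L0S)
                aspm_disable_mask |= LINK_STATE_L0S;
        if (flags2 & FLAG2_DISABLE_ASPM_L1)
                aspm_disable_mask |= LINK_STATE_L1;

        printf("ASPM states to disable: 0x%x\n", (unsigned int)aspm_disable_mask);
        return 0;
}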
@@ -673,11 +673,21 @@ static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
         return hw->phy.ops.read_reg(hw, offset, data);
 }

+static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+        return hw->phy.ops.read_reg_locked(hw, offset, data);
+}
+
 static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
 {
         return hw->phy.ops.write_reg(hw, offset, data);
 }

+static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+        return hw->phy.ops.write_reg_locked(hw, offset, data);
+}
+
 static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
 {
         return hw->phy.ops.get_cable_length(hw);
...
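The new e1e_rphy_locked()/e1e_wphy_locked() inlines let the ich8lan hunks below shorten their acquire/access/release sequences. Below is a standalone sketch of the wrapper-plus-ops-table idea; the structure layout and the stub backends are simplified stand-ins, not the real struct e1000_hw.

#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;
typedef uint32_t u32;
typedef uint16_t u16;

/* Simplified stand-in for the PHY ops table and hw handle. */
struct phy_ops {
        s32 (*read_reg_locked)(void *hw, u32 offset, u16 *data);
        s32 (*write_reg_locked)(void *hw, u32 offset, u16 data);
};

struct hw {
        struct phy_ops ops;
        u16 regs[32];          /* fake PHY register space */
};

/* Thin inline wrappers in the style of the new e1e_*_locked() helpers. */
static inline s32 rphy_locked(struct hw *hw, u32 offset, u16 *data)
{
        return hw->ops.read_reg_locked(hw, offset, data);
}

static inline s32 wphy_locked(struct hw *hw, u32 offset, u16 data)
{
        return hw->ops.write_reg_locked(hw, offset, data);
}

/* Stub backends standing in for the real (lock-assuming) accessors. */
static s32 stub_read(void *opaque, u32 offset, u16 *data)
{
        struct hw *hw = opaque;
        *data = hw->regs[offset % 32];
        return 0;
}

static s32 stub_write(void *opaque, u32 offset, u16 data)
{
        struct hw *hw = opaque;
        hw->regs[offset % 32] = data;
        return 0;
}

int main(void)
{
        struct hw hw = { .ops = { stub_read, stub_write } };
        u16 val = 0;

        wphy_locked(&hw, 19, 0x0100);   /* caller already holds the PHY lock */
        rphy_locked(&hw, 19, &val);
        printf("reg 19 = 0x%04x\n", val);
        return 0;
}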
@@ -304,9 +304,9 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
         u16 phy_reg;
         u32 phy_id;

-        hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
+        e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
         phy_id = (u32)(phy_reg << 16);
-        hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
+        e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
         phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);

         if (hw->phy.id) {

@@ -1271,8 +1271,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
                         reg_addr &= PHY_REG_MASK;
                         reg_addr |= phy_page;

-                        ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
-                                                            reg_data);
+                        ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
                         if (ret_val)
                                 goto release;
                 }

@@ -1309,8 +1308,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
         if (link) {
                 if (hw->phy.type == e1000_phy_82578) {
-                        ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
-                                                              &status_reg);
+                        ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
+                                                  &status_reg);
                         if (ret_val)
                                 goto release;

@@ -1325,8 +1324,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
                 }

                 if (hw->phy.type == e1000_phy_82577) {
-                        ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
-                                                              &status_reg);
+                        ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
                         if (ret_val)
                                 goto release;

@@ -1341,15 +1339,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
                 }

                 /* Link stall fix for link up */
-                ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
-                                                       0x0100);
+                ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
                 if (ret_val)
                         goto release;

         } else {
                 /* Link stall fix for link down */
-                ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
-                                                       0x4100);
+                ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
                 if (ret_val)
                         goto release;
         }

@@ -1448,7 +1444,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
         mac_reg = er32(PHY_CTRL);

-        ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+        ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
         if (ret_val)
                 goto release;

@@ -1475,7 +1471,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
             !hw->phy.ops.check_reset_block(hw))
                 oem_reg |= HV_OEM_BITS_RESTART_AN;

-        ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+        ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);

 release:
         hw->phy.ops.release(hw);

@@ -1571,11 +1567,10 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
         ret_val = hw->phy.ops.acquire(hw);
         if (ret_val)
                 return ret_val;
-        ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+        ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
         if (ret_val)
                 goto release;
-        ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
-                                               phy_data & 0x00FF);
+        ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);

 release:
         hw->phy.ops.release(hw);

@@ -1807,20 +1802,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
         ret_val = hw->phy.ops.acquire(hw);
         if (ret_val)
                 return ret_val;
-        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-                                               I82579_MSE_THRESHOLD);
+        ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
         if (ret_val)
                 goto release;
         /* set MSE higher to enable link to stay up when noise is high */
-        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034);
+        ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
         if (ret_val)
                 goto release;
-        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-                                               I82579_MSE_LINK_DOWN);
+        ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
         if (ret_val)
                 goto release;
         /* drop link after 5 times MSE threshold was reached */
-        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005);
+        ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);

 release:
         hw->phy.ops.release(hw);

@@ -1995,12 +1988,10 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
                 ret_val = hw->phy.ops.acquire(hw);
                 if (ret_val)
                         return ret_val;
-                ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-                                                       I82579_LPI_UPDATE_TIMER);
+                ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+                                          I82579_LPI_UPDATE_TIMER);
                 if (!ret_val)
-                        ret_val = hw->phy.ops.write_reg_locked(hw,
-                                                               I82579_EMI_DATA,
-                                                               0x1387);
+                        ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
                 hw->phy.ops.release(hw);
         }

@@ -3477,6 +3468,13 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
          */
         reg = er32(RFCTL);
         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+
+        /*
+         * Disable IPv6 extension header parsing because some malformed
+         * IPv6 headers can hang the Rx.
+         */
+        if (hw->mac.type == e1000_ich8lan)
+                reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
         ew32(RFCTL, reg);
 }
...
@@ -1084,6 +1084,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
               phy_1000t_status,
               phy_ext_status,
               pci_status);
+
+        /* Suggest workaround for known h/w issue */
+        if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
+                e_err("Try turning off Tx pause (flow control) via ethtool\n");
 }

 /**

@@ -2935,6 +2939,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
         /* Enable Extended Status in all Receive Descriptors */
         rfctl = er32(RFCTL);
         rfctl |= E1000_RFCTL_EXTEN;
+        ew32(RFCTL, rfctl);

         /*
          * 82571 and greater support packet-split where the protocol

@@ -2960,13 +2965,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
         if (adapter->rx_ps_pages) {
                 u32 psrctl = 0;

-                /*
-                 * disable packet split support for IPv6 extension headers,
-                 * because some malformed IPv6 headers can hang the Rx
-                 */
-                rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
-                          E1000_RFCTL_NEW_IPV6_EXT_DIS);
-
                 /* Enable Packet split descriptors */
                 rctl |= E1000_RCTL_DTYP_PS;

@@ -3005,7 +3003,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
          */
         }

-        ew32(RFCTL, rfctl);
         ew32(RCTL, rctl);
         /* just started the receive unit, no need to restart */
         adapter->flags &= ~FLAG_RX_RESTART_NOW;

@@ -5275,14 +5272,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                 return -EINVAL;
         }

-        /* 82573 Errata 17 */
-        if (((adapter->hw.mac.type == e1000_82573) ||
-             (adapter->hw.mac.type == e1000_82574)) &&
-            (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
-                adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
-                e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
-        }
-
         while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                 usleep_range(1000, 2000);
         /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
...
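The hang-report addition above checks CTRL.TFCE (Tx flow control enabled) on PCH parts and prints a hint; on a live system the suggested remedy would be along the lines of: ethtool -A <iface> tx off. Below is a tiny standalone sketch of the bit-test-and-advise pattern, with an illustrative register value and bit position rather than the real E1000_CTRL layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_TFCE (1u << 28)    /* illustrative bit for "Tx flow control on" */

/* Print a remediation hint only when the condition linked to the known
 * hardware issue (Tx pause enabled on a PCH part) is actually present. */
static void suggest_workaround(bool is_pch, uint32_t ctrl)
{
        if (is_pch && (ctrl & CTRL_TFCE))
                fprintf(stderr,
                        "Try turning off Tx pause (flow control) via ethtool\n");
}

int main(void)
{
        suggest_workaround(true, CTRL_TFCE);   /* hint printed  */
        suggest_workaround(true, 0);           /* no hint       */
        return 0;
}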
@@ -722,8 +722,24 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
                 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;

         /* Enable downshift on BM (disabled by default) */
-        if (phy->type == e1000_phy_bm)
+        if (phy->type == e1000_phy_bm) {
+                /* For 82574/82583, first disable then enable downshift */
+                if (phy->id == BME1000_E_PHY_ID_R2) {
+                        phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT;
+                        ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL,
+                                           phy_data);
+                        if (ret_val)
+                                return ret_val;
+                        /* Commit the changes. */
+                        ret_val = e1000e_commit_phy(hw);
+                        if (ret_val) {
+                                e_dbg("Error committing the PHY changes\n");
+                                return ret_val;
+                        }
+                }
+
                 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+        }

         ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
         if (ret_val)
...
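The phy.c hunk sequences the BME1000_E_PHY_ID_R2 case as: clear the downshift bit, write it out, commit, then set the bit again before the final write. Below is a standalone sketch of that disable-commit-re-enable ordering, with stub PHY accessors standing in for e1e_wphy()/e1000e_commit_phy() and an illustrative bit position.

#include <stdint.h>
#include <stdio.h>

#define PSCR_ENABLE_DOWNSHIFT (1u << 11)   /* illustrative bit position */

static uint16_t phy_spec_ctrl;             /* fake PHY specific control reg */

static int phy_write(uint16_t val)         /* stand-in for e1e_wphy()       */
{
        phy_spec_ctrl = val;
        return 0;
}

static int phy_commit(void)                /* stand-in for the commit step  */
{
        printf("committed PSCR = 0x%04x\n", phy_spec_ctrl);
        return 0;
}

int main(void)
{
        uint16_t phy_data = PSCR_ENABLE_DOWNSHIFT;  /* value previously read */
        int ret;

        /* 1. Disable downshift and push the cleared value to the PHY. */
        phy_data &= ~PSCR_ENABLE_DOWNSHIFT;
        ret = phy_write(phy_data);
        if (ret)
                return ret;

        /* 2. Commit so the PHY latches the disabled state. */
        ret = phy_commit();
        if (ret)
                return ret;

        /* 3. Re-enable downshift; the caller writes this value afterwards. */
        phy_data |= PSCR_ENABLE_DOWNSHIFT;
        ret = phy_write(phy_data);
        printf("final PSCR = 0x%04x\n", phy_spec_ctrl);
        return ret;
}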
@@ -40,6 +40,7 @@
 typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_1GB_FULL       0x0020
 #define IXGBE_LINK_SPEED_10GB_FULL      0x0080
+#define IXGBE_LINK_SPEED_100_FULL       0x0008

 #define IXGBE_CTRL_RST          0x04000000 /* Reset (SW) */
 #define IXGBE_RXDCTL_ENABLE     0x02000000 /* Enable specific Rx Queue */

@@ -48,6 +49,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_LINKS_SPEED_82599         0x30000000
 #define IXGBE_LINKS_SPEED_10G_82599     0x30000000
 #define IXGBE_LINKS_SPEED_1G_82599      0x20000000
+#define IXGBE_LINKS_SPEED_100_82599     0x10000000

 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */
 #define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE        8
...
@@ -107,10 +107,20 @@ static int ixgbevf_get_settings(struct net_device *netdev,
         hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

         if (link_up) {
-                ethtool_cmd_speed_set(
-                        ecmd,
-                        (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-                        SPEED_10000 : SPEED_1000);
+                __u32 speed = SPEED_10000;
+                switch (link_speed) {
+                case IXGBE_LINK_SPEED_10GB_FULL:
+                        speed = SPEED_10000;
+                        break;
+                case IXGBE_LINK_SPEED_1GB_FULL:
+                        speed = SPEED_1000;
+                        break;
+                case IXGBE_LINK_SPEED_100_FULL:
+                        speed = SPEED_100;
+                        break;
+                }
+
+                ethtool_cmd_speed_set(ecmd, speed);
                 ecmd->duplex = DUPLEX_FULL;
         } else {
                 ethtool_cmd_speed_set(ecmd, -1);
...
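With the new IXGBE_LINK_SPEED_100_FULL value, the VF's ethtool path maps three link-speed codes onto ethtool speeds instead of using a two-way ternary. Below is a standalone sketch of that mapping; the link-speed values match the new defines in the hunk above, while the numeric speeds are local stand-ins for the kernel's SPEED_100/SPEED_1000/SPEED_10000 macros.

#include <stdint.h>
#include <stdio.h>

/* Link-speed codes as defined in the hunk above. */
#define LINK_SPEED_100_FULL   0x0008
#define LINK_SPEED_1GB_FULL   0x0020
#define LINK_SPEED_10GB_FULL  0x0080

/* Map a link-speed code to a Mb/s value, defaulting to 10G as the driver does. */
static unsigned int map_speed(uint32_t link_speed)
{
        unsigned int speed = 10000;

        switch (link_speed) {
        case LINK_SPEED_10GB_FULL:
                speed = 10000;
                break;
        case LINK_SPEED_1GB_FULL:
                speed = 1000;
                break;
        case LINK_SPEED_100_FULL:
                speed = 100;
                break;
        }
        return speed;
}

int main(void)
{
        printf("%u %u %u\n",
               map_speed(LINK_SPEED_10GB_FULL),
               map_speed(LINK_SPEED_1GB_FULL),
               map_speed(LINK_SPEED_100_FULL));
        return 0;
}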
@@ -287,7 +287,7 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];

-extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
 extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
 extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
 extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
...
@@ -57,7 +57,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
         "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

-#define DRV_VERSION "2.2.0-k"
+#define DRV_VERSION "2.6.0-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
         "Copyright (c) 2009 - 2012 Intel Corporation.";

@@ -1608,13 +1608,14 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
 }

-static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
         struct ixgbe_hw *hw = &adapter->hw;
         int i, j = 0;
         int num_rx_rings = adapter->num_rx_queues;
         u32 txdctl, rxdctl;
+        u32 msg[2];

         for (i = 0; i < adapter->num_tx_queues; i++) {
                 j = adapter->tx_ring[i].reg_idx;

@@ -1653,6 +1654,10 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
         }

+        msg[0] = IXGBE_VF_SET_LPE;
+        msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+        hw->mbx.ops.write_posted(hw, msg, 2);
+
         clear_bit(__IXGBEVF_DOWN, &adapter->state);
         ixgbevf_napi_enable_all(adapter);

@@ -1667,24 +1672,20 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
         adapter->link_check_timeout = jiffies;
         mod_timer(&adapter->watchdog_timer, jiffies);
-        return 0;
 }

-int ixgbevf_up(struct ixgbevf_adapter *adapter)
+void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
-        int err;
         struct ixgbe_hw *hw = &adapter->hw;

         ixgbevf_configure(adapter);

-        err = ixgbevf_up_complete(adapter);
+        ixgbevf_up_complete(adapter);

         /* clear any pending interrupts, may auto mask */
         IXGBE_READ_REG(hw, IXGBE_VTEICR);

         ixgbevf_irq_enable(adapter, true, true);
-
-        return err;
 }

 /**

@@ -2673,9 +2674,7 @@ static int ixgbevf_open(struct net_device *netdev)
          */
         ixgbevf_map_rings_to_vectors(adapter);

-        err = ixgbevf_up_complete(adapter);
-        if (err)
-                goto err_up;
+        ixgbevf_up_complete(adapter);

         /* clear any pending interrupts, may auto mask */
         IXGBE_READ_REG(hw, IXGBE_VTEICR);

@@ -2689,7 +2688,6 @@ static int ixgbevf_open(struct net_device *netdev)

 err_req_irq:
         ixgbevf_down(adapter);
-err_up:
         ixgbevf_free_irq(adapter);
 err_setup_rx:
         ixgbevf_free_all_rx_resources(adapter);

@@ -3196,9 +3194,11 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
         /* must set new MTU before calling down or up */
         netdev->mtu = new_mtu;

-        msg[0] = IXGBE_VF_SET_LPE;
-        msg[1] = max_frame;
-        hw->mbx.ops.write_posted(hw, msg, 2);
+        if (!netif_running(netdev)) {
+                msg[0] = IXGBE_VF_SET_LPE;
+                msg[1] = max_frame;
+                hw->mbx.ops.write_posted(hw, msg, 2);
+        }

         if (netif_running(netdev))
                 ixgbevf_reinit_locked(adapter);
...
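Both the up path and the MTU path now send the same two-word IXGBE_VF_SET_LPE mailbox message: word 0 carries the opcode, word 1 the maximum frame size (MTU plus Ethernet header and FCS). Below is a standalone sketch of composing that message; the opcode value and the mailbox "transport" are stand-ins, only the framing follows the hunk.

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN          14            /* Ethernet header length      */
#define ETH_FCS_LEN       4             /* frame check sequence length */
#define VF_SET_LPE_OPCODE 0x0005        /* hypothetical opcode value   */

/* Stand-in for hw->mbx.ops.write_posted(): just show what would be sent. */
static int mbx_write_posted(const uint32_t *msg, int len)
{
        for (int i = 0; i < len; i++)
                printf("mbx word %d: 0x%08x\n", i, (unsigned int)msg[i]);
        return 0;
}

int main(void)
{
        unsigned int mtu = 1500;
        uint32_t msg[2];

        /* Word 0: opcode, word 1: max frame = MTU + L2 header + FCS. */
        msg[0] = VF_SET_LPE_OPCODE;
        msg[1] = mtu + ETH_HLEN + ETH_FCS_LEN;

        return mbx_write_posted(msg, 2);
}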
@@ -404,11 +404,17 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
         else
                 *link_up = false;

-        if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-            IXGBE_LINKS_SPEED_10G_82599)
-                *speed = IXGBE_LINK_SPEED_10GB_FULL;
-        else
-                *speed = IXGBE_LINK_SPEED_1GB_FULL;
+        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+        case IXGBE_LINKS_SPEED_10G_82599:
+                *speed = IXGBE_LINK_SPEED_10GB_FULL;
+                break;
+        case IXGBE_LINKS_SPEED_1G_82599:
+                *speed = IXGBE_LINK_SPEED_1GB_FULL;
+                break;
+        case IXGBE_LINKS_SPEED_100_82599:
+                *speed = IXGBE_LINK_SPEED_100_FULL;
+                break;
+        }

         return 0;
 }
...