Commit f85b02c2 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to igb and ixgbe.  Most of the changes
are to igb, with a single patch for ixgbe.

There are 3 igb fixes from Carolyn that resolve issues in
get_i2c_client() reported by Dan Carpenter.  Alex does some cleanup of
the igb driver to match equivalent functionality in ixgbe on transmit.
Alex also makes it possible to enable the use of build_skb for cases
where jumbo frames are disabled.  The advantage is that we no longer
have to perform a memcpy to populate the header, and as a result we see
a significant performance improvement.

Akeem provides 4 patches that initialize function pointers and refactor
the function pointer setup in igb_get_invariants() to assist with
driver debugging.

The ixgbe patch comes from Emil to reshuffle the switch/case structure
of the flag assignment to allow for the flags to be set for each MAC
type separately. This is needed for new hardware that does not have feature
parity with older hardware.

v2: updated patches 4 & 5 based on feedback from Ben Hutchings and Eric
    Dumazet
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 68c33163 8fc3bb6d
@@ -111,162 +111,146 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
return ext_mdio;
}
static s32 igb_get_invariants_82575(struct e1000_hw *hw) /**
* igb_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{ {
struct e1000_phy_info *phy = &hw->phy; struct e1000_phy_info *phy = &hw->phy;
struct e1000_nvm_info *nvm = &hw->nvm; s32 ret_val = 0;
struct e1000_mac_info *mac = &hw->mac; u32 ctrl_ext;
struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
u32 eecd;
s32 ret_val;
u16 size;
u32 ctrl_ext = 0;
switch (hw->device_id) { if (hw->phy.media_type != e1000_media_type_copper) {
case E1000_DEV_ID_82575EB_COPPER: phy->type = e1000_phy_none;
case E1000_DEV_ID_82575EB_FIBER_SERDES: goto out;
case E1000_DEV_ID_82575GB_QUAD_COPPER:
mac->type = e1000_82575;
break;
case E1000_DEV_ID_82576:
case E1000_DEV_ID_82576_NS:
case E1000_DEV_ID_82576_NS_SERDES:
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
case E1000_DEV_ID_82576_SERDES_QUAD:
mac->type = e1000_82576;
break;
case E1000_DEV_ID_82580_COPPER:
case E1000_DEV_ID_82580_FIBER:
case E1000_DEV_ID_82580_QUAD_FIBER:
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
case E1000_DEV_ID_DH89XXCC_SGMII:
case E1000_DEV_ID_DH89XXCC_SERDES:
case E1000_DEV_ID_DH89XXCC_BACKPLANE:
case E1000_DEV_ID_DH89XXCC_SFP:
mac->type = e1000_82580;
break;
case E1000_DEV_ID_I350_COPPER:
case E1000_DEV_ID_I350_FIBER:
case E1000_DEV_ID_I350_SERDES:
case E1000_DEV_ID_I350_SGMII:
mac->type = e1000_i350;
break;
case E1000_DEV_ID_I210_COPPER:
case E1000_DEV_ID_I210_COPPER_OEM1:
case E1000_DEV_ID_I210_COPPER_IT:
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
mac->type = e1000_i210;
break;
case E1000_DEV_ID_I211_COPPER:
mac->type = e1000_i211;
break;
default:
return -E1000_ERR_MAC_INIT;
break;
} }
/* Set media type */ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
/* phy->reset_delay_us = 100;
* The 82575 uses bits 22:23 for link mode. The mode can be changed
* based on the EEPROM. We cannot rely upon device ID. There
* is no distinguishable difference between fiber and internal
* SerDes mode on the 82575. There can be an external PHY attached
* on the SGMII interface. For this, we'll set sgmii_active to true.
*/
phy->media_type = e1000_media_type_copper;
dev_spec->sgmii_active = false;
ctrl_ext = rd32(E1000_CTRL_EXT); ctrl_ext = rd32(E1000_CTRL_EXT);
switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
case E1000_CTRL_EXT_LINK_MODE_SGMII: if (igb_sgmii_active_82575(hw)) {
dev_spec->sgmii_active = true; phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
ctrl_ext |= E1000_CTRL_I2C_ENA;
} else {
phy->ops.reset = igb_phy_hw_reset;
ctrl_ext &= ~E1000_CTRL_I2C_ENA;
}
wr32(E1000_CTRL_EXT, ctrl_ext);
igb_reset_mdicnfg_82580(hw);
if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
} else {
switch (hw->mac.type) {
case e1000_82580:
case e1000_i350:
phy->ops.read_reg = igb_read_phy_reg_82580;
phy->ops.write_reg = igb_write_phy_reg_82580;
break; break;
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: case e1000_i210:
case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: case e1000_i211:
hw->phy.media_type = e1000_media_type_internal_serdes; phy->ops.read_reg = igb_read_phy_reg_gs40g;
phy->ops.write_reg = igb_write_phy_reg_gs40g;
break; break;
default: default:
break; phy->ops.read_reg = igb_read_phy_reg_igp;
phy->ops.write_reg = igb_write_phy_reg_igp;
}
} }
/* Set mta register count */ /* set lan id */
mac->mta_reg_count = 128; hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
/* Set rar entry count */ E1000_STATUS_FUNC_SHIFT;
switch (mac->type) {
case e1000_82576: /* Set phy->phy_addr and phy->id. */
mac->rar_entry_count = E1000_RAR_ENTRIES_82576; ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
return ret_val;
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
phy->type = e1000_phy_m88;
phy->ops.get_phy_info = igb_get_phy_info_m88;
if (phy->id == I347AT4_E_PHY_ID ||
phy->id == M88E1112_E_PHY_ID)
phy->ops.get_cable_length =
igb_get_cable_length_m88_gen2;
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break; break;
case e1000_82580: case IGP03E1000_E_PHY_ID:
mac->rar_entry_count = E1000_RAR_ENTRIES_82580; phy->type = e1000_phy_igp_3;
phy->ops.get_phy_info = igb_get_phy_info_igp;
phy->ops.get_cable_length = igb_get_cable_length_igp_2;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break; break;
case e1000_i350: case I82580_I_PHY_ID:
mac->rar_entry_count = E1000_RAR_ENTRIES_I350; case I350_I_PHY_ID:
phy->type = e1000_phy_82580;
phy->ops.force_speed_duplex =
igb_phy_force_speed_duplex_82580;
phy->ops.get_cable_length = igb_get_cable_length_82580;
phy->ops.get_phy_info = igb_get_phy_info_82580;
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
break; break;
default: case I210_I_PHY_ID:
mac->rar_entry_count = E1000_RAR_ENTRIES_82575; phy->type = e1000_phy_i210;
phy->ops.check_polarity = igb_check_polarity_m88;
phy->ops.get_phy_info = igb_get_phy_info_m88;
phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break; break;
default:
ret_val = -E1000_ERR_PHY;
goto out;
} }
/* reset */
if (mac->type >= e1000_82580)
mac->ops.reset_hw = igb_reset_hw_82580;
else
mac->ops.reset_hw = igb_reset_hw_82575;
if (mac->type >= e1000_i210) { out:
mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; return ret_val;
mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; }
} else {
mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
}
/* Set if part includes ASF firmware */ /**
mac->asf_firmware_present = true; * igb_init_nvm_params_82575 - Init NVM func ptrs.
/* Set if manageability features are enabled. */ * @hw: pointer to the HW structure
mac->arc_subsystem_valid = **/
(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
? true : false; {
/* enable EEE on i350 parts and later parts */ struct e1000_nvm_info *nvm = &hw->nvm;
if (mac->type >= e1000_i350) u32 eecd = rd32(E1000_EECD);
dev_spec->eee_disable = false; u16 size;
else
dev_spec->eee_disable = true;
/* physical interface link setup */
mac->ops.setup_physical_interface =
(hw->phy.media_type == e1000_media_type_copper)
? igb_setup_copper_link_82575
: igb_setup_serdes_link_82575;
/* NVM initialization */
eecd = rd32(E1000_EECD);
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT); E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
/*
* Added to a constant, "size" becomes the left-shift value
* for setting word_size. * for setting word_size.
*/ */
size += NVM_WORD_SIZE_BASE_SHIFT; size += NVM_WORD_SIZE_BASE_SHIFT;
/* /* Just in case size is out of range, cap it to the largest
* Check for invalid size * EEPROM size supported
*/ */
if ((hw->mac.type == e1000_82576) && (size > 15)) { if (size > 15)
pr_notice("The NVM size is not valid, defaulting to 32K\n");
size = 15; size = 15;
}
nvm->word_size = 1 << size; nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) { if (hw->mac.type < e1000_i210) {
nvm->opcode_bits = 8; nvm->opcode_bits = 8;
nvm->delay_usec = 1; nvm->delay_usec = 1;
switch (nvm->override) { switch (nvm->override) {
case e1000_nvm_override_spi_large: case e1000_nvm_override_spi_large:
nvm->page_size = 32; nvm->page_size = 32;
@@ -277,18 +261,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
nvm->address_bits = 8;
break;
default:
-nvm->page_size = eecd
-& E1000_EECD_ADDR_BITS ? 32 : 8;
-nvm->address_bits = eecd
-& E1000_EECD_ADDR_BITS ? 16 : 8;
+nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+16 : 8;
break;
}
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
-} else
+} else {
nvm->type = e1000_nvm_flash_hw;
+}
/* NVM Function Pointers */
switch (hw->mac.type) {
@@ -345,118 +329,176 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
/* if part supports SR-IOV then initialize mailbox parameters */ return 0;
}
/**
* igb_init_mac_params_82575 - Init MAC func ptrs.
* @hw: pointer to the HW structure
**/
static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
/* Set mta register count */
mac->mta_reg_count = 128;
/* Set rar entry count */
switch (mac->type) { switch (mac->type) {
case e1000_82576: case e1000_82576:
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
break;
case e1000_82580:
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
break;
case e1000_i350: case e1000_i350:
igb_init_mbx_params_pf(hw); mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break; break;
default: default:
mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
break; break;
} }
/* reset */
if (mac->type >= e1000_82580)
mac->ops.reset_hw = igb_reset_hw_82580;
else
mac->ops.reset_hw = igb_reset_hw_82575;
/* setup PHY parameters */ if (mac->type >= e1000_i210) {
if (phy->media_type != e1000_media_type_copper) { mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
phy->type = e1000_phy_none; mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
return 0;
}
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->reset_delay_us = 100;
ctrl_ext = rd32(E1000_CTRL_EXT);
/* PHY function pointers */
if (igb_sgmii_active_82575(hw)) {
phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
ctrl_ext |= E1000_CTRL_I2C_ENA;
} else { } else {
phy->ops.reset = igb_phy_hw_reset; mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
ctrl_ext &= ~E1000_CTRL_I2C_ENA; mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
} }
wr32(E1000_CTRL_EXT, ctrl_ext); /* Set if part includes ASF firmware */
igb_reset_mdicnfg_82580(hw); mac->asf_firmware_present = true;
/* Set if manageability features are enabled. */
if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { mac->arc_subsystem_valid =
phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; ? true : false;
} else if ((hw->mac.type == e1000_82580) /* enable EEE on i350 parts and later parts */
|| (hw->mac.type == e1000_i350)) { if (mac->type >= e1000_i350)
phy->ops.read_reg = igb_read_phy_reg_82580; dev_spec->eee_disable = false;
phy->ops.write_reg = igb_write_phy_reg_82580; else
} else if (hw->phy.type >= e1000_phy_i210) { dev_spec->eee_disable = true;
phy->ops.read_reg = igb_read_phy_reg_gs40g; /* physical interface link setup */
phy->ops.write_reg = igb_write_phy_reg_gs40g; mac->ops.setup_physical_interface =
} else { (hw->phy.media_type == e1000_media_type_copper)
phy->ops.read_reg = igb_read_phy_reg_igp; ? igb_setup_copper_link_82575
phy->ops.write_reg = igb_write_phy_reg_igp; : igb_setup_serdes_link_82575;
}
/* set lan id */ return 0;
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> }
E1000_STATUS_FUNC_SHIFT;
/* Set phy->phy_addr and phy->id. */ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
ret_val = igb_get_phy_id_82575(hw); {
if (ret_val) struct e1000_mac_info *mac = &hw->mac;
return ret_val; struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
s32 ret_val;
u32 ctrl_ext = 0;
/* Verify phy id and set remaining function pointers */ switch (hw->device_id) {
switch (phy->id) { case E1000_DEV_ID_82575EB_COPPER:
case I347AT4_E_PHY_ID: case E1000_DEV_ID_82575EB_FIBER_SERDES:
case M88E1112_E_PHY_ID: case E1000_DEV_ID_82575GB_QUAD_COPPER:
case M88E1111_I_PHY_ID: mac->type = e1000_82575;
phy->type = e1000_phy_m88; break;
phy->ops.get_phy_info = igb_get_phy_info_m88; case E1000_DEV_ID_82576:
case E1000_DEV_ID_82576_NS:
case E1000_DEV_ID_82576_NS_SERDES:
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
case E1000_DEV_ID_82576_SERDES_QUAD:
mac->type = e1000_82576;
break;
case E1000_DEV_ID_82580_COPPER:
case E1000_DEV_ID_82580_FIBER:
case E1000_DEV_ID_82580_QUAD_FIBER:
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
case E1000_DEV_ID_DH89XXCC_SGMII:
case E1000_DEV_ID_DH89XXCC_SERDES:
case E1000_DEV_ID_DH89XXCC_BACKPLANE:
case E1000_DEV_ID_DH89XXCC_SFP:
mac->type = e1000_82580;
break;
case E1000_DEV_ID_I350_COPPER:
case E1000_DEV_ID_I350_FIBER:
case E1000_DEV_ID_I350_SERDES:
case E1000_DEV_ID_I350_SGMII:
mac->type = e1000_i350;
break;
case E1000_DEV_ID_I210_COPPER:
case E1000_DEV_ID_I210_COPPER_OEM1:
case E1000_DEV_ID_I210_COPPER_IT:
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
mac->type = e1000_i210;
break;
case E1000_DEV_ID_I211_COPPER:
mac->type = e1000_i211;
break;
default:
return -E1000_ERR_MAC_INIT;
break;
}
if (phy->id == I347AT4_E_PHY_ID || /* Set media type */
phy->id == M88E1112_E_PHY_ID) /*
phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; * The 82575 uses bits 22:23 for link mode. The mode can be changed
else * based on the EEPROM. We cannot rely upon device ID. There
phy->ops.get_cable_length = igb_get_cable_length_m88; * is no distinguishable difference between fiber and internal
* SerDes mode on the 82575. There can be an external PHY attached
* on the SGMII interface. For this, we'll set sgmii_active to true.
*/
hw->phy.media_type = e1000_media_type_copper;
dev_spec->sgmii_active = false;
if (phy->id == I210_I_PHY_ID) { ctrl_ext = rd32(E1000_CTRL_EXT);
phy->ops.get_cable_length = switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
igb_get_cable_length_m88_gen2; case E1000_CTRL_EXT_LINK_MODE_SGMII:
phy->ops.set_d0_lplu_state = dev_spec->sgmii_active = true;
igb_set_d0_lplu_state_82580;
phy->ops.set_d3_lplu_state =
igb_set_d3_lplu_state_82580;
}
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break; break;
case IGP03E1000_E_PHY_ID: case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
phy->type = e1000_phy_igp_3; case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
phy->ops.get_phy_info = igb_get_phy_info_igp; hw->phy.media_type = e1000_media_type_internal_serdes;
phy->ops.get_cable_length = igb_get_cable_length_igp_2;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break; break;
case I82580_I_PHY_ID: default:
case I350_I_PHY_ID:
phy->type = e1000_phy_82580;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
phy->ops.get_cable_length = igb_get_cable_length_82580;
phy->ops.get_phy_info = igb_get_phy_info_82580;
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
break; break;
case I210_I_PHY_ID: }
phy->type = e1000_phy_i210;
phy->ops.get_phy_info = igb_get_phy_info_m88; /* mac initialization and operations */
phy->ops.check_polarity = igb_check_polarity_m88; ret_val = igb_init_mac_params_82575(hw);
phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; if (ret_val)
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; goto out;
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; /* NVM initialization */
ret_val = igb_init_nvm_params_82575(hw);
if (ret_val)
goto out;
/* if part supports SR-IOV then initialize mailbox parameters */
switch (mac->type) {
case e1000_82576:
case e1000_i350:
igb_init_mbx_params_pf(hw);
break; break;
default: default:
return -E1000_ERR_PHY; break;
} }
return 0; /* setup PHY parameters */
ret_val = igb_init_phy_params_82575(hw);
out:
return ret_val;
} }
/**
......
@@ -139,8 +139,6 @@ struct vf_data_storage {
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -169,6 +167,17 @@ enum igb_tx_flags {
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
@@ -275,10 +284,18 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_build_skb(ring) \
+test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
......
@@ -3354,6 +3354,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+struct igb_ring *rx_ring)
+{
+#define IGB_MAX_BUILD_SKB_SIZE \
+(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
+(NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
+/* set build_skb flag */
+if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
+set_ring_build_skb_enabled(rx_ring);
+else
+clear_ring_build_skb_enabled(rx_ring);
+}
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
@@ -3373,8 +3387,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
 * the Base and Length of the Rx Descriptor Ring */
-for (i = 0; i < adapter->num_rx_queues; i++)
-igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+for (i = 0; i < adapter->num_rx_queues; i++) {
+struct igb_ring *rx_ring = adapter->rx_ring[i];
+igb_set_rx_buffer_len(adapter, rx_ring);
+igb_configure_rx_ring(adapter, rx_ring);
+}
}
/**
@@ -4417,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-/*
- * The largest size we can write to the descriptor is 65535. In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
static void igb_tx_map(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
const u8 hdr_len)
@@ -4592,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
-/* need: 1 descriptor per page,
+/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
 * + 2 desc gap to keep tail from touching head,
- * + 1 desc for skb->data,
 * + 1 desc for context descriptor,
- * otherwise try next time */
-if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+ * otherwise try next time
+ */
+if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+unsigned short f;
+for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+} else {
+count += skb_shinfo(skb)->nr_frags;
+}
+if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
@@ -4642,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
igb_tx_map(tx_ring, first, hdr_len);
/* Make sure there is space in the ring for the next send. */
-igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -6046,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
}
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets &&
netif_carrier_ok(tx_ring->netdev) &&
-igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
 * sees the new next_to_clean.
 */
@@ -6097,6 +6118,41 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+struct page *page,
+unsigned int truesize)
+{
+/* avoid re-using remote pages */
+if (unlikely(page_to_nid(page) != numa_node_id()))
+return false;
+#if (PAGE_SIZE < 8192)
+/* if we are only owner of page we can reuse it */
+if (unlikely(page_count(page) != 1))
+return false;
+/* flip page offset to other buffer */
+rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+/* since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+atomic_set(&page->_count, 2);
+#else
+/* move offset up to the next cache line */
+rx_buffer->page_offset += truesize;
+if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+return false;
+/* bump ref count on page before it is given to the stack */
+get_page(page);
+#endif
+return true;
+}
/**
 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
@@ -6119,6 +6175,11 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
{
struct page *page = rx_buffer->page;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+unsigned int truesize = IGB_RX_BUFSZ;
+#else
+unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
unsigned char *va = page_address(page) + rx_buffer->page_offset;
@@ -6141,38 +6202,88 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+rx_buffer->page_offset, size, truesize);
-/* avoid re-using remote pages */
-if (unlikely(page_to_nid(page) != numa_node_id()))
-return false;
-#if (PAGE_SIZE < 8192)
-/* if we are only owner of page we can reuse it */
-if (unlikely(page_count(page) != 1))
-return false;
-/* flip page offset to other buffer */
-rx_buffer->page_offset ^= IGB_RX_BUFSZ;
-/*
- * since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unnecessary locked operation
- */
-atomic_set(&page->_count, 2);
-#else
-/* move offset up to the next cache line */
-rx_buffer->page_offset += SKB_DATA_ALIGN(size);
-if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
-return false;
-/* bump ref count on page before it is given to the stack */
-get_page(page);
-#endif
-return true;
+return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+}
+
+static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
+union e1000_adv_rx_desc *rx_desc)
+{
+struct igb_rx_buffer *rx_buffer;
+struct sk_buff *skb;
+struct page *page;
+void *page_addr;
+unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+unsigned int truesize = IGB_RX_BUFSZ;
+#else
+unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+SKB_DATA_ALIGN(NET_SKB_PAD +
+NET_IP_ALIGN +
+size);
+#endif
+/* If we spanned a buffer we have a huge mess so test for it */
+BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
+/* Guarantee this function can be used by verifying buffer sizes */
+BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
+NET_IP_ALIGN +
+IGB_TS_HDR_LEN +
+ETH_FRAME_LEN +
+ETH_FCS_LEN));
+rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+page = rx_buffer->page;
+prefetchw(page);
+page_addr = page_address(page) + rx_buffer->page_offset;
+/* prefetch first cache line of first page */
+prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
+#if L1_CACHE_BYTES < 128
+prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
+#endif
+/* build an skb to around the page buffer */
+skb = build_skb(page_addr, truesize);
+if (unlikely(!skb)) {
+rx_ring->rx_stats.alloc_failed++;
+return NULL;
+}
+/* we are reusing so sync this buffer for CPU use */
+dma_sync_single_range_for_cpu(rx_ring->dev,
+rx_buffer->dma,
+rx_buffer->page_offset,
+IGB_RX_BUFSZ,
+DMA_FROM_DEVICE);
+/* update pointers within the skb to store the data */
+skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+__skb_put(skb, size);
+/* pull timestamp out of packet data */
+if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+__skb_pull(skb, IGB_TS_HDR_LEN);
+}
+if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
+/* hand second half of page back to the ring */
+igb_reuse_rx_page(rx_ring, rx_buffer);
+} else {
+/* we are not reusing the buffer so unmap it */
+dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+PAGE_SIZE, DMA_FROM_DEVICE);
+}
+/* clear contents of buffer_info */
+rx_buffer->dma = 0;
+rx_buffer->page = NULL;
+return skb;
}
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
@@ -6184,13 +6295,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-/*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
-rmb();
page = rx_buffer->page;
prefetchw(page);
@@ -6590,7 +6694,16 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
break;
+/* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+rmb();
/* retrieve a buffer from the ring */
+if (ring_uses_build_skb(rx_ring))
+skb = igb_build_rx_buffer(rx_ring, rx_desc);
+else
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
@@ -6678,6 +6791,14 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+if (ring_uses_build_skb(rx_ring))
+return NET_SKB_PAD + NET_IP_ALIGN;
+else
+return 0;
+}
/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
@@ -6704,7 +6825,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 * Refresh the desc even if buffer_addrs didn't change
 * because each write-back erases this info.
 */
-rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
+bi->page_offset +
+igb_rx_offset(rx_ring));
rx_desc++;
bi++;
@@ -7608,7 +7731,7 @@ static DEFINE_SPINLOCK(i2c_clients_lock);
 * @adapter: adapter struct
 * @dev_addr: device address of i2c needed.
 */
-struct i2c_client *
+static struct i2c_client *
igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
{
ulong flags;
@@ -7631,13 +7754,8 @@ igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
}
}
-/* no client_list found, create a new one as long as
- * irqs are not disabled
- */
-if (unlikely(irqs_disabled()))
-goto exit;
-client_list = kzalloc(sizeof(*client_list), GFP_KERNEL);
+/* no client_list found, create a new one */
+client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
if (client_list == NULL)
goto exit;
......
@@ -4480,39 +4480,57 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
-/* Set capability flags */
+/* Set common capability flags and settings */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
-switch (hw->mac.type) {
-case ixgbe_mac_82598EB:
-if (hw->device_id == IXGBE_DEV_ID_82598AT)
-adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
-adapter->max_q_vectors = MAX_Q_VECTORS_82598;
-break;
-case ixgbe_mac_X540:
-fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
-if (fwsm & IXGBE_FWSM_TS_ENABLED)
-adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-case ixgbe_mac_82599EB:
-adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
-if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
-adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-/* Flow Director hash filters enabled */
+adapter->ring_feature[RING_F_FDIR].limit = IXGBE_MAX_FDIR_INDICES;
+adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->atr_sample_rate = 20;
-adapter->ring_feature[RING_F_FDIR].limit =
-IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef CONFIG_IXGBE_DCA
+adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
-#endif
+#endif /* CONFIG_IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+/* Set MAC specific capability flags and exceptions */
+switch (hw->mac.type) {
+case ixgbe_mac_82598EB:
+adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
+adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+if (hw->device_id == IXGBE_DEV_ID_82598AT)
+adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+adapter->max_q_vectors = MAX_Q_VECTORS_82598;
+adapter->ring_feature[RING_F_FDIR].limit = 0;
+adapter->atr_sample_rate = 0;
+adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
#endif /* IXGBE_FCOE */
break;
+case ixgbe_mac_82599EB:
+if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+break;
+case ixgbe_mac_X540:
+fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+if (fwsm & IXGBE_FWSM_TS_ENABLED)
+adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+break;
default:
break;
}
......