Commit 97c7b179 authored by David S. Miller's avatar David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6

parents e933d019 082757af
...@@ -2402,13 +2402,16 @@ bool e1000_has_link(struct e1000_adapter *adapter) ...@@ -2402,13 +2402,16 @@ bool e1000_has_link(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
bool link_active = false; bool link_active = false;
/* get_link_status is set on LSC (link status) interrupt or /* get_link_status is set on LSC (link status) interrupt or rx
* rx sequence error interrupt. get_link_status will stay * sequence error interrupt (except on intel ce4100).
* false until the e1000_check_for_link establishes link * get_link_status will stay false until the
* for copper adapters ONLY * e1000_check_for_link establishes link for copper adapters
* ONLY
*/ */
switch (hw->media_type) { switch (hw->media_type) {
case e1000_media_type_copper: case e1000_media_type_copper:
if (hw->mac_type == e1000_ce4100)
hw->get_link_status = 1;
if (hw->get_link_status) { if (hw->get_link_status) {
e1000_check_for_link(hw); e1000_check_for_link(hw);
link_active = !hw->get_link_status; link_active = !hw->get_link_status;
......
...@@ -512,6 +512,16 @@ ...@@ -512,6 +512,16 @@
#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 #define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
#define E1000_GCR_CAP_VER2 0x00040000 #define E1000_GCR_CAP_VER2 0x00040000
/* mPHY Address Control and Data Registers */
#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
/* mPHY PCS CLK Register */
#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
/* mPHY Near End Digital Loopback Override Bit */
#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
/* PHY Control Register */ /* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
......
...@@ -1461,6 +1461,22 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter) ...@@ -1461,6 +1461,22 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
/* use CTRL_EXT to identify link type as SGMII can appear as copper */ /* use CTRL_EXT to identify link type as SGMII can appear as copper */
if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
/* Enable DH89xxCC MPHY for near end loopback */
reg = rd32(E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
E1000_MPHY_PCS_CLK_REG_OFFSET;
wr32(E1000_MPHY_ADDR_CTL, reg);
reg = rd32(E1000_MPHY_DATA);
reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
wr32(E1000_MPHY_DATA, reg);
}
reg = rd32(E1000_RCTL); reg = rd32(E1000_RCTL);
reg |= E1000_RCTL_LBM_TCVR; reg |= E1000_RCTL_LBM_TCVR;
wr32(E1000_RCTL, reg); wr32(E1000_RCTL, reg);
...@@ -1502,6 +1518,23 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter) ...@@ -1502,6 +1518,23 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
u32 rctl; u32 rctl;
u16 phy_reg; u16 phy_reg;
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
u32 reg;
/* Disable near end loopback on DH89xxCC */
reg = rd32(E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
E1000_MPHY_PCS_CLK_REG_OFFSET;
wr32(E1000_MPHY_ADDR_CTL, reg);
reg = rd32(E1000_MPHY_DATA);
reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
wr32(E1000_MPHY_DATA, reg);
}
rctl = rd32(E1000_RCTL); rctl = rd32(E1000_RCTL);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
wr32(E1000_RCTL, rctl); wr32(E1000_RCTL, rctl);
......
...@@ -214,12 +214,10 @@ struct ixgbe_ring { ...@@ -214,12 +214,10 @@ struct ixgbe_ring {
struct ixgbe_rx_buffer *rx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info;
}; };
unsigned long state; unsigned long state;
u8 atr_sample_rate; u8 __iomem *tail;
u8 atr_count;
u16 count; /* amount of descriptors */ u16 count; /* amount of descriptors */
u16 rx_buf_len; u16 rx_buf_len;
u16 next_to_use;
u16 next_to_clean;
u8 queue_index; /* needed for multiqueue queue management */ u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets u8 reg_idx; /* holds the special value that gets
...@@ -227,15 +225,13 @@ struct ixgbe_ring { ...@@ -227,15 +225,13 @@ struct ixgbe_ring {
* associated with this ring, which is * associated with this ring, which is
* different for DCB and RSS modes * different for DCB and RSS modes
*/ */
u8 dcb_tc; u8 atr_sample_rate;
u8 atr_count;
u16 work_limit; /* max work per interrupt */
u8 __iomem *tail;
unsigned int total_bytes; u16 next_to_use;
unsigned int total_packets; u16 next_to_clean;
u8 dcb_tc;
struct ixgbe_queue_stats stats; struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
union { union {
...@@ -277,6 +273,18 @@ struct ixgbe_ring_feature { ...@@ -277,6 +273,18 @@ struct ixgbe_ring_feature {
int mask; int mask;
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
struct ixgbe_ring_container {
#if MAX_RX_QUEUES > MAX_TX_QUEUES
DECLARE_BITMAP(idx, MAX_RX_QUEUES);
#else
DECLARE_BITMAP(idx, MAX_TX_QUEUES);
#endif
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
};
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1) ? 8 : 1)
...@@ -294,12 +302,7 @@ struct ixgbe_q_vector { ...@@ -294,12 +302,7 @@ struct ixgbe_q_vector {
int cpu; /* CPU for DCA */ int cpu; /* CPU for DCA */
#endif #endif
struct napi_struct napi; struct napi_struct napi;
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ struct ixgbe_ring_container rx, tx;
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
u8 rxr_count; /* Rx ring count assigned to this vector */
u8 txr_count; /* Tx ring count assigned to this vector */
u8 tx_itr;
u8 rx_itr;
u32 eitr; u32 eitr;
cpumask_var_t affinity_mask; cpumask_var_t affinity_mask;
char name[IFNAMSIZ + 9]; char name[IFNAMSIZ + 9];
...@@ -413,6 +416,9 @@ struct ixgbe_adapter { ...@@ -413,6 +416,9 @@ struct ixgbe_adapter {
u16 eitr_low; u16 eitr_low;
u16 eitr_high; u16 eitr_high;
/* Work limits */
u16 tx_work_limit;
/* TX */ /* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues; int num_tx_queues;
...@@ -581,13 +587,10 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, ...@@ -581,13 +587,10 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id); u16 soft_id);
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask); union ixgbe_atr_input *mask);
extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring);
extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring);
extern void ixgbe_set_rx_mode(struct net_device *netdev); extern void ixgbe_set_rx_mode(struct net_device *netdev);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
...@@ -595,7 +598,8 @@ extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, ...@@ -595,7 +598,8 @@ extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc, union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb); struct sk_buff *skb,
u32 staterr);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc); struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
......
...@@ -442,109 +442,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, ...@@ -442,109 +442,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
return 0; return 0;
} }
static void ixgbe_do_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else
ixgbe_reset(adapter);
}
static u32 ixgbe_get_rx_csum(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
}
static void ixgbe_set_rsc(struct ixgbe_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
set_ring_rsc_enabled(ring);
ixgbe_configure_rscctl(adapter, ring);
} else {
ixgbe_clear_rscctl(adapter, ring);
}
}
}
static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool need_reset = false;
if (data) {
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
} else {
adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
netdev->features &= ~NETIF_F_LRO;
}
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
ixgbe_set_rsc(adapter);
break;
case ixgbe_mac_82599EB:
need_reset = true;
break;
default:
break;
}
}
if (need_reset)
ixgbe_do_reset(netdev);
return 0;
}
static u32 ixgbe_get_tx_csum(struct net_device *netdev)
{
return (netdev->features & NETIF_F_IP_CSUM) != 0;
}
static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 feature_list;
feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
feature_list |= NETIF_F_SCTP_CSUM;
break;
default:
break;
}
if (data)
netdev->features |= feature_list;
else
netdev->features &= ~feature_list;
return 0;
}
static int ixgbe_set_tso(struct net_device *netdev, u32 data)
{
if (data) {
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
} else {
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
}
return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev) static u32 ixgbe_get_msglevel(struct net_device *netdev)
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
...@@ -2103,7 +2000,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev, ...@@ -2103,7 +2000,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit; ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
/* only valid if in constant ITR mode */ /* only valid if in constant ITR mode */
switch (adapter->rx_itr_setting) { switch (adapter->rx_itr_setting) {
...@@ -2122,7 +2019,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev, ...@@ -2122,7 +2019,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
} }
/* if in mixed tx/rx queues per vector mode, report only rx settings */ /* if in mixed tx/rx queues per vector mode, report only rx settings */
if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count) if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
return 0; return 0;
/* only valid if in constant ITR mode */ /* only valid if in constant ITR mode */
...@@ -2187,12 +2084,12 @@ static int ixgbe_set_coalesce(struct net_device *netdev, ...@@ -2187,12 +2084,12 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
bool need_reset = false; bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */ /* don't accept tx specific changes if we've got mixed RxTx vectors */
if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
&& ec->tx_coalesce_usecs) && ec->tx_coalesce_usecs)
return -EINVAL; return -EINVAL;
if (ec->tx_max_coalesced_frames_irq) if (ec->tx_max_coalesced_frames_irq)
adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) { if (ec->rx_coalesce_usecs > 1) {
/* check the limits */ /* check the limits */
...@@ -2261,18 +2158,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev, ...@@ -2261,18 +2158,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < num_vectors; i++) { for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i]; q_vector = adapter->q_vector[i];
if (q_vector->txr_count && !q_vector->rxr_count) if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */ /* tx only */
q_vector->eitr = adapter->tx_eitr_param; q_vector->eitr = adapter->tx_eitr_param;
else else
/* rx only or mixed */ /* rx only or mixed */
q_vector->eitr = adapter->rx_eitr_param; q_vector->eitr = adapter->rx_eitr_param;
q_vector->tx.work_limit = adapter->tx_work_limit;
ixgbe_write_eitr(q_vector); ixgbe_write_eitr(q_vector);
} }
/* Legacy Interrupt Mode */ /* Legacy Interrupt Mode */
} else { } else {
q_vector = adapter->q_vector[0]; q_vector = adapter->q_vector[0];
q_vector->eitr = adapter->rx_eitr_param; q_vector->eitr = adapter->rx_eitr_param;
q_vector->tx.work_limit = adapter->tx_work_limit;
ixgbe_write_eitr(q_vector); ixgbe_write_eitr(q_vector);
} }
...@@ -2287,81 +2186,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev, ...@@ -2287,81 +2186,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return 0; return 0;
} }
static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool need_reset = false;
int rc;
#ifdef CONFIG_IXGBE_DCB
if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
!(data & ETH_FLAG_RXVLAN))
return -EINVAL;
#endif
need_reset = (data & ETH_FLAG_RXVLAN) !=
(netdev->features & NETIF_F_HW_VLAN_RX);
if ((data & ETH_FLAG_RXHASH) &&
!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
return -EOPNOTSUPP;
rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
ETH_FLAG_RXHASH);
if (rc)
return rc;
/* if state changes we need to update adapter->flags and reset */
if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
(!!(data & ETH_FLAG_LRO) !=
!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
if ((data & ETH_FLAG_LRO) &&
(!adapter->rx_itr_setting ||
(adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
e_info(probe, "rx-usecs set too low, "
"not enabling RSC.\n");
} else {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
ixgbe_set_rsc(adapter);
break;
case ixgbe_mac_82599EB:
need_reset = true;
break;
default:
break;
}
}
}
/*
* Check if Flow Director n-tuple support was enabled or disabled. If
* the state changed, we need to reset.
*/
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
/* turn off ATR, enable perfect filters and reset */
if (data & ETH_FLAG_NTUPLE) {
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
need_reset = true;
}
} else if (!(data & ETH_FLAG_NTUPLE)) {
/* turn off Flow Director, set ATR and reset */
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
need_reset = true;
}
if (need_reset)
ixgbe_do_reset(netdev);
return 0;
}
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd) struct ethtool_rxnfc *cmd)
{ {
...@@ -2744,16 +2568,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { ...@@ -2744,16 +2568,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_ringparam = ixgbe_set_ringparam, .set_ringparam = ixgbe_set_ringparam,
.get_pauseparam = ixgbe_get_pauseparam, .get_pauseparam = ixgbe_get_pauseparam,
.set_pauseparam = ixgbe_set_pauseparam, .set_pauseparam = ixgbe_set_pauseparam,
.get_rx_csum = ixgbe_get_rx_csum,
.set_rx_csum = ixgbe_set_rx_csum,
.get_tx_csum = ixgbe_get_tx_csum,
.set_tx_csum = ixgbe_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_msglevel = ixgbe_get_msglevel, .get_msglevel = ixgbe_get_msglevel,
.set_msglevel = ixgbe_set_msglevel, .set_msglevel = ixgbe_set_msglevel,
.get_tso = ethtool_op_get_tso,
.set_tso = ixgbe_set_tso,
.self_test = ixgbe_diag_test, .self_test = ixgbe_diag_test,
.get_strings = ixgbe_get_strings, .get_strings = ixgbe_get_strings,
.set_phys_id = ixgbe_set_phys_id, .set_phys_id = ixgbe_set_phys_id,
...@@ -2761,8 +2577,6 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { ...@@ -2761,8 +2577,6 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ethtool_stats = ixgbe_get_ethtool_stats, .get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce, .get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce, .set_coalesce = ixgbe_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ixgbe_set_flags,
.get_rxnfc = ixgbe_get_rxnfc, .get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc, .set_rxnfc = ixgbe_set_rxnfc,
}; };
......
...@@ -36,25 +36,6 @@ ...@@ -36,25 +36,6 @@
#include <scsi/libfc.h> #include <scsi/libfc.h>
#include <scsi/libfcoe.h> #include <scsi/libfcoe.h>
/**
* ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
* @rx_desc: advanced rx descriptor
*
* Returns : true if it is FCoE pkt
*/
static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
{
u16 p;
p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
return p == IXGBE_ETQF_FILTER_FCOE;
}
return false;
}
/** /**
* ixgbe_fcoe_clear_ddp - clear the given ddp context * ixgbe_fcoe_clear_ddp - clear the given ddp context
* @ddp - ptr to the ixgbe_fcoe_ddp * @ddp - ptr to the ixgbe_fcoe_ddp
...@@ -136,7 +117,6 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) ...@@ -136,7 +117,6 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
return len; return len;
} }
/** /**
* ixgbe_fcoe_ddp_setup - called to set up ddp context * ixgbe_fcoe_ddp_setup - called to set up ddp context
* @netdev: the corresponding net_device * @netdev: the corresponding net_device
...@@ -380,23 +360,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, ...@@ -380,23 +360,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
*/ */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc, union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb) struct sk_buff *skb,
u32 staterr)
{ {
u16 xid; u16 xid;
u32 fctl; u32 fctl;
u32 sterr, fceofe, fcerr, fcstat; u32 fceofe, fcerr, fcstat;
int rc = -EINVAL; int rc = -EINVAL;
struct ixgbe_fcoe *fcoe; struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp; struct ixgbe_fcoe_ddp *ddp;
struct fc_frame_header *fh; struct fc_frame_header *fh;
struct fcoe_crc_eof *crc; struct fcoe_crc_eof *crc;
if (!ixgbe_rx_is_fcoe(rx_desc)) fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
goto ddp_out; fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
if (fcerr == IXGBE_FCERR_BADCRC) if (fcerr == IXGBE_FCERR_BADCRC)
skb_checksum_none_assert(skb); skb_checksum_none_assert(skb);
else else
...@@ -425,7 +402,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, ...@@ -425,7 +402,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (fcerr | fceofe) if (fcerr | fceofe)
goto ddp_out; goto ddp_out;
fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT); fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
if (fcstat) { if (fcstat) {
/* update length of DDPed data */ /* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
......
...@@ -805,7 +805,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, ...@@ -805,7 +805,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
(count < tx_ring->work_limit)) { (count < q_vector->tx.work_limit)) {
bool cleaned = false; bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */ rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) { for ( ; !cleaned; count++) {
...@@ -834,11 +834,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, ...@@ -834,11 +834,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
} }
tx_ring->next_to_clean = i; tx_ring->next_to_clean = i;
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes; tx_ring->stats.bytes += total_bytes;
tx_ring->stats.packets += total_packets;
u64_stats_update_begin(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
u64_stats_update_end(&tx_ring->syncp); u64_stats_update_end(&tx_ring->syncp);
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
...@@ -886,7 +886,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, ...@@ -886,7 +886,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
} }
} }
return count < tx_ring->work_limit; return count < q_vector->tx.work_limit;
} }
#ifdef CONFIG_IXGBE_DCA #ifdef CONFIG_IXGBE_DCA
...@@ -959,17 +959,17 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) ...@@ -959,17 +959,17 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
if (q_vector->cpu == cpu) if (q_vector->cpu == cpu)
goto out_no_update; goto out_no_update;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) { for (i = 0; i < q_vector->tx.count; i++) {
ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1); r_idx + 1);
} }
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) { for (i = 0; i < q_vector->rx.count; i++) {
ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1); r_idx + 1);
} }
...@@ -1038,6 +1038,24 @@ static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc, ...@@ -1038,6 +1038,24 @@ static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
} }
/**
* ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
* @adapter: address of board private structure
* @rx_desc: advanced rx descriptor
*
* Returns : true if it is FCoE pkt
*/
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc)
{
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}
/** /**
* ixgbe_receive_skb - Send a completed packet up the stack * ixgbe_receive_skb - Send a completed packet up the stack
* @adapter: board private structure * @adapter: board private structure
...@@ -1070,14 +1088,14 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, ...@@ -1070,14 +1088,14 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
* @adapter: address of board private structure * @adapter: address of board private structure
* @status_err: hardware indication of status of receive * @status_err: hardware indication of status of receive
* @skb: skb currently being received and modified * @skb: skb currently being received and modified
* @status_err: status error value of last descriptor in packet
**/ **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc, union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb) struct sk_buff *skb,
u32 status_err)
{ {
u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); skb->ip_summed = CHECKSUM_NONE;
skb_checksum_none_assert(skb);
/* Rx csum disabled */ /* Rx csum disabled */
if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
...@@ -1421,14 +1439,12 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1421,14 +1439,12 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
} }
/* ERR_MASK will only have valid bits if EOP set */ /* ERR_MASK will only have valid bits if EOP set */
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
/* trim packet back to size 0 and recycle it */ dev_kfree_skb_any(skb);
__pskb_trim(skb, 0);
rx_buffer_info->skb = skb;
goto next_desc; goto next_desc;
} }
ixgbe_rx_checksum(adapter, rx_desc, skb); ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
if (adapter->netdev->features & NETIF_F_RXHASH) if (adapter->netdev->features & NETIF_F_RXHASH)
ixgbe_rx_hash(rx_desc, skb); ixgbe_rx_hash(rx_desc, skb);
...@@ -1439,8 +1455,9 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1439,8 +1455,9 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->protocol = eth_type_trans(skb, rx_ring->netdev); skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */ /* if ddp, not passing to ULD unless for FCP_RSP or error */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
staterr);
if (!ddp_bytes) if (!ddp_bytes)
goto next_desc; goto next_desc;
} }
...@@ -1486,12 +1503,12 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1486,12 +1503,12 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
} }
#endif /* IXGBE_FCOE */ #endif /* IXGBE_FCOE */
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
u64_stats_update_begin(&rx_ring->syncp); u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets; rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes; rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp); u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
} }
static int ixgbe_clean_rxonly(struct napi_struct *, int); static int ixgbe_clean_rxonly(struct napi_struct *, int);
...@@ -1517,31 +1534,31 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) ...@@ -1517,31 +1534,31 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
for (v_idx = 0; v_idx < q_vectors; v_idx++) { for (v_idx = 0; v_idx < q_vectors; v_idx++) {
q_vector = adapter->q_vector[v_idx]; q_vector = adapter->q_vector[v_idx];
/* XXX for_each_set_bit(...) */ /* XXX for_each_set_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx, r_idx = find_first_bit(q_vector->rx.idx,
adapter->num_rx_queues); adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) { for (i = 0; i < q_vector->rx.count; i++) {
u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx, r_idx = find_next_bit(q_vector->rx.idx,
adapter->num_rx_queues, adapter->num_rx_queues,
r_idx + 1); r_idx + 1);
} }
r_idx = find_first_bit(q_vector->txr_idx, r_idx = find_first_bit(q_vector->tx.idx,
adapter->num_tx_queues); adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) { for (i = 0; i < q_vector->tx.count; i++) {
u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
r_idx = find_next_bit(q_vector->txr_idx, r_idx = find_next_bit(q_vector->tx.idx,
adapter->num_tx_queues, adapter->num_tx_queues,
r_idx + 1); r_idx + 1);
} }
if (q_vector->txr_count && !q_vector->rxr_count) if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */ /* tx only */
q_vector->eitr = adapter->tx_eitr_param; q_vector->eitr = adapter->tx_eitr_param;
else if (q_vector->rxr_count) else if (q_vector->rx.count)
/* rx or mixed */ /* rx or mixed */
q_vector->eitr = adapter->rx_eitr_param; q_vector->eitr = adapter->rx_eitr_param;
...@@ -1597,11 +1614,8 @@ enum latency_range { ...@@ -1597,11 +1614,8 @@ enum latency_range {
/** /**
* ixgbe_update_itr - update the dynamic ITR value based on statistics * ixgbe_update_itr - update the dynamic ITR value based on statistics
* @adapter: pointer to adapter * @q_vector: structure containing interrupt and ring information
* @eitr: eitr setting (ints per sec) to give last timeslice * @ring_container: structure containing ring performance data
* @itr_setting: current throttle rate in ints/second
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
* *
* Stores a new ITR value based on packets and byte * Stores a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt * counts during the last interrupt. The advantage of per interrupt
...@@ -1613,17 +1627,18 @@ enum latency_range { ...@@ -1613,17 +1627,18 @@ enum latency_range {
* this functionality is controlled by the InterruptThrottleRate module * this functionality is controlled by the InterruptThrottleRate module
* parameter (see ixgbe_param.c) * parameter (see ixgbe_param.c)
**/ **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
u32 eitr, u8 itr_setting, struct ixgbe_ring_container *ring_container)
int packets, int bytes)
{ {
unsigned int retval = itr_setting;
u32 timepassed_us;
u64 bytes_perint; u64 bytes_perint;
struct ixgbe_adapter *adapter = q_vector->adapter;
int bytes = ring_container->total_bytes;
int packets = ring_container->total_packets;
u32 timepassed_us;
u8 itr_setting = ring_container->itr;
if (packets == 0) if (packets == 0)
goto update_itr_done; return;
/* simple throttlerate management /* simple throttlerate management
* 0-20MB/s lowest (100000 ints/s) * 0-20MB/s lowest (100000 ints/s)
...@@ -1631,28 +1646,32 @@ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, ...@@ -1631,28 +1646,32 @@ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
* 100-1249MB/s bulk (8000 ints/s) * 100-1249MB/s bulk (8000 ints/s)
*/ */
/* what was last interrupt timeslice? */ /* what was last interrupt timeslice? */
timepassed_us = 1000000/eitr; timepassed_us = 1000000/q_vector->eitr;
bytes_perint = bytes / timepassed_us; /* bytes/usec */ bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) { switch (itr_setting) {
case lowest_latency: case lowest_latency:
if (bytes_perint > adapter->eitr_low) if (bytes_perint > adapter->eitr_low)
retval = low_latency; itr_setting = low_latency;
break; break;
case low_latency: case low_latency:
if (bytes_perint > adapter->eitr_high) if (bytes_perint > adapter->eitr_high)
retval = bulk_latency; itr_setting = bulk_latency;
else if (bytes_perint <= adapter->eitr_low) else if (bytes_perint <= adapter->eitr_low)
retval = lowest_latency; itr_setting = lowest_latency;
break; break;
case bulk_latency: case bulk_latency:
if (bytes_perint <= adapter->eitr_high) if (bytes_perint <= adapter->eitr_high)
retval = low_latency; itr_setting = low_latency;
break; break;
} }
update_itr_done: /* clear work counters since we have the values we need */
return retval; ring_container->total_bytes = 0;
ring_container->total_packets = 0;
/* write updated itr to ring container */
ring_container->itr = itr_setting;
} }
/** /**
...@@ -1698,44 +1717,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) ...@@ -1698,44 +1717,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
} }
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{ {
struct ixgbe_adapter *adapter = q_vector->adapter; u32 new_itr = q_vector->eitr;
int i, r_idx; u8 current_itr;
u32 new_itr;
u8 current_itr, ret_itr;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
tx_ring->total_bytes);
/* if the result for this queue would decrease interrupt
* rate for this vector then use that result */
q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
q_vector->tx_itr - 1 : ret_itr);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); ixgbe_update_itr(q_vector, &q_vector->tx);
for (i = 0; i < q_vector->rxr_count; i++) { ixgbe_update_itr(q_vector, &q_vector->rx);
struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
rx_ring->total_bytes);
/* if the result for this queue would decrease interrupt
* rate for this vector then use that result */
q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
q_vector->rx_itr - 1 : ret_itr);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
}
current_itr = max(q_vector->rx_itr, q_vector->tx_itr); current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
switch (current_itr) { switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */ /* counts and packets in update_itr are dependent on these numbers */
...@@ -1746,16 +1736,17 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) ...@@ -1746,16 +1736,17 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
new_itr = 20000; /* aka hwitr = ~200 */ new_itr = 20000; /* aka hwitr = ~200 */
break; break;
case bulk_latency: case bulk_latency:
default:
new_itr = 8000; new_itr = 8000;
break; break;
default:
break;
} }
if (new_itr != q_vector->eitr) { if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */ /* do an exponential smoothing */
new_itr = ((q_vector->eitr * 9) + new_itr)/10; new_itr = ((q_vector->eitr * 9) + new_itr)/10;
/* save the algorithm value here, not the smoothed one */ /* save the algorithm value here */
q_vector->eitr = new_itr; q_vector->eitr = new_itr;
ixgbe_write_eitr(q_vector); ixgbe_write_eitr(q_vector);
...@@ -1995,15 +1986,13 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) ...@@ -1995,15 +1986,13 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
struct ixgbe_ring *tx_ring; struct ixgbe_ring *tx_ring;
int i, r_idx; int i, r_idx;
if (!q_vector->txr_count) if (!q_vector->tx.count)
return IRQ_HANDLED; return IRQ_HANDLED;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) { for (i = 0; i < q_vector->tx.count; i++) {
tx_ring = adapter->tx_ring[r_idx]; tx_ring = adapter->tx_ring[r_idx];
tx_ring->total_bytes = 0; r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1); r_idx + 1);
} }
...@@ -2031,16 +2020,14 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) ...@@ -2031,16 +2020,14 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
ixgbe_update_dca(q_vector); ixgbe_update_dca(q_vector);
#endif #endif
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) { for (i = 0; i < q_vector->rx.count; i++) {
rx_ring = adapter->rx_ring[r_idx]; rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0; r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1); r_idx + 1);
} }
if (!q_vector->rxr_count) if (!q_vector->rx.count)
return IRQ_HANDLED; return IRQ_HANDLED;
/* EIAM disabled interrupts (on this vector) for us */ /* EIAM disabled interrupts (on this vector) for us */
...@@ -2057,24 +2044,20 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) ...@@ -2057,24 +2044,20 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
int r_idx; int r_idx;
int i; int i;
if (!q_vector->txr_count && !q_vector->rxr_count) if (!q_vector->tx.count && !q_vector->rx.count)
return IRQ_HANDLED; return IRQ_HANDLED;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) { for (i = 0; i < q_vector->tx.count; i++) {
ring = adapter->tx_ring[r_idx]; ring = adapter->tx_ring[r_idx];
ring->total_bytes = 0; r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1); r_idx + 1);
} }
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) { for (i = 0; i < q_vector->rx.count; i++) {
ring = adapter->rx_ring[r_idx]; ring = adapter->rx_ring[r_idx];
ring->total_bytes = 0; r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1); r_idx + 1);
} }
...@@ -2106,7 +2089,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) ...@@ -2106,7 +2089,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector); ixgbe_update_dca(q_vector);
#endif #endif
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
rx_ring = adapter->rx_ring[r_idx]; rx_ring = adapter->rx_ring[r_idx];
ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
...@@ -2115,7 +2098,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) ...@@ -2115,7 +2098,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
if (work_done < budget) { if (work_done < budget) {
napi_complete(napi); napi_complete(napi);
if (adapter->rx_itr_setting & 1) if (adapter->rx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector); ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state)) if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx)); ((u64)1 << q_vector->v_idx));
...@@ -2147,33 +2130,33 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) ...@@ -2147,33 +2130,33 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector); ixgbe_update_dca(q_vector);
#endif #endif
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) { for (i = 0; i < q_vector->tx.count; i++) {
ring = adapter->tx_ring[r_idx]; ring = adapter->tx_ring[r_idx];
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1); r_idx + 1);
} }
/* attempt to distribute budget to each queue fairly, but don't allow /* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */ * the budget to go below 1 because we'll exit polling */
budget /= (q_vector->rxr_count ?: 1); budget /= (q_vector->rx.count ?: 1);
budget = max(budget, 1); budget = max(budget, 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) { for (i = 0; i < q_vector->rx.count; i++) {
ring = adapter->rx_ring[r_idx]; ring = adapter->rx_ring[r_idx];
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1); r_idx + 1);
} }
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
ring = adapter->rx_ring[r_idx]; ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */ /* If all Rx work done, exit the polling mode */
if (work_done < budget) { if (work_done < budget) {
napi_complete(napi); napi_complete(napi);
if (adapter->rx_itr_setting & 1) if (adapter->rx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector); ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state)) if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx)); ((u64)1 << q_vector->v_idx));
...@@ -2205,7 +2188,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) ...@@ -2205,7 +2188,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector); ixgbe_update_dca(q_vector);
#endif #endif
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
tx_ring = adapter->tx_ring[r_idx]; tx_ring = adapter->tx_ring[r_idx];
if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
...@@ -2215,7 +2198,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) ...@@ -2215,7 +2198,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
if (work_done < budget) { if (work_done < budget) {
napi_complete(napi); napi_complete(napi);
if (adapter->tx_itr_setting & 1) if (adapter->tx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector); ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state)) if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx)); ((u64)1 << q_vector->v_idx));
...@@ -2230,8 +2213,8 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, ...@@ -2230,8 +2213,8 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
set_bit(r_idx, q_vector->rxr_idx); set_bit(r_idx, q_vector->rx.idx);
q_vector->rxr_count++; q_vector->rx.count++;
rx_ring->q_vector = q_vector; rx_ring->q_vector = q_vector;
} }
...@@ -2241,9 +2224,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, ...@@ -2241,9 +2224,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
set_bit(t_idx, q_vector->txr_idx); set_bit(t_idx, q_vector->tx.idx);
q_vector->txr_count++; q_vector->tx.count++;
tx_ring->q_vector = q_vector; tx_ring->q_vector = q_vector;
q_vector->tx.work_limit = a->tx_work_limit;
} }
/** /**
...@@ -2332,10 +2316,10 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) ...@@ -2332,10 +2316,10 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
if (err) if (err)
return err; return err;
#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ #define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count) \
? &ixgbe_msix_clean_many : \ ? &ixgbe_msix_clean_many : \
(_v)->rxr_count ? &ixgbe_msix_clean_rx : \ (_v)->rx.count ? &ixgbe_msix_clean_rx : \
(_v)->txr_count ? &ixgbe_msix_clean_tx : \ (_v)->tx.count ? &ixgbe_msix_clean_tx : \
NULL) NULL)
for (vector = 0; vector < q_vectors; vector++) { for (vector = 0; vector < q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
...@@ -2386,51 +2370,6 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) ...@@ -2386,51 +2370,6 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
return err; return err;
} }
/*
 * ixgbe_set_itr - recompute the interrupt throttle rate for vector 0
 * @adapter: board private structure
 *
 * Legacy (non-MSI-X) interrupt path: re-derives the tx and rx latency
 * classes from ring 0's packet/byte counters, takes the numerically
 * larger (more latency-sensitive) of the two, maps that class to an
 * interrupts-per-second target, exponentially smooths it into
 * q_vector->eitr, and programs the EITR register.
 */
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	u32 new_itr = q_vector->eitr;
	u8 current_itr;

	/* update per-direction latency classes from last interval's stats */
	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
					    q_vector->tx_itr,
					    tx_ring->total_packets,
					    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
					    q_vector->rx_itr,
					    rx_ring->total_packets,
					    rx_ring->total_bytes);

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 9) + new_itr)/10;

		/* save the algorithm value here */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}
/** /**
* ixgbe_irq_enable - Enable default interrupt generation settings * ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure * @adapter: board private structure
...@@ -2528,10 +2467,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ...@@ -2528,10 +2467,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr); ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) { if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0]->total_packets = 0;
adapter->tx_ring[0]->total_bytes = 0;
adapter->rx_ring[0]->total_packets = 0;
adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */ /* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi)); __napi_schedule(&(q_vector->napi));
} }
...@@ -2553,10 +2488,10 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) ...@@ -2553,10 +2488,10 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
for (i = 0; i < q_vectors; i++) { for (i = 0; i < q_vectors; i++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES);
bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES);
q_vector->rxr_count = 0; q_vector->rx.count = 0;
q_vector->txr_count = 0; q_vector->tx.count = 0;
} }
} }
...@@ -2601,8 +2536,8 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) ...@@ -2601,8 +2536,8 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
i--; i--;
for (; i >= 0; i--) { for (; i >= 0; i--) {
/* free only the irqs that were actually requested */ /* free only the irqs that were actually requested */
if (!adapter->q_vector[i]->rxr_count && if (!adapter->q_vector[i]->rx.count &&
!adapter->q_vector[i]->txr_count) !adapter->q_vector[i]->tx.count)
continue; continue;
free_irq(adapter->msix_entries[i].vector, free_irq(adapter->msix_entries[i].vector,
...@@ -2926,29 +2861,12 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ...@@ -2926,29 +2861,12 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} }
/**
* ixgbe_clear_rscctl - disable RSC for the indicated ring
* @adapter: address of board private structure
* @ring: structure containing ring specific data
**/
void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rscctrl;
u8 reg_idx = ring->reg_idx;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
rscctrl &= ~IXGBE_RSCCTL_RSCEN;
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
/** /**
* ixgbe_configure_rscctl - enable RSC for the indicated ring * ixgbe_configure_rscctl - enable RSC for the indicated ring
* @adapter: address of board private structure * @adapter: address of board private structure
* @index: index of ring to set * @index: index of ring to set
**/ **/
void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring) struct ixgbe_ring *ring)
{ {
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
...@@ -3616,10 +3534,10 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) ...@@ -3616,10 +3534,10 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
q_vector = adapter->q_vector[q_idx]; q_vector = adapter->q_vector[q_idx];
napi = &q_vector->napi; napi = &q_vector->napi;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
if (!q_vector->rxr_count || !q_vector->txr_count) { if (!q_vector->rx.count || !q_vector->tx.count) {
if (q_vector->txr_count == 1) if (q_vector->tx.count == 1)
napi->poll = &ixgbe_clean_txonly; napi->poll = &ixgbe_clean_txonly;
else if (q_vector->rxr_count == 1) else if (q_vector->rx.count == 1)
napi->poll = &ixgbe_clean_rxonly; napi->poll = &ixgbe_clean_rxonly;
} }
} }
...@@ -4299,7 +4217,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) ...@@ -4299,7 +4217,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
if (work_done < budget) { if (work_done < budget) {
napi_complete(napi); napi_complete(napi);
if (adapter->rx_itr_setting & 1) if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(adapter); ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state)) if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
} }
...@@ -4965,7 +4883,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) ...@@ -4965,7 +4883,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
if (!q_vector) if (!q_vector)
goto err_out; goto err_out;
q_vector->adapter = adapter; q_vector->adapter = adapter;
if (q_vector->txr_count && !q_vector->rxr_count) if (q_vector->tx.count && !q_vector->rx.count)
q_vector->eitr = adapter->tx_eitr_param; q_vector->eitr = adapter->tx_eitr_param;
else else
q_vector->eitr = adapter->rx_eitr_param; q_vector->eitr = adapter->rx_eitr_param;
...@@ -5224,6 +5142,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) ...@@ -5224,6 +5142,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->tx_ring_count = IXGBE_DEFAULT_TXD; adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD; adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
/* set default work limits */
adapter->tx_work_limit = adapter->tx_ring_count;
/* initialize eeprom parameters */ /* initialize eeprom parameters */
if (ixgbe_init_eeprom_params_generic(hw)) { if (ixgbe_init_eeprom_params_generic(hw)) {
e_dev_err("EEPROM initialization failed\n"); e_dev_err("EEPROM initialization failed\n");
...@@ -5270,7 +5191,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) ...@@ -5270,7 +5191,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
tx_ring->next_to_use = 0; tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0; tx_ring->next_to_clean = 0;
tx_ring->work_limit = tx_ring->count;
return 0; return 0;
err: err:
...@@ -5979,7 +5899,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) ...@@ -5979,7 +5899,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/* get one bit for every active tx/rx interrupt vector */ /* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i]; struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rxr_count || qv->txr_count) if (qv->rx.count || qv->tx.count)
eics |= ((u64)1 << i); eics |= ((u64)1 << i);
} }
} }
...@@ -6084,9 +6004,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) ...@@ -6084,9 +6004,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
(flow_tx ? "TX" : "None")))); (flow_tx ? "TX" : "None"))));
netif_carrier_on(netdev); netif_carrier_on(netdev);
#ifdef HAVE_IPLINK_VF_CONFIG
ixgbe_check_vf_rate_limit(adapter); ixgbe_check_vf_rate_limit(adapter);
#endif /* HAVE_IPLINK_VF_CONFIG */
} }
/** /**
...@@ -6785,7 +6703,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) ...@@ -6785,7 +6703,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return 0; return 0;
} }
static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{ {
if (likely(ixgbe_desc_unused(tx_ring) >= size)) if (likely(ixgbe_desc_unused(tx_ring) >= size))
return 0; return 0;
...@@ -6795,11 +6713,10 @@ static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) ...@@ -6795,11 +6713,10 @@ static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{ {
struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id(); int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
smp_processor_id();
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
__be16 protocol; __be16 protocol = vlan_get_protocol(skb);
protocol = vlan_get_protocol(skb);
if (((protocol == htons(ETH_P_FCOE)) || if (((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) && (protocol == htons(ETH_P_FIP))) &&
...@@ -7188,6 +7105,98 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ...@@ -7188,6 +7105,98 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return 0; return 0;
} }
/*
 * ixgbe_do_reset - reset the adapter, respecting current interface state
 * @netdev: network interface device structure
 *
 * If the interface is running, perform a full reinit under the reset
 * lock (tears down and rebuilds the rings); otherwise a bare hardware
 * reset is sufficient.
 */
void ixgbe_do_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);
	else
		ixgbe_reset(adapter);
}
/*
 * ixgbe_fix_features - mask requested features down to what is supported
 * @netdev: network interface device structure
 * @data: requested feature flags
 *
 * ndo_fix_features hook: called by the stack before features are
 * committed. Returns @data with any bits cleared that the current
 * hardware/driver configuration cannot honor.
 */
static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

#ifdef CONFIG_DCB
	/* hardware VLAN stripping is not offered while DCB is enabled */
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		data &= ~NETIF_F_HW_VLAN_RX;
#endif

	/* silently clear RXHASH when RSS is not enabled */
	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		data &= ~NETIF_F_RXHASH;

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
	if (!(data & NETIF_F_RXCSUM))
		data &= ~NETIF_F_LRO;

	/* Turn off LRO if not RSC capable or invalid ITR settings */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
		data &= ~NETIF_F_LRO;
	} else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
		   (adapter->rx_itr_setting != 1 &&
		    adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
		data &= ~NETIF_F_LRO;
		e_info(probe, "rx-usecs set too low, not enabling RSC\n");
	}

	return data;
}
/*
 * ixgbe_set_features - apply the feature flags committed by the stack
 * @netdev: network interface device structure
 * @data: new feature flags (already filtered by ixgbe_fix_features)
 *
 * ndo_set_features hook: synchronizes driver state (Rx checksum, RSC,
 * Flow Director n-tuple vs ATR) with @data and resets the adapter when
 * a change requires re-initialization. Always returns 0.
 */
static int ixgbe_set_features(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool need_reset = false;

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
	if (!(data & NETIF_F_RXCSUM))
		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
	else
		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	/* Make sure RSC matches LRO, reset if change */
	if (!!(data & NETIF_F_LRO) !=
	    !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
		adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
		switch (adapter->hw.mac.type) {
		/* an RSC toggle requires a reset on these MAC types */
		case ixgbe_mac_X540:
		case ixgbe_mac_82599EB:
			need_reset = true;
			break;
		default:
			break;
		}
	}

	/*
	 * Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
		/* turn off ATR, enable perfect filters and reset */
		if (data & NETIF_F_NTUPLE) {
			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
			need_reset = true;
		}
	} else if (!(data & NETIF_F_NTUPLE)) {
		/* turn off Flow Director, set ATR and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		/* ATR is only re-enabled with RSS on and DCB off */
		if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
		    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		need_reset = true;
	}

	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
static const struct net_device_ops ixgbe_netdev_ops = { static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open, .ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close, .ndo_stop = ixgbe_close,
...@@ -7219,6 +7228,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { ...@@ -7219,6 +7228,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fcoe_disable = ixgbe_fcoe_disable, .ndo_fcoe_disable = ixgbe_fcoe_disable,
.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */ #endif /* IXGBE_FCOE */
.ndo_set_features = ixgbe_set_features,
.ndo_fix_features = ixgbe_fix_features,
}; };
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
...@@ -7486,20 +7497,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, ...@@ -7486,20 +7497,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features = NETIF_F_SG | netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER; NETIF_F_HW_VLAN_FILTER |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_GRO |
NETIF_F_RXHASH |
NETIF_F_RXCSUM;
netdev->features |= NETIF_F_IPV6_CSUM; netdev->hw_features = netdev->features;
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_GRO;
netdev->features |= NETIF_F_RXHASH;
switch (adapter->hw.mac.type) { switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB: case ixgbe_mac_82599EB:
case ixgbe_mac_X540: case ixgbe_mac_X540:
netdev->features |= NETIF_F_SCTP_CSUM; netdev->features |= NETIF_F_SCTP_CSUM;
netdev->hw_features |= NETIF_F_SCTP_CSUM |
NETIF_F_NTUPLE;
break; break;
default: default:
break; break;
...@@ -7538,6 +7553,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, ...@@ -7538,6 +7553,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA;
} }
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
netdev->hw_features |= NETIF_F_LRO;
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO; netdev->features |= NETIF_F_LRO;
...@@ -7574,25 +7591,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, ...@@ -7574,25 +7591,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err) if (err)
goto err_sw_init; goto err_sw_init;
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
netdev->hw_features &= ~NETIF_F_RXHASH;
netdev->features &= ~NETIF_F_RXHASH; netdev->features &= ~NETIF_F_RXHASH;
}
switch (pdev->device) { switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP: case IXGBE_DEV_ID_82599_SFP:
/* Only this subdevice supports WOL */ /* Only this subdevice supports WOL */
if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | adapter->wol = IXGBE_WUFC_MAG;
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
break; break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */ /* All except this subdevice support WOL */
if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | adapter->wol = IXGBE_WUFC_MAG;
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
break; break;
case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | adapter->wol = IXGBE_WUFC_MAG;
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
break; break;
default: default:
adapter->wol = 0; adapter->wol = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment