Commit 6506f52d authored by Robert Beckett, committed by Jeff Kirsher

igb: don't drop packets if rx flow control is enabled

If Rx flow control has been enabled (via autoneg or forced), packets
should not be dropped due to Rx descriptor ring exhaustion. Instead
pause frames should be used to apply back pressure. This only applies
if VFs are not in use.

Move SRRCTL setup to its own function for easy reuse and only set drop
enable bit if Rx flow control is not enabled.

Since v1: always enable dropping of packets if VFs in use.
Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 9e860947
...@@ -661,6 +661,7 @@ void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); ...@@ -661,6 +661,7 @@ void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *); void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *); void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16); void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *); void igb_update_stats(struct igb_adapter *);
......
...@@ -396,6 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev, ...@@ -396,6 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
int retval = 0; int retval = 0;
int i;
/* 100basefx does not support setting link flow control */ /* 100basefx does not support setting link flow control */
if (hw->dev_spec._82575.eth_flags.e100_base_fx) if (hw->dev_spec._82575.eth_flags.e100_base_fx)
...@@ -428,6 +429,13 @@ static int igb_set_pauseparam(struct net_device *netdev, ...@@ -428,6 +429,13 @@ static int igb_set_pauseparam(struct net_device *netdev,
retval = ((hw->phy.media_type == e1000_media_type_copper) ? retval = ((hw->phy.media_type == e1000_media_type_copper) ?
igb_force_mac_fc(hw) : igb_setup_link(hw)); igb_force_mac_fc(hw) : igb_setup_link(hw));
/* Make sure SRRCTL considers new fc settings for each ring */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
igb_setup_srrctl(adapter, ring);
}
} }
clear_bit(__IGB_RESETTING, &adapter->state); clear_bit(__IGB_RESETTING, &adapter->state);
......
...@@ -4467,6 +4467,37 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, ...@@ -4467,6 +4467,37 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
wr32(E1000_VMOLR(vfn), vmolr); wr32(E1000_VMOLR(vfn), vmolr);
} }
/**
 * igb_setup_srrctl - configure the split and replication receive control
 * registers
 * @adapter: Board private structure
 * @ring: receive ring to be configured
 **/
void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl;

	/* Header buffer size and advanced one-buffer descriptor format */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

	/* Packet buffer size: 3K when the ring uses large buffers, else 2K */
	srrctl |= (ring_uses_large_buffer(ring) ?
		   IGB_RXBUFFER_3072 : IGB_RXBUFFER_2048) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	/* 82580 and newer MACs support per-packet timestamping */
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;

	/* Only set Drop Enable if VFs allocated, or we are supporting multiple
	 * queues and rx flow control is disabled
	 */
	if (adapter->vfs_allocated_count ||
	    (adapter->num_rx_queues > 1 &&
	     !(hw->fc.current_mode & e1000_fc_rx_pause)))
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(ring->reg_idx), srrctl);
}
/** /**
* igb_configure_rx_ring - Configure a receive ring after Reset * igb_configure_rx_ring - Configure a receive ring after Reset
* @adapter: board private structure * @adapter: board private structure
...@@ -4481,7 +4512,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ...@@ -4481,7 +4512,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
union e1000_adv_rx_desc *rx_desc; union e1000_adv_rx_desc *rx_desc;
u64 rdba = ring->dma; u64 rdba = ring->dma;
int reg_idx = ring->reg_idx; int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0; u32 rxdctl = 0;
/* disable the queue */ /* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0); wr32(E1000_RXDCTL(reg_idx), 0);
...@@ -4499,19 +4530,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ...@@ -4499,19 +4530,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
writel(0, ring->tail); writel(0, ring->tail);
/* set descriptor configuration */ /* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; igb_setup_srrctl(adapter, ring);
if (ring_uses_large_buffer(ring))
srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
else
srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
wr32(E1000_SRRCTL(reg_idx), srrctl);
/* set filtering for VMDQ pools */ /* set filtering for VMDQ pools */
igb_set_vmolr(adapter, reg_idx & 0x7, true); igb_set_vmolr(adapter, reg_idx & 0x7, true);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment