Commit b9974d76 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2017-03-17

This series contains updates mainly to igb, with one fix for ixgbe.

Alex does all the changes in the series, starting with adding support
for DMA_ATTR_WEAK_ORDERING to improve performance on some platforms.
Modified igb to use the length of the packet instead of the DD status
bit to determine if a new descriptor is ready to be processed.  Modified
the driver to only go through the region in the receive ring that was
designated to be cleaned up, instead of going through the entire ring
on cleanup.  Cleaned up the transmit side by clearing the transmit
buffer_info only when resetting the rings.  Added a new upper limit for
receive, based on the size of a 2K buffer minus padding, which will
allow us to support build_skb going forward.  Fixed ethtool testing
to only sync on the size of the frame that is being tested, instead of
the entire receive buffer.  Updated the handling of page addresses to
always use a void pointer with the consistent name of "va" to indicate
that we are working with a virtual address.  Added a "chicken bit" so
that we can turn off the new receive allocation feature, in case we
need to fall back to the legacy receive path.  Added support for using
3K buffers in order-1 pages, the same way we were using 2K buffers in
4K pages.  Added support for padding the packet; since we limit the
size of the frame, we are able to write to an offset within the buffer
instead of having to write at the very start of the buffer, which
leaves padding room for things like supporting XDP in the future.
Refactored the receive buffer page management; since there are 2-3
paths that can be taken depending on which receive modes are enabled,
the common bits are broken out into their own functions to improve
maintainability.  Added support for build_skb again.  Lastly, fixed a
typo in the igb and ixgbe code comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fe723dff 3a1eb6d1
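
Before the per-file hunks, a minimal sketch of the mapping pattern the first
patch in the series moves igb to: Rx pages are mapped with
DMA_ATTR_SKIP_CPU_SYNC together with the new DMA_ATTR_WEAK_ORDERING, the
combination that the IGB_RX_DMA_ATTR mask below encodes.  The helper name and
error handling here are illustrative only; just the DMA API calls and the
attribute flags come from the diff.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm_types.h>

/* Mirrors IGB_RX_DMA_ATTR from the diff: skip the implicit CPU sync
 * (the driver syncs only the region it actually uses) and allow weak
 * ordering on platforms that support it.
 */
#define EXAMPLE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Illustrative helper: map one Rx page with the relaxed attributes. */
static int example_map_rx_page(struct device *dev, struct page *page,
			       unsigned int size, dma_addr_t *dma)
{
	*dma = dma_map_page_attrs(dev, page, 0, size, DMA_FROM_DEVICE,
				  EXAMPLE_RX_DMA_ATTR);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;

	return 0;
}
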
...@@ -142,12 +142,24 @@ struct vf_data_storage { ...@@ -142,12 +142,24 @@ struct vf_data_storage {
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256 #define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_2048 2048 #define IGB_RXBUFFER_2048 2048
#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 #define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 #define IGB_TS_HDR_LEN 16
#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#if (PAGE_SIZE < 8192)
#define IGB_MAX_FRAME_BUILD_SKB \
(SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
#else
#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
#endif
/* How many Rx Buffers do we bundle into one write to the hardware ? */ /* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ #define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IGB_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
#define AUTO_ALL_MODES 0 #define AUTO_ALL_MODES 0
#define IGB_EEPROM_APME 0x0400 #define IGB_EEPROM_APME 0x0400
...@@ -301,12 +313,51 @@ struct igb_q_vector { ...@@ -301,12 +313,51 @@ struct igb_q_vector {
}; };
enum e1000_ring_flags_t { enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_3K_BUFFER,
IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_RX_SCTP_CSUM, IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP, IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX, IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG IGB_RING_FLAG_TX_DETECT_HANG
}; };
#define ring_uses_large_buffer(ring) \
test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define set_ring_uses_large_buffer(ring) \
set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define clear_ring_uses_large_buffer(ring) \
clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define ring_uses_build_skb(ring) \
test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define set_ring_build_skb_enabled(ring) \
set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define clear_ring_build_skb_enabled(ring) \
clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring_uses_large_buffer(ring))
return IGB_RXBUFFER_3072;
if (ring_uses_build_skb(ring))
return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
#endif
return IGB_RXBUFFER_2048;
}
static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring_uses_large_buffer(ring))
return 1;
#endif
return 0;
}
#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \ #define IGB_RX_DESC(R, i) \
...@@ -545,6 +596,7 @@ struct igb_adapter { ...@@ -545,6 +596,7 @@ struct igb_adapter {
#define IGB_FLAG_HAS_MSIX BIT(13) #define IGB_FLAG_HAS_MSIX BIT(13)
#define IGB_FLAG_EEE BIT(14) #define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15) #define IGB_FLAG_VLAN_PROMISC BIT(15)
#define IGB_FLAG_RX_LEGACY BIT(16)
/* Media Auto Sense */ /* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001 #define IGB_MAS_ENABLE_0 0X0001
...@@ -558,7 +610,6 @@ struct igb_adapter { ...@@ -558,7 +610,6 @@ struct igb_adapter {
#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
#define IGB_82576_TSYNC_SHIFT 19 #define IGB_82576_TSYNC_SHIFT 19
#define IGB_TS_HDR_LEN 16
enum e1000_state_t { enum e1000_state_t {
__IGB_TESTING, __IGB_TESTING,
__IGB_RESETTING, __IGB_RESETTING,
...@@ -591,7 +642,6 @@ void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); ...@@ -591,7 +642,6 @@ void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *); void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *); void igb_setup_rctl(struct igb_adapter *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
void igb_alloc_rx_buffers(struct igb_ring *, u16); void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
bool igb_has_link(struct igb_adapter *adapter); bool igb_has_link(struct igb_adapter *adapter);
...@@ -604,7 +654,7 @@ void igb_ptp_reset(struct igb_adapter *adapter); ...@@ -604,7 +654,7 @@ void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter); void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter); void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
struct sk_buff *skb); struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
......
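
The helpers added to the header above (igb_rx_bufsz(), igb_rx_pg_order(),
igb_rx_pg_size()) pick between 2K buffers, 2K buffers with build_skb headroom,
and 3K buffers carved out of order-1 pages.  A standalone sketch of that
arithmetic for 4K pages follows; the skb_shared_info overhead is only
approximated here (in the kernel it is
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))), and the constant names are
local to the example.

#include <stdio.h>

#define EX_PAGE_SIZE	4096u
#define EX_SHARED_INFO	320u		/* assumed, architecture dependent */
#define EX_RXBUF_2048	2048u
#define EX_RXBUF_3072	3072u
#define EX_SKB_PAD	(64u + 2u)	/* NET_SKB_PAD + NET_IP_ALIGN */
#define EX_TS_HDR_LEN	16u

int main(void)
{
	/* largest frame that still fits a 2K half-page once headroom,
	 * shared info and the timestamp header are accounted for
	 */
	unsigned int max_frame_build_skb =
		EX_RXBUF_2048 - EX_SHARED_INFO - EX_SKB_PAD - EX_TS_HDR_LEN;

	printf("approx. build_skb frame limit: %u bytes\n",
	       max_frame_build_skb);

	/* larger frames switch the ring to 3K buffers taken from order-1
	 * (8K) pages, two buffers per page, as igb_rx_bufsz() and
	 * igb_rx_pg_order() select above
	 */
	printf("large-buffer mode: %u-byte buffers from %u-byte pages\n",
	       EX_RXBUF_3072, EX_PAGE_SIZE << 1);
	return 0;
}
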
...@@ -144,6 +144,13 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { ...@@ -144,6 +144,13 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
}; };
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
};
#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings)
static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{ {
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
...@@ -852,6 +859,8 @@ static void igb_get_drvinfo(struct net_device *netdev, ...@@ -852,6 +859,8 @@ static void igb_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version)); sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info)); sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
} }
static void igb_get_ringparam(struct net_device *netdev, static void igb_get_ringparam(struct net_device *netdev,
...@@ -1811,14 +1820,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, ...@@ -1811,14 +1820,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
tx_ntc = tx_ring->next_to_clean; tx_ntc = tx_ring->next_to_clean;
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { while (rx_desc->wb.upper.length) {
/* check Rx buffer */ /* check Rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* sync Rx buffer for CPU read */ /* sync Rx buffer for CPU read */
dma_sync_single_for_cpu(rx_ring->dev, dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer_info->dma, rx_buffer_info->dma,
IGB_RX_BUFSZ, size,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
/* verify contents of skb */ /* verify contents of skb */
...@@ -1828,12 +1837,21 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, ...@@ -1828,12 +1837,21 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* sync Rx buffer for device write */ /* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev, dma_sync_single_for_device(rx_ring->dev,
rx_buffer_info->dma, rx_buffer_info->dma,
IGB_RX_BUFSZ, size,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
/* unmap buffer on Tx side */ /* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* Free all the Tx ring sk_buffs */
dev_kfree_skb_any(tx_buffer_info->skb);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer_info, dma),
dma_unmap_len(tx_buffer_info, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer_info, len, 0);
/* increment Rx/Tx next to clean counters */ /* increment Rx/Tx next to clean counters */
rx_ntc++; rx_ntc++;
...@@ -2271,6 +2289,8 @@ static int igb_get_sset_count(struct net_device *netdev, int sset) ...@@ -2271,6 +2289,8 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
return IGB_STATS_LEN; return IGB_STATS_LEN;
case ETH_SS_TEST: case ETH_SS_TEST:
return IGB_TEST_LEN; return IGB_TEST_LEN;
case ETH_SS_PRIV_FLAGS:
return IGB_PRIV_FLAGS_STR_LEN;
default: default:
return -ENOTSUPP; return -ENOTSUPP;
} }
...@@ -2376,6 +2396,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ...@@ -2376,6 +2396,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
} }
/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break; break;
case ETH_SS_PRIV_FLAGS:
memcpy(data, igb_priv_flags_strings,
IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
break;
} }
} }
...@@ -3388,6 +3412,37 @@ static int igb_set_channels(struct net_device *netdev, ...@@ -3388,6 +3412,37 @@ static int igb_set_channels(struct net_device *netdev,
return 0; return 0;
} }
static u32 igb_get_priv_flags(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
u32 priv_flags = 0;
if (adapter->flags & IGB_FLAG_RX_LEGACY)
priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX;
return priv_flags;
}
static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct igb_adapter *adapter = netdev_priv(netdev);
unsigned int flags = adapter->flags;
flags &= ~IGB_FLAG_RX_LEGACY;
if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX)
flags |= IGB_FLAG_RX_LEGACY;
if (flags != adapter->flags) {
adapter->flags = flags;
/* reset interface to repopulate queues */
if (netif_running(netdev))
igb_reinit_locked(adapter);
}
return 0;
}
static const struct ethtool_ops igb_ethtool_ops = { static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings, .get_settings = igb_get_settings,
.set_settings = igb_set_settings, .set_settings = igb_set_settings,
...@@ -3426,6 +3481,8 @@ static const struct ethtool_ops igb_ethtool_ops = { ...@@ -3426,6 +3481,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_rxfh = igb_set_rxfh, .set_rxfh = igb_set_rxfh,
.get_channels = igb_get_channels, .get_channels = igb_get_channels,
.set_channels = igb_set_channels, .set_channels = igb_set_channels,
.get_priv_flags = igb_get_priv_flags,
.set_priv_flags = igb_set_priv_flags,
.begin = igb_ethtool_begin, .begin = igb_ethtool_begin,
.complete = igb_ethtool_complete, .complete = igb_ethtool_complete,
}; };
......
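
The ethtool hunks above expose the chicken bit as a "legacy-rx" private flag,
so it can be flipped at run time with ethtool --show-priv-flags and
ethtool --set-priv-flags <iface> legacy-rx on|off.  A hedged user-space sketch
of the same operation through the SIOCETHTOOL ioctl follows; the bit position
mirrors IGB_PRIV_FLAGS_LEGACY_RX (bit 0) from the hunk above, and everything
else is generic ethtool plumbing rather than igb-specific code.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

#define LEGACY_RX_FLAG (1u << 0)	/* IGB_PRIV_FLAGS_LEGACY_RX above */

int main(int argc, char **argv)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <ifname> [on|off]\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eval;

	/* read the current private flags */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GPFLAGS");
		return 1;
	}
	printf("legacy-rx is %s\n",
	       (eval.data & LEGACY_RX_FLAG) ? "on" : "off");

	if (argc > 2) {
		/* set or clear the legacy-rx bit and write it back */
		if (!strcmp(argv[2], "on"))
			eval.data |= LEGACY_RX_FLAG;
		else
			eval.data &= ~LEGACY_RX_FLAG;
		eval.cmd = ETHTOOL_SPFLAGS;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_SPFLAGS");
			return 1;
		}
	}

	close(fd);
	return 0;
}
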
...@@ -554,7 +554,7 @@ static void igb_dump(struct igb_adapter *adapter) ...@@ -554,7 +554,7 @@ static void igb_dump(struct igb_adapter *adapter)
16, 1, 16, 1,
page_address(buffer_info->page) + page_address(buffer_info->page) +
buffer_info->page_offset, buffer_info->page_offset,
IGB_RX_BUFSZ, true); igb_rx_bufsz(rx_ring), true);
} }
} }
} }
...@@ -3293,7 +3293,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring) ...@@ -3293,7 +3293,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
size = sizeof(struct igb_tx_buffer) * tx_ring->count; size = sizeof(struct igb_tx_buffer) * tx_ring->count;
tx_ring->tx_buffer_info = vzalloc(size); tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info) if (!tx_ring->tx_buffer_info)
goto err; goto err;
...@@ -3404,6 +3404,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, ...@@ -3404,6 +3404,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
txdctl |= IGB_TX_HTHRESH << 8; txdctl |= IGB_TX_HTHRESH << 8;
txdctl |= IGB_TX_WTHRESH << 16; txdctl |= IGB_TX_WTHRESH << 16;
/* reinitialize tx_buffer_info */
memset(ring->tx_buffer_info, 0,
sizeof(struct igb_tx_buffer) * ring->count);
txdctl |= E1000_TXDCTL_QUEUE_ENABLE; txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
wr32(E1000_TXDCTL(reg_idx), txdctl); wr32(E1000_TXDCTL(reg_idx), txdctl);
} }
...@@ -3435,7 +3439,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) ...@@ -3435,7 +3439,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
size = sizeof(struct igb_rx_buffer) * rx_ring->count; size = sizeof(struct igb_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size); rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) if (!rx_ring->rx_buffer_info)
goto err; goto err;
...@@ -3720,6 +3724,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ...@@ -3720,6 +3724,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
struct igb_ring *ring) struct igb_ring *ring)
{ {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
union e1000_adv_rx_desc *rx_desc;
u64 rdba = ring->dma; u64 rdba = ring->dma;
int reg_idx = ring->reg_idx; int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0; u32 srrctl = 0, rxdctl = 0;
...@@ -3741,7 +3746,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ...@@ -3741,7 +3746,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */ /* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT; if (ring_uses_large_buffer(ring))
srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
else
srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580) if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP; srrctl |= E1000_SRRCTL_TIMESTAMP;
...@@ -3758,11 +3766,39 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, ...@@ -3758,11 +3766,39 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16; rxdctl |= IGB_RX_WTHRESH << 16;
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
sizeof(struct igb_rx_buffer) * ring->count);
/* initialize Rx descriptor 0 */
rx_desc = IGB_RX_DESC(ring, 0);
rx_desc->wb.upper.length = 0;
/* enable receive descriptor fetching */ /* enable receive descriptor fetching */
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
wr32(E1000_RXDCTL(reg_idx), rxdctl); wr32(E1000_RXDCTL(reg_idx), rxdctl);
} }
static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
struct igb_ring *rx_ring)
{
/* set build_skb and buffer size flags */
clear_ring_build_skb_enabled(rx_ring);
clear_ring_uses_large_buffer(rx_ring);
if (adapter->flags & IGB_FLAG_RX_LEGACY)
return;
set_ring_build_skb_enabled(rx_ring);
#if (PAGE_SIZE < 8192)
if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
return;
set_ring_uses_large_buffer(rx_ring);
#endif
}
/** /**
* igb_configure_rx - Configure receive Unit after Reset * igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure * @adapter: board private structure
...@@ -3780,8 +3816,12 @@ static void igb_configure_rx(struct igb_adapter *adapter) ...@@ -3780,8 +3816,12 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and /* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring * the Base and Length of the Rx Descriptor Ring
*/ */
for (i = 0; i < adapter->num_rx_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++) {
igb_configure_rx_ring(adapter, adapter->rx_ring[i]); struct igb_ring *rx_ring = adapter->rx_ring[i];
igb_set_rx_buffer_len(adapter, rx_ring);
igb_configure_rx_ring(adapter, rx_ring);
}
} }
/** /**
...@@ -3822,55 +3862,63 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter) ...@@ -3822,55 +3862,63 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
igb_free_tx_resources(adapter->tx_ring[i]); igb_free_tx_resources(adapter->tx_ring[i]);
} }
void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
struct igb_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
} else if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* buffer_info must be completely set up in the transmit path */
}
/** /**
* igb_clean_tx_ring - Free Tx Buffers * igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned * @tx_ring: ring to be cleaned
**/ **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring) static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{ {
struct igb_tx_buffer *buffer_info; u16 i = tx_ring->next_to_clean;
unsigned long size; struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
u16 i;
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
if (!tx_ring->tx_buffer_info)
return;
/* Free all the Tx ring sk_buffs */ /* Free all the Tx ring sk_buffs */
dev_kfree_skb_any(tx_buffer->skb);
for (i = 0; i < tx_ring->count; i++) { /* unmap skb header data */
buffer_info = &tx_ring->tx_buffer_info[i]; dma_unmap_single(tx_ring->dev,
igb_unmap_and_free_tx_resource(tx_ring, buffer_info); dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
/* check for eop_desc to determine the end of the packet */
eop_desc = tx_buffer->next_to_watch;
tx_desc = IGB_TX_DESC(tx_ring, i);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
tx_buffer++;
tx_desc++;
i++;
if (unlikely(i == tx_ring->count)) {
i = 0;
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, 0);
} }
netdev_tx_reset_queue(txring_txq(tx_ring)); /* unmap any remaining paged data */
if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
size = sizeof(struct igb_tx_buffer) * tx_ring->count; /* move us one more past the eop_desc for start of next pkt */
memset(tx_ring->tx_buffer_info, 0, size); tx_buffer++;
i++;
if (unlikely(i == tx_ring->count)) {
i = 0;
tx_buffer = tx_ring->tx_buffer_info;
}
}
/* Zero out the descriptor ring */ /* reset BQL for queue */
memset(tx_ring->desc, 0, tx_ring->size); netdev_tx_reset_queue(txring_txq(tx_ring));
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0; tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0; tx_ring->next_to_clean = 0;
} }
...@@ -3932,50 +3980,39 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter) ...@@ -3932,50 +3980,39 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
**/ **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring) static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{ {
unsigned long size; u16 i = rx_ring->next_to_clean;
u16 i;
if (rx_ring->skb) if (rx_ring->skb)
dev_kfree_skb(rx_ring->skb); dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL; rx_ring->skb = NULL;
if (!rx_ring->rx_buffer_info)
return;
/* Free all the Rx ring sk_buffs */ /* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) { while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
if (!buffer_info->page)
continue;
/* Invalidate cache lines that may have been written to by /* Invalidate cache lines that may have been written to by
* device so that we avoid corrupting memory. * device so that we avoid corrupting memory.
*/ */
dma_sync_single_range_for_cpu(rx_ring->dev, dma_sync_single_range_for_cpu(rx_ring->dev,
buffer_info->dma, buffer_info->dma,
buffer_info->page_offset, buffer_info->page_offset,
IGB_RX_BUFSZ, igb_rx_bufsz(rx_ring),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
/* free resources associated with mapping */ /* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev, dma_unmap_page_attrs(rx_ring->dev,
buffer_info->dma, buffer_info->dma,
PAGE_SIZE, igb_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC); IGB_RX_DMA_ATTR);
__page_frag_cache_drain(buffer_info->page, __page_frag_cache_drain(buffer_info->page,
buffer_info->pagecnt_bias); buffer_info->pagecnt_bias);
buffer_info->page = NULL; i++;
if (i == rx_ring->count)
i = 0;
} }
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_buffer_info, 0, size);
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_alloc = 0; rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0; rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0; rx_ring->next_to_use = 0;
...@@ -4240,7 +4277,7 @@ static void igb_set_rx_mode(struct net_device *netdev) ...@@ -4240,7 +4277,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
unsigned int vfn = adapter->vfs_allocated_count; unsigned int vfn = adapter->vfs_allocated_count;
u32 rctl = 0, vmolr = 0; u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
int count; int count;
/* Check for Promiscuous and All Multicast modes */ /* Check for Promiscuous and All Multicast modes */
...@@ -4298,6 +4335,14 @@ static void igb_set_rx_mode(struct net_device *netdev) ...@@ -4298,6 +4335,14 @@ static void igb_set_rx_mode(struct net_device *netdev)
E1000_RCTL_VFE); E1000_RCTL_VFE);
wr32(E1000_RCTL, rctl); wr32(E1000_RCTL, rctl);
#if (PAGE_SIZE < 8192)
if (!adapter->vfs_allocated_count) {
if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
rlpml = IGB_MAX_FRAME_BUILD_SKB;
}
#endif
wr32(E1000_RLPML, rlpml);
/* In order to support SR-IOV and eventually VMDq it is necessary to set /* In order to support SR-IOV and eventually VMDq it is necessary to set
* the VMOLR to enable the appropriate modes. Without this workaround * the VMOLR to enable the appropriate modes. Without this workaround
* we will have issues with VLAN tag stripping not being done for frames * we will have issues with VLAN tag stripping not being done for frames
...@@ -4312,12 +4357,17 @@ static void igb_set_rx_mode(struct net_device *netdev) ...@@ -4312,12 +4357,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= rd32(E1000_VMOLR(vfn)) & vmolr |= rd32(E1000_VMOLR(vfn)) &
~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
/* enable Rx jumbo frames, no need for restriction */ /* enable Rx jumbo frames, restrict as needed to support build_skb */
vmolr &= ~E1000_VMOLR_RLPML_MASK; vmolr &= ~E1000_VMOLR_RLPML_MASK;
vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE; #if (PAGE_SIZE < 8192)
if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
vmolr |= IGB_MAX_FRAME_BUILD_SKB;
else
#endif
vmolr |= MAX_JUMBO_FRAME_SIZE;
vmolr |= E1000_VMOLR_LPE;
wr32(E1000_VMOLR(vfn), vmolr); wr32(E1000_VMOLR(vfn), vmolr);
wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
igb_restore_vf_multicasts(adapter); igb_restore_vf_multicasts(adapter);
} }
...@@ -5256,18 +5306,32 @@ static void igb_tx_map(struct igb_ring *tx_ring, ...@@ -5256,18 +5306,32 @@ static void igb_tx_map(struct igb_ring *tx_ring,
dma_error: dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n"); dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */ /* clear dma mappings for failed tx_buffer_info map */
for (;;) { while (tx_buffer != first) {
if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
if (i--)
i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i]; tx_buffer = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
if (tx_buffer == first)
break;
if (i == 0)
i = tx_ring->count;
i--;
} }
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
dev_kfree_skb_any(tx_buffer->skb);
tx_buffer->skb = NULL;
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
} }
...@@ -5339,7 +5403,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, ...@@ -5339,7 +5403,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK; return NETDEV_TX_OK;
out_drop: out_drop:
igb_unmap_and_free_tx_resource(tx_ring, first); dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -6686,7 +6751,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) ...@@ -6686,7 +6751,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
DMA_TO_DEVICE); DMA_TO_DEVICE);
/* clear tx_buffer data */ /* clear tx_buffer data */
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0); dma_unmap_len_set(tx_buffer, len, 0);
/* clear last DMA location and unmap remaining buffers */ /* clear last DMA location and unmap remaining buffers */
...@@ -6822,8 +6886,14 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, ...@@ -6822,8 +6886,14 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
nta++; nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */ /* Transfer page from old buffer to new buffer.
*new_buff = *old_buff; * Move each member individually to avoid possible store
* forwarding stalls.
*/
new_buff->dma = old_buff->dma;
new_buff->page = old_buff->page;
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
} }
static inline bool igb_page_is_reserved(struct page *page) static inline bool igb_page_is_reserved(struct page *page)
...@@ -6831,11 +6901,10 @@ static inline bool igb_page_is_reserved(struct page *page) ...@@ -6831,11 +6901,10 @@ static inline bool igb_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
} }
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
struct page *page,
unsigned int truesize)
{ {
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* avoid re-using remote pages */ /* avoid re-using remote pages */
if (unlikely(igb_page_is_reserved(page))) if (unlikely(igb_page_is_reserved(page)))
...@@ -6843,16 +6912,13 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, ...@@ -6843,16 +6912,13 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */ /* if we are only owner of page we can reuse it */
if (unlikely(page_ref_count(page) != pagecnt_bias)) if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
return false; return false;
/* flip page offset to other buffer */
rx_buffer->page_offset ^= IGB_RX_BUFSZ;
#else #else
/* move offset up to the next cache line */ #define IGB_LAST_OFFSET \
rx_buffer->page_offset += truesize; (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) if (rx_buffer->page_offset > IGB_LAST_OFFSET)
return false; return false;
#endif #endif
...@@ -6860,7 +6926,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, ...@@ -6860,7 +6926,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the * the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds. * number of references the driver holds.
*/ */
if (unlikely(pagecnt_bias == 1)) { if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX); page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX; rx_buffer->pagecnt_bias = USHRT_MAX;
} }
...@@ -6872,34 +6938,56 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, ...@@ -6872,34 +6938,56 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add * @rx_buffer: buffer containing page to add
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into * @skb: sk_buff to place the data into
* @size: size of buffer to be added
* *
* This function will add the data contained in rx_buffer->page to the skb. * This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is
* less than the skb header size, otherwise it will just attach the page as
* a frag to the skb.
*
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
**/ **/
static bool igb_add_rx_frag(struct igb_ring *rx_ring, static void igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct sk_buff *skb,
unsigned int size)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = ring_uses_build_skb(rx_ring) ?
SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
SKB_DATA_ALIGN(size);
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
}
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer, struct igb_rx_buffer *rx_buffer,
unsigned int size,
union e1000_adv_rx_desc *rx_desc, union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb) unsigned int size)
{ {
struct page *page = rx_buffer->page; void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ; unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else #else
unsigned int truesize = SKB_DATA_ALIGN(size); unsigned int truesize = SKB_DATA_ALIGN(size);
#endif #endif
unsigned int pull_len; unsigned int headlen;
struct sk_buff *skb;
if (unlikely(skb_is_nonlinear(skb))) /* prefetch first cache line of first page */
goto add_tail_frag; prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
#endif
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
if (unlikely(!skb))
return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
...@@ -6907,95 +6995,73 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, ...@@ -6907,95 +6995,73 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
size -= IGB_TS_HDR_LEN; size -= IGB_TS_HDR_LEN;
} }
if (likely(size <= IGB_RX_HDR_LEN)) { /* Determine available headroom for copy */
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); headlen = size;
if (headlen > IGB_RX_HDR_LEN)
/* page is not reserved, we can reuse buffer as-is */ headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
if (likely(!igb_page_is_reserved(page)))
return true;
/* this page cannot be reused so discard it */
return false;
}
/* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */ /* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */ /* update all of the pointers */
va += pull_len; size -= headlen;
size -= pull_len; if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page,
add_tail_frag: (va + headlen) - page_address(rx_buffer->page),
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, size, truesize);
(unsigned long)va & ~PAGE_MASK, size, truesize); #if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
} else {
rx_buffer->pagecnt_bias++;
}
return igb_can_reuse_rx_page(rx_buffer, page, truesize); return skb;
} }
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
union e1000_adv_rx_desc *rx_desc, union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb) unsigned int size)
{ {
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
struct igb_rx_buffer *rx_buffer; #if (PAGE_SIZE < 8192)
struct page *page; unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
page = rx_buffer->page; SKB_DATA_ALIGN(IGB_SKB_PAD + size);
prefetchw(page); #endif
struct sk_buff *skb;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
/* prefetch first cache line of first page */ /* prefetch first cache line of first page */
prefetch(page_addr); prefetch(va);
#if L1_CACHE_BYTES < 128 #if L1_CACHE_BYTES < 128
prefetch(page_addr + L1_CACHE_BYTES); prefetch(va + L1_CACHE_BYTES);
#endif #endif
/* allocate a skb to store the frags */ /* build an skb around the page buffer */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); skb = build_skb(va - IGB_SKB_PAD, truesize);
if (unlikely(!skb)) { if (unlikely(!skb))
rx_ring->rx_stats.alloc_failed++;
return NULL; return NULL;
}
/* we will be copying header into skb->data in /* update pointers within the skb to store the data */
* pskb_may_pull so it is in our interest to prefetch skb_reserve(skb, IGB_SKB_PAD);
* it now to avoid a possible cache miss __skb_put(skb, size);
*/
prefetchw(skb->data);
}
/* pull page into skb */ /* pull timestamp out of packet data */
if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
/* hand second half of page back to the ring */ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
igb_reuse_rx_page(rx_ring, rx_buffer); __skb_pull(skb, IGB_TS_HDR_LEN);
} else {
/* We are not reusing the buffer so unmap it and free
* any references we are holding to it
*/
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
PAGE_SIZE, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
} }
/* clear contents of rx_buffer */ /* update buffer offset */
rx_buffer->page = NULL; #if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
return skb; return skb;
} }
...@@ -7154,6 +7220,47 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, ...@@ -7154,6 +7220,47 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev); skb->protocol = eth_type_trans(skb, rx_ring->netdev);
} }
static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
const unsigned int size)
{
struct igb_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
rx_buffer->pagecnt_bias--;
return rx_buffer;
}
static void igb_put_rx_buffer(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer)
{
if (igb_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
/* We are not reusing the buffer so unmap it and free
* any references we are holding to it
*/
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
IGB_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
}
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{ {
struct igb_ring *rx_ring = q_vector->rx.ring; struct igb_ring *rx_ring = q_vector->rx.ring;
...@@ -7163,6 +7270,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) ...@@ -7163,6 +7270,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) { while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc; union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) { if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
...@@ -7171,8 +7280,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) ...@@ -7171,8 +7280,8 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
} }
rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
size = le16_to_cpu(rx_desc->wb.upper.length);
if (!rx_desc->wb.upper.status_error) if (!size)
break; break;
/* This memory barrier is needed to keep us from reading /* This memory barrier is needed to keep us from reading
...@@ -7181,13 +7290,25 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) ...@@ -7181,13 +7290,25 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
*/ */
dma_rmb(); dma_rmb();
rx_buffer = igb_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */ /* retrieve a buffer from the ring */
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); if (skb)
igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
else
skb = igb_construct_skb(rx_ring, rx_buffer,
rx_desc, size);
/* exit if we failed to retrieve a buffer */ /* exit if we failed to retrieve a buffer */
if (!skb) if (!skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
break; break;
}
igb_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++; cleaned_count++;
/* fetch next buffer in frame if non-eop */ /* fetch next buffer in frame if non-eop */
...@@ -7231,6 +7352,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) ...@@ -7231,6 +7352,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
return total_packets; return total_packets;
} }
static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
{
return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi) struct igb_rx_buffer *bi)
{ {
...@@ -7242,21 +7368,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, ...@@ -7242,21 +7368,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true; return true;
/* alloc new page for storage */ /* alloc new page for storage */
page = dev_alloc_page(); page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) { if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++; rx_ring->rx_stats.alloc_failed++;
return false; return false;
} }
/* map page for use */ /* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, dma = dma_map_page_attrs(rx_ring->dev, page, 0,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); igb_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IGB_RX_DMA_ATTR);
/* if mapping failed free memory back to system since /* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use * there isn't much point in holding memory we can't use
*/ */
if (dma_mapping_error(rx_ring->dev, dma)) { if (dma_mapping_error(rx_ring->dev, dma)) {
__free_page(page); __free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++; rx_ring->rx_stats.alloc_failed++;
return false; return false;
...@@ -7264,7 +7392,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, ...@@ -7264,7 +7392,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma; bi->dma = dma;
bi->page = page; bi->page = page;
bi->page_offset = 0; bi->page_offset = igb_rx_offset(rx_ring);
bi->pagecnt_bias = 1; bi->pagecnt_bias = 1;
return true; return true;
...@@ -7279,6 +7407,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) ...@@ -7279,6 +7407,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
union e1000_adv_rx_desc *rx_desc; union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *bi; struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use; u16 i = rx_ring->next_to_use;
u16 bufsz;
/* nothing to do */ /* nothing to do */
if (!cleaned_count) if (!cleaned_count)
...@@ -7288,14 +7417,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) ...@@ -7288,14 +7417,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buffer_info[i]; bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count; i -= rx_ring->count;
bufsz = igb_rx_bufsz(rx_ring);
do { do {
if (!igb_alloc_mapped_page(rx_ring, bi)) if (!igb_alloc_mapped_page(rx_ring, bi))
break; break;
/* sync the buffer for use by the device */ /* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset, bi->page_offset, bufsz,
IGB_RX_BUFSZ,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change /* Refresh the desc even if buffer_addrs didn't change
...@@ -7312,8 +7442,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) ...@@ -7312,8 +7442,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count; i -= rx_ring->count;
} }
/* clear the status bits for the next_to_use descriptor */ /* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.status_error = 0; rx_desc->wb.upper.length = 0;
cleaned_count--; cleaned_count--;
} while (cleaned_count); } while (cleaned_count);
......
...@@ -764,8 +764,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) ...@@ -764,8 +764,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
* incoming frame. The value is stored in little endian format starting on * incoming frame. The value is stored in little endian format starting on
* byte 8. * byte 8.
**/ **/
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
unsigned char *va,
struct sk_buff *skb) struct sk_buff *skb)
{ {
__le64 *regval = (__le64 *)va; __le64 *regval = (__le64 *)va;
......
...@@ -2122,7 +2122,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, ...@@ -2122,7 +2122,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
prefetch(va + L1_CACHE_BYTES); prefetch(va + L1_CACHE_BYTES);
#endif #endif
/* build an skb to around the page buffer */ /* build an skb around the page buffer */
skb = build_skb(va - IXGBE_SKB_PAD, truesize); skb = build_skb(va - IXGBE_SKB_PAD, truesize);
if (unlikely(!skb)) if (unlikely(!skb))
return NULL; return NULL;
......