Commit 3808b519 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-02-26

This series contains updates to ixgbe and ixgbevf only.

Colin Ian King cleans up redundant variable assignments.

Tonghao Zhang updates ixgbe to avoid writing to the hardware when the
redirection table has not changed.

Jake fixes the driver logic for checking and clearing receive timestamp
hangs so that when the PTP_RX_TIMESTAMP_IN_REGISTER flag is set we no
longer check for receive timestamp hangs, which in turn stops the
spurious log messages.

Emil updates ixgbevf with several features and improvements already done
in other drivers, starting with the handling of page addresses so that we
always refer to them using a void pointer.  Added a 'legacy-rx' flag to
allow switching between the old and new receive code paths.  Added
support for using 3K buffers in an order-1 page.  Updated the driver to
ensure that calls to ixgbevf_open() are rtnl lock protected and improved
the error handling when setting up multiple queues.  Added support for
providing a buffer with head room and tail room to allow for shared
info, NET_SKB_PAD, and NET_IP_ALIGN, so that we can start using
build_skb to build frames instead of copying the headers with memcpy()
(see the buffer-budget sketch after the commit metadata below).  Updated
the ring handling logic to more closely match ixgbe.  Consolidated the
receive paths to reduce duplication when we expand them in the future.
Added build_skb() support to ixgbevf.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 08009a76 93a6a37c
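The head room / tail room note above is easiest to see as arithmetic: with
build_skb, each 2K half-page buffer has to reserve NET_SKB_PAD + NET_IP_ALIGN
of head room plus the skb_shared_info tail room, and whatever is left is the
per-buffer frame budget; once the MTU pushes a frame past that budget the
ring switches to 3K buffers, which only fit two per page on an order-1 (8K)
page.  The small userspace sketch below walks through those numbers using
assumed values for NET_SKB_PAD, NET_IP_ALIGN and the shared-info size (all of
which vary by architecture and kernel version), so it only illustrates the
IXGBEVF_SKB_PAD / IXGBEVF_MAX_FRAME_BUILD_SKB macros added in ixgbevf.h; it
is not driver code.

#include <stdio.h>

/* buffer sizes from ixgbevf.h */
#define RXBUFFER_2048		2048
#define RXBUFFER_3072		3072

/* assumed values -- these vary by architecture and kernel version */
#define ASSUMED_NET_SKB_PAD	64	/* typically max(32, L1_CACHE_BYTES) */
#define ASSUMED_NET_IP_ALIGN	2	/* 0 on x86, 2 on most other arches */
#define ASSUMED_SHINFO		320	/* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

#define SKB_PAD			(ASSUMED_NET_SKB_PAD + ASSUMED_NET_IP_ALIGN)
#define WITH_OVERHEAD(bufsz)	((bufsz) - ASSUMED_SHINFO)

int main(void)
{
	/* mirrors IXGBEVF_MAX_FRAME_BUILD_SKB for PAGE_SIZE < 8192 */
	int max_frame_build_skb = WITH_OVERHEAD(RXBUFFER_2048) - SKB_PAD;
	int std_frame = 1500 + 14 + 4;	/* MTU + ETH_HLEN + ETH_FCS_LEN */

	printf("payload budget of a 2K build_skb buffer: %d bytes\n",
	       max_frame_build_skb);
	printf("a 1500-byte MTU frame needs %d bytes -> %s\n", std_frame,
	       std_frame <= max_frame_build_skb ?
	       "fits in the 2K buffer" : "needs the 3K buffer");

	/* two 3K buffers no longer fit in one 4K page, hence order-1 pages */
	printf("2 x %d = %d bytes -> order-1 (8K) pages for 3K buffers\n",
	       RXBUFFER_3072, 2 * RXBUFFER_3072);
	return 0;
}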
...@@ -3059,6 +3059,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, ...@@ -3059,6 +3059,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
for (i = 0; i < reta_entries; i++) for (i = 0; i < reta_entries; i++)
adapter->rss_indir_tbl[i] = indir[i]; adapter->rss_indir_tbl[i] = indir[i];
ixgbe_store_reta(adapter);
} }
/* Fill out the rss hash key */ /* Fill out the rss hash key */
...@@ -3067,8 +3069,6 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, ...@@ -3067,8 +3069,6 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
ixgbe_store_key(adapter); ixgbe_store_key(adapter);
} }
ixgbe_store_reta(adapter);
return 0; return 0;
} }
......
...@@ -58,7 +58,6 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) ...@@ -58,7 +58,6 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
return false; return false;
/* start at VMDq register offset for SR-IOV enabled setups */ /* start at VMDq register offset for SR-IOV enabled setups */
pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) { for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
/* If we are greater than indices move to next pool */ /* If we are greater than indices move to next pool */
......
...@@ -7703,7 +7703,8 @@ static void ixgbe_service_task(struct work_struct *work) ...@@ -7703,7 +7703,8 @@ static void ixgbe_service_task(struct work_struct *work)
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
ixgbe_ptp_overflow_check(adapter); ixgbe_ptp_overflow_check(adapter);
ixgbe_ptp_rx_hang(adapter); if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
ixgbe_ptp_rx_hang(adapter);
ixgbe_ptp_tx_hang(adapter); ixgbe_ptp_tx_hang(adapter);
} }
......
...@@ -94,6 +94,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { ...@@ -94,6 +94,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) #define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
};
#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)
static int ixgbevf_get_link_ksettings(struct net_device *netdev, static int ixgbevf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd) struct ethtool_link_ksettings *cmd)
{ {
...@@ -241,6 +248,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev, ...@@ -241,6 +248,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version)); sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info)); sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
} }
static void ixgbevf_get_ringparam(struct net_device *netdev, static void ixgbevf_get_ringparam(struct net_device *netdev,
...@@ -392,6 +401,8 @@ static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset) ...@@ -392,6 +401,8 @@ static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
return IXGBEVF_TEST_LEN; return IXGBEVF_TEST_LEN;
case ETH_SS_STATS: case ETH_SS_STATS:
return IXGBEVF_STATS_LEN; return IXGBEVF_STATS_LEN;
case ETH_SS_PRIV_FLAGS:
return IXGBEVF_PRIV_FLAGS_STR_LEN;
default: default:
return -EINVAL; return -EINVAL;
} }
...@@ -496,6 +507,10 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, ...@@ -496,6 +507,10 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
break; break;
case ETH_SS_PRIV_FLAGS:
memcpy(data, ixgbevf_priv_flags_strings,
IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
break;
} }
} }
...@@ -888,6 +903,37 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, ...@@ -888,6 +903,37 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return err; return err;
} }
static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 priv_flags = 0;
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;
return priv_flags;
}
static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
unsigned int flags = adapter->flags;
flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
flags |= IXGBEVF_FLAGS_LEGACY_RX;
if (flags != adapter->flags) {
adapter->flags = flags;
/* reset interface to repopulate queues */
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
}
return 0;
}
static const struct ethtool_ops ixgbevf_ethtool_ops = { static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_drvinfo = ixgbevf_get_drvinfo, .get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len, .get_regs_len = ixgbevf_get_regs_len,
...@@ -909,6 +955,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = { ...@@ -909,6 +955,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size, .get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh, .get_rxfh = ixgbevf_get_rxfh,
.get_link_ksettings = ixgbevf_get_link_ksettings, .get_link_ksettings = ixgbevf_get_link_ksettings,
.get_priv_flags = ixgbevf_get_priv_flags,
.set_priv_flags = ixgbevf_set_priv_flags,
}; };
void ixgbevf_set_ethtool_ops(struct net_device *netdev) void ixgbevf_set_ethtool_ops(struct net_device *netdev)
......
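As a hedged user-space companion to the priv-flags hooks above: the new
"legacy-rx" flag is driven through the standard ETHTOOL_GPFLAGS /
ETHTOOL_SPFLAGS ioctls (what "ethtool --show-priv-flags" and
"ethtool --set-priv-flags <iface> legacy-rx on" use underneath), with bit 0
matching IXGBEVF_PRIV_FLAGS_LEGACY_RX from the strings table.  The interface
name below and the minimal error handling are assumptions for illustration
only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";	/* assumption: name of the VF netdev */
	struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eval;

	/* read the current private-flags bitmap */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		eval.cmd = ETHTOOL_SPFLAGS;
		eval.data |= 1;		/* bit 0 == "legacy-rx" per the strings table */
		if (ioctl(fd, SIOCETHTOOL, &ifr))
			perror("ETHTOOL_SPFLAGS");
		else
			printf("legacy-rx enabled on %s\n", ifname);
	} else {
		perror("ETHTOOL_GPFLAGS");
	}

	close(fd);
	return 0;
}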
...@@ -89,19 +89,15 @@ struct ixgbevf_rx_queue_stats { ...@@ -89,19 +89,15 @@ struct ixgbevf_rx_queue_stats {
}; };
enum ixgbevf_ring_state_t { enum ixgbevf_ring_state_t {
__IXGBEVF_RX_3K_BUFFER,
__IXGBEVF_RX_BUILD_SKB_ENABLED,
__IXGBEVF_TX_DETECT_HANG, __IXGBEVF_TX_DETECT_HANG,
__IXGBEVF_HANG_CHECK_ARMED, __IXGBEVF_HANG_CHECK_ARMED,
}; };
#define check_for_tx_hang(ring) \
test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
struct ixgbevf_ring { struct ixgbevf_ring {
struct ixgbevf_ring *next; struct ixgbevf_ring *next;
struct ixgbevf_q_vector *q_vector; /* backpointer to q_vector */
struct net_device *netdev; struct net_device *netdev;
struct device *dev; struct device *dev;
void *desc; /* descriptor ring memory */ void *desc; /* descriptor ring memory */
...@@ -133,7 +129,7 @@ struct ixgbevf_ring { ...@@ -133,7 +129,7 @@ struct ixgbevf_ring {
*/ */
u16 reg_idx; u16 reg_idx;
int queue_index; /* needed for multiqueue queue management */ int queue_index; /* needed for multiqueue queue management */
}; } ____cacheline_internodealigned_in_smp;
/* How many Rx Buffers do we bundle into one write to the hardware ? */ /* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ #define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
...@@ -156,12 +152,20 @@ struct ixgbevf_ring { ...@@ -156,12 +152,20 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048 #define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_RXBUFFER_3072 3072
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define IXGBEVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#if (PAGE_SIZE < 8192)
#define IXGBEVF_MAX_FRAME_BUILD_SKB \
(SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)
#else
#define IXGBEVF_MAX_FRAME_BUILD_SKB IXGBEVF_RXBUFFER_2048
#endif
#define IXGBE_TX_FLAGS_CSUM BIT(0) #define IXGBE_TX_FLAGS_CSUM BIT(0)
#define IXGBE_TX_FLAGS_VLAN BIT(1) #define IXGBE_TX_FLAGS_VLAN BIT(1)
#define IXGBE_TX_FLAGS_TSO BIT(2) #define IXGBE_TX_FLAGS_TSO BIT(2)
...@@ -170,6 +174,50 @@ struct ixgbevf_ring { ...@@ -170,6 +174,50 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16
#define ring_uses_large_buffer(ring) \
test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
#define set_ring_uses_large_buffer(ring) \
set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
#define clear_ring_uses_large_buffer(ring) \
clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
#define ring_uses_build_skb(ring) \
test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
#define set_ring_build_skb_enabled(ring) \
set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
#define clear_ring_build_skb_enabled(ring) \
clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring_uses_large_buffer(ring))
return IXGBEVF_RXBUFFER_3072;
if (ring_uses_build_skb(ring))
return IXGBEVF_MAX_FRAME_BUILD_SKB;
#endif
return IXGBEVF_RXBUFFER_2048;
}
static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring_uses_large_buffer(ring))
return 1;
#endif
return 0;
}
#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
#define check_for_tx_hang(ring) \
test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
struct ixgbevf_ring_container { struct ixgbevf_ring_container {
struct ixgbevf_ring *ring; /* pointer to linked list of rings */ struct ixgbevf_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_bytes; /* total bytes processed this int */
...@@ -194,7 +242,11 @@ struct ixgbevf_q_vector { ...@@ -194,7 +242,11 @@ struct ixgbevf_q_vector {
u16 itr; /* Interrupt throttle rate written to EITR */ u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi; struct napi_struct napi;
struct ixgbevf_ring_container rx, tx; struct ixgbevf_ring_container rx, tx;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9]; char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
#ifdef CONFIG_NET_RX_BUSY_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state; unsigned int state;
#define IXGBEVF_QV_STATE_IDLE 0 #define IXGBEVF_QV_STATE_IDLE 0
...@@ -331,6 +383,8 @@ struct ixgbevf_adapter { ...@@ -331,6 +383,8 @@ struct ixgbevf_adapter {
u32 *rss_key; u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
u32 flags;
#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
}; };
enum ixbgevf_state_t { enum ixbgevf_state_t {
......
...@@ -130,6 +130,9 @@ static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) ...@@ -130,6 +130,9 @@ static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *old_buff);
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw) static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{ {
...@@ -527,6 +530,49 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, ...@@ -527,6 +530,49 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev); skb->protocol = eth_type_trans(skb, rx_ring->netdev);
} }
static
struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
const unsigned int size)
{
struct ixgbevf_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
rx_buffer->pagecnt_bias--;
return rx_buffer;
}
static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer)
{
if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
} else {
/* We are not reusing the buffer so unmap it and free
* any references we are holding to it
*/
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBEVF_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
}
/** /**
* ixgbevf_is_non_eop - process handling of non-EOP buffers * ixgbevf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed * @rx_ring: Rx ring being processed
...@@ -554,32 +600,38 @@ static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, ...@@ -554,32 +600,38 @@ static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
return true; return true;
} }
static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
{
return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
}
static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *bi) struct ixgbevf_rx_buffer *bi)
{ {
struct page *page = bi->page; struct page *page = bi->page;
dma_addr_t dma = bi->dma; dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */ /* since we are recycling buffers we should seldom need to alloc */
if (likely(page)) if (likely(page))
return true; return true;
/* alloc new page for storage */ /* alloc new page for storage */
page = dev_alloc_page(); page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
if (unlikely(!page)) { if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++; rx_ring->rx_stats.alloc_rx_page_failed++;
return false; return false;
} }
/* map page for use */ /* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, dma = dma_map_page_attrs(rx_ring->dev, page, 0,
ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR); DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since /* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use * there isn't much point in holding memory we can't use
*/ */
if (dma_mapping_error(rx_ring->dev, dma)) { if (dma_mapping_error(rx_ring->dev, dma)) {
__free_page(page); __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_rx_page_failed++; rx_ring->rx_stats.alloc_rx_page_failed++;
return false; return false;
...@@ -587,7 +639,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, ...@@ -587,7 +639,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
bi->dma = dma; bi->dma = dma;
bi->page = page; bi->page = page;
bi->page_offset = 0; bi->page_offset = ixgbevf_rx_offset(rx_ring);
bi->pagecnt_bias = 1; bi->pagecnt_bias = 1;
rx_ring->rx_stats.alloc_rx_page++; rx_ring->rx_stats.alloc_rx_page++;
...@@ -621,7 +673,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, ...@@ -621,7 +673,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
/* sync the buffer for use by the device */ /* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma, dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset, bi->page_offset,
IXGBEVF_RX_BUFSZ, ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
/* Refresh the desc even if pkt_addr didn't change /* Refresh the desc even if pkt_addr didn't change
...@@ -734,11 +786,10 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) ...@@ -734,11 +786,10 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
} }
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
struct page *page,
const unsigned int truesize)
{ {
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* avoid re-using remote pages */ /* avoid re-using remote pages */
if (unlikely(ixgbevf_page_is_reserved(page))) if (unlikely(ixgbevf_page_is_reserved(page)))
...@@ -746,17 +797,13 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, ...@@ -746,17 +797,13 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */ /* if we are only owner of page we can reuse it */
if (unlikely(page_ref_count(page) != pagecnt_bias)) if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
return false; return false;
/* flip page offset to other buffer */
rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
#else #else
/* move offset up to the next cache line */ #define IXGBEVF_LAST_OFFSET \
rx_buffer->page_offset += truesize; (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
return false; return false;
#endif #endif
...@@ -765,7 +812,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, ...@@ -765,7 +812,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the * the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds. * number of references the driver holds.
*/ */
if (unlikely(pagecnt_bias == 1)) { if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX); page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX; rx_buffer->pagecnt_bias = USHRT_MAX;
} }
...@@ -777,127 +824,81 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer, ...@@ -777,127 +824,81 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add * @rx_buffer: buffer containing page to add
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into * @skb: sk_buff to place the data into
* @size: size of buffer to be added
* *
* This function will add the data contained in rx_buffer->page to the skb. * This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is
* less than the skb header size, otherwise it will just attach the page as
* a frag to the skb.
*
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
**/ **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer, struct ixgbevf_rx_buffer *rx_buffer,
u16 size, struct sk_buff *skb,
union ixgbe_adv_rx_desc *rx_desc, unsigned int size)
struct sk_buff *skb)
{ {
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
unsigned int truesize = IXGBEVF_RX_BUFSZ; unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else #else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); unsigned int truesize = ring_uses_build_skb(rx_ring) ?
SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
SKB_DATA_ALIGN(size);
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif #endif
unsigned int pull_len;
if (unlikely(skb_is_nonlinear(skb)))
goto add_tail_frag;
if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as is */
if (likely(!ixgbevf_page_is_reserved(page)))
return true;
/* this page cannot be reused so discard it */
return false;
}
/* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
/* update all of the pointers */
va += pull_len;
size -= pull_len;
add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
(unsigned long)va & ~PAGE_MASK, size, truesize);
return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
} }
static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, static
union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb) struct ixgbevf_rx_buffer *rx_buffer,
union ixgbe_adv_rx_desc *rx_desc,
unsigned int size)
{ {
struct ixgbevf_rx_buffer *rx_buffer; void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
struct page *page; #if (PAGE_SIZE < 8192)
u16 size = le16_to_cpu(rx_desc->wb.upper.length); unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; unsigned int truesize = SKB_DATA_ALIGN(size);
page = rx_buffer->page; #endif
prefetchw(page); unsigned int headlen;
struct sk_buff *skb;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
/* prefetch first cache line of first page */ /* prefetch first cache line of first page */
prefetch(page_addr); prefetch(va);
#if L1_CACHE_BYTES < 128 #if L1_CACHE_BYTES < 128
prefetch(page_addr + L1_CACHE_BYTES); prefetch(va + L1_CACHE_BYTES);
#endif #endif
/* allocate a skb to store the frags */ /* allocate a skb to store the frags */
skb = netdev_alloc_skb_ip_align(rx_ring->netdev, skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
IXGBEVF_RX_HDR_SIZE); if (unlikely(!skb))
if (unlikely(!skb)) { return NULL;
rx_ring->rx_stats.alloc_rx_buff_failed++;
return NULL;
}
/* we will be copying header into skb->data in /* Determine available headroom for copy */
* pskb_may_pull so it is in our interest to prefetch headlen = size;
* it now to avoid a possible cache miss if (headlen > IXGBEVF_RX_HDR_SIZE)
*/ headlen = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
prefetchw(skb->data);
}
/* pull page into skb */ /* align pull length to size of long to optimize memcpy performance */
if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
/* hand second half of page back to the ring */
ixgbevf_reuse_rx_page(rx_ring, rx_buffer); /* update all of the pointers */
size -= headlen;
if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page,
(va + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
} else { } else {
/* We are not reusing the buffer so unmap it and free rx_buffer->pagecnt_bias++;
* any references we are holding to it
*/
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
PAGE_SIZE, DMA_FROM_DEVICE,
IXGBEVF_RX_DMA_ATTR);
__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
} }
/* clear contents of buffer_info */
rx_buffer->dma = 0;
rx_buffer->page = NULL;
return skb; return skb;
} }
...@@ -909,6 +910,44 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, ...@@ -909,6 +910,44 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
} }
static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
union ixgbe_adv_rx_desc *rx_desc,
unsigned int size)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size);
#endif
struct sk_buff *skb;
/* prefetch first cache line of first page */
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
#endif
/* build an skb to around the page buffer */
skb = build_skb(va - IXGBEVF_SKB_PAD, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
skb_reserve(skb, IXGBEVF_SKB_PAD);
__skb_put(skb, size);
/* update buffer offset */
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
return skb;
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring, struct ixgbevf_ring *rx_ring,
int budget) int budget)
...@@ -919,6 +958,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, ...@@ -919,6 +958,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
while (likely(total_rx_packets < budget)) { while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc; union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *rx_buffer;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */ /* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
...@@ -927,8 +968,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, ...@@ -927,8 +968,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
} }
rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
size = le16_to_cpu(rx_desc->wb.upper.length);
if (!rx_desc->wb.upper.length) if (!size)
break; break;
/* This memory barrier is needed to keep us from reading /* This memory barrier is needed to keep us from reading
...@@ -937,15 +978,26 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, ...@@ -937,15 +978,26 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
*/ */
rmb(); rmb();
rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */ /* retrieve a buffer from the ring */
skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb); if (skb)
ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
skb = ixgbevf_build_skb(rx_ring, rx_buffer,
rx_desc, size);
else
skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
rx_desc, size);
/* exit if we failed to retrieve a buffer */ /* exit if we failed to retrieve a buffer */
if (!skb) { if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++; rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break; break;
} }
ixgbevf_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++; cleaned_count++;
/* fetch next buffer in frame if non-eop */ /* fetch next buffer in frame if non-eop */
...@@ -1260,85 +1312,6 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) ...@@ -1260,85 +1312,6 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
int r_idx)
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
a->rx_ring[r_idx]->next = q_vector->rx.ring;
q_vector->rx.ring = a->rx_ring[r_idx];
q_vector->rx.count++;
}
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
int t_idx)
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
a->tx_ring[t_idx]->next = q_vector->tx.ring;
q_vector->tx.ring = a->tx_ring[t_idx];
q_vector->tx.count++;
}
/**
* ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
* @adapter: board private structure to initialize
*
* This function maps descriptor rings to the queue-specific vectors
* we were allotted through the MSI-X enabling code. Ideally, we'd have
* one vector per ring/queue, but on a constrained vector budget, we
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
int q_vectors;
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int i, j;
int rqpv, tqpv;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* The ideal configuration...
* We have enough vectors to map one per queue.
*/
if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
map_vector_to_rxq(adapter, v_start, rxr_idx);
for (; txr_idx < txr_remaining; v_start++, txr_idx++)
map_vector_to_txq(adapter, v_start, txr_idx);
return 0;
}
/* If we don't have enough vectors for a 1-to-1
* mapping, we'll have to group them so there are
* multiple queues per vector.
*/
/* Re-adjusting *qpv takes care of the remainder. */
for (i = v_start; i < q_vectors; i++) {
rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
for (j = 0; j < rqpv; j++) {
map_vector_to_rxq(adapter, i, rxr_idx);
rxr_idx++;
rxr_remaining--;
}
}
for (i = v_start; i < q_vectors; i++) {
tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
for (j = 0; j < tqpv; j++) {
map_vector_to_txq(adapter, i, txr_idx);
txr_idx++;
txr_remaining--;
}
}
return 0;
}
/** /**
* ixgbevf_request_msix_irqs - Initialize MSI-X interrupts * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
* @adapter: board private structure * @adapter: board private structure
...@@ -1411,20 +1384,6 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) ...@@ -1411,20 +1384,6 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
return err; return err;
} }
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < q_vectors; i++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
q_vector->rx.ring = NULL;
q_vector->tx.ring = NULL;
q_vector->rx.count = 0;
q_vector->tx.count = 0;
}
}
/** /**
* ixgbevf_request_irq - initialize interrupts * ixgbevf_request_irq - initialize interrupts
* @adapter: board private structure * @adapter: board private structure
...@@ -1464,8 +1423,6 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) ...@@ -1464,8 +1423,6 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
free_irq(adapter->msix_entries[i].vector, free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]); adapter->q_vector[i]);
} }
ixgbevf_reset_q_vectors(adapter);
} }
/** /**
...@@ -1587,7 +1544,8 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) ...@@ -1587,7 +1544,8 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *ring, int index)
{ {
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl; u32 srrctl;
...@@ -1595,7 +1553,10 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) ...@@ -1595,7 +1553,10 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl = IXGBE_SRRCTL_DROP_EN; srrctl = IXGBE_SRRCTL_DROP_EN;
srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; if (ring_uses_large_buffer(ring))
srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
else
srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
...@@ -1767,10 +1728,21 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, ...@@ -1767,10 +1728,21 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
ring->next_to_use = 0; ring->next_to_use = 0;
ring->next_to_alloc = 0; ring->next_to_alloc = 0;
ixgbevf_configure_srrctl(adapter, reg_idx); ixgbevf_configure_srrctl(adapter, ring, reg_idx);
/* RXDCTL.RLPML does not work on 82599 */
if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
/* allow any size packet since we can handle overflow */ #if (PAGE_SIZE < 8192)
rxdctl &= ~IXGBE_RXDCTL_RLPML_EN; /* Limit the maximum frame size so we don't overrun the skb */
if (ring_uses_build_skb(ring) &&
!ring_uses_large_buffer(ring))
rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
IXGBE_RXDCTL_RLPML_EN;
#endif
}
rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
...@@ -1779,6 +1751,29 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, ...@@ -1779,6 +1751,29 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring)); ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
} }
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *rx_ring)
{
struct net_device *netdev = adapter->netdev;
unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
/* set build_skb and buffer size flags */
clear_ring_build_skb_enabled(rx_ring);
clear_ring_uses_large_buffer(rx_ring);
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
set_ring_build_skb_enabled(rx_ring);
#if (PAGE_SIZE < 8192)
if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
return;
set_ring_uses_large_buffer(rx_ring);
#endif
}
/** /**
* ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
* @adapter: board private structure * @adapter: board private structure
...@@ -1806,8 +1801,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ...@@ -1806,8 +1801,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and /* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring * the Base and Length of the Rx Descriptor Ring
*/ */
for (i = 0; i < adapter->num_rx_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++) {
ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
ixgbevf_set_rx_buffer_len(adapter, rx_ring);
ixgbevf_configure_rx_ring(adapter, rx_ring);
}
} }
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
...@@ -2136,13 +2135,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) ...@@ -2136,13 +2135,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
dma_sync_single_range_for_cpu(rx_ring->dev, dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma, rx_buffer->dma,
rx_buffer->page_offset, rx_buffer->page_offset,
IXGBEVF_RX_BUFSZ, ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
/* free resources associated with mapping */ /* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev, dma_unmap_page_attrs(rx_ring->dev,
rx_buffer->dma, rx_buffer->dma,
PAGE_SIZE, ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, DMA_FROM_DEVICE,
IXGBEVF_RX_DMA_ATTR); IXGBEVF_RX_DMA_ATTR);
...@@ -2405,105 +2404,171 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) ...@@ -2405,105 +2404,171 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
} }
/** /**
* ixgbevf_alloc_queues - Allocate memory for all rings * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
* @adapter: board private structure to initialize
*
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
int vector, v_budget;
/* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPU's.
* The default is to use pairs of vectors.
*/
v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
v_budget = min_t(int, v_budget, num_online_cpus());
v_budget += NON_Q_VECTORS;
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries)
return -ENOMEM;
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
/* A failure in MSI-X entry allocation isn't fatal, but the VF driver
* does not support any other modes, so we will simply fail here. Note
* that we clean up the msix_entries pointer else-where.
*/
return ixgbevf_acquire_msix_vectors(adapter, v_budget);
}
static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
struct ixgbevf_ring_container *head)
{
ring->next = head->ring;
head->ring = ring;
head->count++;
}
/**
* ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
* @v_idx: index of vector in adapter struct
* @txr_count: number of Tx rings for q vector
* @txr_idx: index of first Tx ring to assign
* @rxr_count: number of Rx rings for q vector
* @rxr_idx: index of first Rx ring to assign
* *
* We allocate one ring per queue at run-time since we don't know the * We allocate one q_vector. If allocation fails we return -ENOMEM.
* number of queues at compile-time. The polling_netdev array is
* intended for Multiqueue, but should work fine with a single queue.
**/ **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
int txr_count, int txr_idx,
int rxr_count, int rxr_idx)
{ {
struct ixgbevf_q_vector *q_vector;
struct ixgbevf_ring *ring; struct ixgbevf_ring *ring;
int rx = 0, tx = 0; int ring_count, size;
ring_count = txr_count + rxr_count;
size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
/* allocate q_vector and rings */
q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
q_vector->v_idx = v_idx;
for (; tx < adapter->num_tx_queues; tx++) { /* initialize pointer to rings */
ring = kzalloc(sizeof(*ring), GFP_KERNEL); ring = q_vector->ring;
if (!ring)
goto err_allocation;
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev; ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev; ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Tx values */
ixgbevf_add_ring(ring, &q_vector->tx);
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count; ring->count = adapter->tx_ring_count;
ring->queue_index = tx; ring->queue_index = txr_idx;
ring->reg_idx = tx; ring->reg_idx = txr_idx;
adapter->tx_ring[tx] = ring; /* assign ring to adapter */
} adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
txr_idx++;
for (; rx < adapter->num_rx_queues; rx++) { /* push pointer to next ring */
ring = kzalloc(sizeof(*ring), GFP_KERNEL); ring++;
if (!ring) }
goto err_allocation;
while (rxr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev; ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev; ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Rx values */
ixgbevf_add_ring(ring, &q_vector->rx);
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count; ring->count = adapter->rx_ring_count;
ring->queue_index = rx; ring->queue_index = rxr_idx;
ring->reg_idx = rx; ring->reg_idx = rxr_idx;
adapter->rx_ring[rx] = ring; /* assign ring to adapter */
} adapter->rx_ring[rxr_idx] = ring;
return 0; /* update count and index */
rxr_count--;
rxr_idx++;
err_allocation: /* push pointer to next ring */
while (tx) { ring++;
kfree(adapter->tx_ring[--tx]);
adapter->tx_ring[tx] = NULL;
} }
while (rx) { return 0;
kfree(adapter->rx_ring[--rx]);
adapter->rx_ring[rx] = NULL;
}
return -ENOMEM;
} }
/** /**
* ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
* @v_idx: index of vector in adapter struct
* *
* Attempt to configure the interrupts using the best available * This function frees the memory allocated to the q_vector. In addition if
* capabilities of the hardware and the kernel. * NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/ **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
{ {
struct net_device *netdev = adapter->netdev; struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
int err; struct ixgbevf_ring *ring;
int vector, v_budget;
/* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPU's.
* The default is to use pairs of vectors.
*/
v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
v_budget = min_t(int, v_budget, num_online_cpus());
v_budget += NON_Q_VECTORS;
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter.
*/
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries)
return -ENOMEM;
for (vector = 0; vector < v_budget; vector++) ixgbevf_for_each_ring(ring, q_vector->tx)
adapter->msix_entries[vector].entry = vector; adapter->tx_ring[ring->queue_index] = NULL;
err = ixgbevf_acquire_msix_vectors(adapter, v_budget); ixgbevf_for_each_ring(ring, q_vector->rx)
if (err) adapter->rx_ring[ring->queue_index] = NULL;
return err;
err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); adapter->q_vector[v_idx] = NULL;
if (err) netif_napi_del(&q_vector->napi);
return err;
return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); /* ixgbevf_get_stats() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
} }
/** /**
...@@ -2515,35 +2580,53 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) ...@@ -2515,35 +2580,53 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
**/ **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{ {
int q_idx, num_q_vectors; int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
struct ixgbevf_q_vector *q_vector; int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
int err;
if (q_vectors >= (rxr_remaining + txr_remaining)) {
for (; rxr_remaining; v_idx++, q_vectors--) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
err = ixgbevf_alloc_q_vector(adapter, v_idx,
0, 0, rqpv, rxr_idx);
if (err)
goto err_out;
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; /* update counts and index */
rxr_remaining -= rqpv;
rxr_idx += rqpv;
}
}
for (; q_vectors; v_idx++, q_vectors--) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { err = ixgbevf_alloc_q_vector(adapter, v_idx,
q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); tqpv, txr_idx,
if (!q_vector) rqpv, rxr_idx);
if (err)
goto err_out; goto err_out;
q_vector->adapter = adapter;
q_vector->v_idx = q_idx; /* update counts and index */
netif_napi_add(adapter->netdev, &q_vector->napi, rxr_remaining -= rqpv;
ixgbevf_poll, 64); rxr_idx += rqpv;
adapter->q_vector[q_idx] = q_vector; txr_remaining -= tqpv;
txr_idx += tqpv;
} }
return 0; return 0;
err_out: err_out:
while (q_idx) { while (v_idx) {
q_idx--; v_idx--;
q_vector = adapter->q_vector[q_idx]; ixgbevf_free_q_vector(adapter, v_idx);
#ifdef CONFIG_NET_RX_BUSY_POLL
napi_hash_del(&q_vector->napi);
#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
adapter->q_vector[q_idx] = NULL;
} }
return -ENOMEM; return -ENOMEM;
} }
...@@ -2557,17 +2640,11 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) ...@@ -2557,17 +2640,11 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
**/ **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{ {
int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
adapter->q_vector[q_idx] = NULL; while (q_vectors) {
#ifdef CONFIG_NET_RX_BUSY_POLL q_vectors--;
napi_hash_del(&q_vector->napi); ixgbevf_free_q_vector(adapter, q_vectors);
#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
} }
} }
...@@ -2611,12 +2688,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) ...@@ -2611,12 +2688,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
goto err_alloc_q_vectors; goto err_alloc_q_vectors;
} }
err = ixgbevf_alloc_queues(adapter);
if (err) {
pr_err("Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" : (adapter->num_rx_queues > 1) ? "Enabled" :
"Disabled", adapter->num_rx_queues, adapter->num_tx_queues); "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
...@@ -2624,8 +2695,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) ...@@ -2624,8 +2695,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
set_bit(__IXGBEVF_DOWN, &adapter->state); set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0; return 0;
err_alloc_queues:
ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors: err_alloc_q_vectors:
ixgbevf_reset_interrupt_capability(adapter); ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt: err_set_interrupt:
...@@ -2641,17 +2710,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) ...@@ -2641,17 +2710,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
**/ **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{ {
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
kfree(adapter->tx_ring[i]);
adapter->tx_ring[i] = NULL;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
kfree(adapter->rx_ring[i]);
adapter->rx_ring[i] = NULL;
}
adapter->num_tx_queues = 0; adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0; adapter->num_rx_queues = 0;
...@@ -3088,9 +3146,14 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) ...@@ -3088,9 +3146,14 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
if (!err) if (!err)
continue; continue;
hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
break; goto err_setup_tx;
} }
return 0;
err_setup_tx:
/* rewind the index freeing the rings as we go */
while (i--)
ixgbevf_free_tx_resources(adapter->tx_ring[i]);
return err; return err;
} }
...@@ -3148,8 +3211,14 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) ...@@ -3148,8 +3211,14 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
if (!err) if (!err)
continue; continue;
hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
break; goto err_setup_rx;
} }
return 0;
err_setup_rx:
/* rewind the index freeing the rings as we go */
while (i--)
ixgbevf_free_rx_resources(adapter->rx_ring[i]);
return err; return err;
} }
...@@ -3244,28 +3313,31 @@ int ixgbevf_open(struct net_device *netdev) ...@@ -3244,28 +3313,31 @@ int ixgbevf_open(struct net_device *netdev)
ixgbevf_configure(adapter); ixgbevf_configure(adapter);
/* Map the Tx/Rx rings to the vectors we were allotted.
* if request_irq will be called in this function map_rings
* must be called *before* up_complete
*/
ixgbevf_map_rings_to_vectors(adapter);
err = ixgbevf_request_irq(adapter); err = ixgbevf_request_irq(adapter);
if (err) if (err)
goto err_req_irq; goto err_req_irq;
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
goto err_set_queues;
err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
if (err)
goto err_set_queues;
ixgbevf_up_complete(adapter); ixgbevf_up_complete(adapter);
return 0; return 0;
err_set_queues:
ixgbevf_free_irq(adapter);
err_req_irq: err_req_irq:
ixgbevf_down(adapter);
err_setup_rx:
ixgbevf_free_all_rx_resources(adapter); ixgbevf_free_all_rx_resources(adapter);
err_setup_tx: err_setup_rx:
ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_tx_resources(adapter);
err_setup_tx:
ixgbevf_reset(adapter); ixgbevf_reset(adapter);
err_setup_reset: err_setup_reset:
return err; return err;
...@@ -3707,11 +3779,10 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) ...@@ -3707,11 +3779,10 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
return __ixgbevf_maybe_stop_tx(tx_ring, size); return __ixgbevf_maybe_stop_tx(tx_ring, size);
} }
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
struct ixgbevf_ring *tx_ring)
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_tx_buffer *first; struct ixgbevf_tx_buffer *first;
struct ixgbevf_ring *tx_ring;
int tso; int tso;
u32 tx_flags = 0; u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb)); u16 count = TXD_USE_COUNT(skb_headlen(skb));
...@@ -3726,8 +3797,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -3726,8 +3797,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
tx_ring = adapter->tx_ring[skb->queue_mapping];
/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head, * + 2 desc gap to keep tail from touching head,
...@@ -3780,6 +3849,29 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -3780,6 +3849,29 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring;
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* The minimum packet size for olinfo paylen is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
if (skb->len < 17) {
if (skb_padto(skb, 17))
return NETDEV_TX_OK;
skb->len = 17;
}
tx_ring = adapter->tx_ring[skb->queue_mapping];
return ixgbevf_xmit_frame_ring(skb, tx_ring);
}
/** /**
* ixgbevf_set_mac - Change the Ethernet Address of the NIC * ixgbevf_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -3839,6 +3931,9 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -3839,6 +3931,9 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */ /* must set new MTU before calling down or up */
netdev->mtu = new_mtu; netdev->mtu = new_mtu;
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
return 0; return 0;
} }
...@@ -3917,17 +4012,11 @@ static int ixgbevf_resume(struct pci_dev *pdev) ...@@ -3917,17 +4012,11 @@ static int ixgbevf_resume(struct pci_dev *pdev)
rtnl_lock(); rtnl_lock();
err = ixgbevf_init_interrupt_scheme(adapter); err = ixgbevf_init_interrupt_scheme(adapter);
if (!err && netif_running(netdev))
err = ixgbevf_open(netdev);
rtnl_unlock(); rtnl_unlock();
if (err) { if (err)
dev_err(&pdev->dev, "Cannot initialize interrupts\n");
return err; return err;
}
if (netif_running(netdev)) {
err = ixgbevf_open(netdev);
if (err)
return err;
}
netif_device_attach(netdev); netif_device_attach(netdev);
...@@ -3953,6 +4042,7 @@ static void ixgbevf_get_stats(struct net_device *netdev, ...@@ -3953,6 +4042,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) { for (i = 0; i < adapter->num_rx_queues; i++) {
ring = adapter->rx_ring[i]; ring = adapter->rx_ring[i];
do { do {
...@@ -3974,6 +4064,7 @@ static void ixgbevf_get_stats(struct net_device *netdev, ...@@ -3974,6 +4064,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
stats->tx_bytes += bytes; stats->tx_bytes += bytes;
stats->tx_packets += packets; stats->tx_packets += packets;
} }
rcu_read_unlock();
} }
#define IXGBEVF_MAX_MAC_HDR_LEN 127 #define IXGBEVF_MAX_MAC_HDR_LEN 127
......