Commit 81c2fc22 authored by Alexander Duyck, committed by Jeff Kirsher

igb: Use node specific allocations for the q_vectors and rings

This change is meant to update the ring and vector allocations so that they
are per node instead of allocating everything on the node that
ifconfig/modprobe is called on.  By doing this we can cut down
significantly on cross node traffic.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 7af40ad9
...@@ -185,6 +185,8 @@ struct igb_q_vector { ...@@ -185,6 +185,8 @@ struct igb_q_vector {
u16 cpu; u16 cpu;
u16 tx_work_limit; u16 tx_work_limit;
int numa_node;
u16 itr_val; u16 itr_val;
u8 set_itr; u8 set_itr;
void __iomem *itr_register; void __iomem *itr_register;
...@@ -232,6 +234,7 @@ struct igb_ring { ...@@ -232,6 +234,7 @@ struct igb_ring {
}; };
/* Items past this point are only used during ring alloc / free */ /* Items past this point are only used during ring alloc / free */
dma_addr_t dma; /* phys address of the ring */ dma_addr_t dma; /* phys address of the ring */
int numa_node; /* node to alloc ring memory on */
}; };
#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */ #define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
...@@ -341,6 +344,7 @@ struct igb_adapter { ...@@ -341,6 +344,7 @@ struct igb_adapter {
int vf_rate_link_speed; int vf_rate_link_speed;
u32 rss_queues; u32 rss_queues;
u32 wvbr; u32 wvbr;
int node;
}; };
#define IGB_FLAG_HAS_MSI (1 << 0) #define IGB_FLAG_HAS_MSI (1 << 0)
......
...@@ -687,41 +687,68 @@ static int igb_alloc_queues(struct igb_adapter *adapter) ...@@ -687,41 +687,68 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
{ {
struct igb_ring *ring; struct igb_ring *ring;
int i; int i;
int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) { for (i = 0; i < adapter->num_tx_queues; i++) {
ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (orig_node == -1) {
int cur_node = next_online_node(adapter->node);
if (cur_node == MAX_NUMNODES)
cur_node = first_online_node;
adapter->node = cur_node;
}
ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
adapter->node);
if (!ring)
ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring) if (!ring)
goto err; goto err;
ring->count = adapter->tx_ring_count; ring->count = adapter->tx_ring_count;
ring->queue_index = i; ring->queue_index = i;
ring->dev = &adapter->pdev->dev; ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev; ring->netdev = adapter->netdev;
ring->numa_node = adapter->node;
/* For 82575, context index must be unique per ring. */ /* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575) if (adapter->hw.mac.type == e1000_82575)
ring->flags = IGB_RING_FLAG_TX_CTX_IDX; ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
adapter->tx_ring[i] = ring; adapter->tx_ring[i] = ring;
} }
/* Restore the adapter's original node */
adapter->node = orig_node;
for (i = 0; i < adapter->num_rx_queues; i++) { for (i = 0; i < adapter->num_rx_queues; i++) {
ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (orig_node == -1) {
int cur_node = next_online_node(adapter->node);
if (cur_node == MAX_NUMNODES)
cur_node = first_online_node;
adapter->node = cur_node;
}
ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
adapter->node);
if (!ring)
ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring) if (!ring)
goto err; goto err;
ring->count = adapter->rx_ring_count; ring->count = adapter->rx_ring_count;
ring->queue_index = i; ring->queue_index = i;
ring->dev = &adapter->pdev->dev; ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev; ring->netdev = adapter->netdev;
ring->numa_node = adapter->node;
ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */ ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
/* set flag indicating ring supports SCTP checksum offload */ /* set flag indicating ring supports SCTP checksum offload */
if (adapter->hw.mac.type >= e1000_82576) if (adapter->hw.mac.type >= e1000_82576)
ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM; ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
adapter->rx_ring[i] = ring; adapter->rx_ring[i] = ring;
} }
/* Restore the adapter's original node */
adapter->node = orig_node;
igb_cache_ring_register(adapter); igb_cache_ring_register(adapter);
return 0; return 0;
err: err:
/* Restore the adapter's original node */
adapter->node = orig_node;
igb_free_queues(adapter); igb_free_queues(adapter);
return -ENOMEM; return -ENOMEM;
...@@ -1087,9 +1114,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter) ...@@ -1087,9 +1114,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
struct igb_q_vector *q_vector; struct igb_q_vector *q_vector;
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
int v_idx; int v_idx;
int orig_node = adapter->node;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL); if ((adapter->num_q_vectors == (adapter->num_rx_queues +
adapter->num_tx_queues)) &&
(adapter->num_rx_queues == v_idx))
adapter->node = orig_node;
if (orig_node == -1) {
int cur_node = next_online_node(adapter->node);
if (cur_node == MAX_NUMNODES)
cur_node = first_online_node;
adapter->node = cur_node;
}
q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
adapter->node);
if (!q_vector)
q_vector = kzalloc(sizeof(struct igb_q_vector),
GFP_KERNEL);
if (!q_vector) if (!q_vector)
goto err_out; goto err_out;
q_vector->adapter = adapter; q_vector->adapter = adapter;
...@@ -1098,9 +1140,14 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter) ...@@ -1098,9 +1140,14 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
adapter->q_vector[v_idx] = q_vector; adapter->q_vector[v_idx] = q_vector;
} }
/* Restore the adapter's original node */
adapter->node = orig_node;
return 0; return 0;
err_out: err_out:
/* Restore the adapter's original node */
adapter->node = orig_node;
igb_free_q_vectors(adapter); igb_free_q_vectors(adapter);
return -ENOMEM; return -ENOMEM;
} }
...@@ -2409,6 +2456,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) ...@@ -2409,6 +2456,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
VLAN_HLEN; VLAN_HLEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->node = -1;
spin_lock_init(&adapter->stats64_lock); spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
switch (hw->mac.type) { switch (hw->mac.type) {
...@@ -2579,10 +2628,13 @@ static int igb_close(struct net_device *netdev) ...@@ -2579,10 +2628,13 @@ static int igb_close(struct net_device *netdev)
int igb_setup_tx_resources(struct igb_ring *tx_ring) int igb_setup_tx_resources(struct igb_ring *tx_ring)
{ {
struct device *dev = tx_ring->dev; struct device *dev = tx_ring->dev;
int orig_node = dev_to_node(dev);
int size; int size;
size = sizeof(struct igb_tx_buffer) * tx_ring->count; size = sizeof(struct igb_tx_buffer) * tx_ring->count;
tx_ring->tx_buffer_info = vzalloc(size); tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
if (!tx_ring->tx_buffer_info)
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info) if (!tx_ring->tx_buffer_info)
goto err; goto err;
...@@ -2590,16 +2642,24 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring) ...@@ -2590,16 +2642,24 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->size = ALIGN(tx_ring->size, 4096);
set_dev_node(dev, tx_ring->numa_node);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size, tx_ring->size,
&tx_ring->dma, &tx_ring->dma,
GFP_KERNEL); GFP_KERNEL);
set_dev_node(dev, orig_node);
if (!tx_ring->desc)
tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size,
&tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) if (!tx_ring->desc)
goto err; goto err;
tx_ring->next_to_use = 0; tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0; tx_ring->next_to_clean = 0;
return 0; return 0;
err: err:
...@@ -2722,10 +2782,13 @@ static void igb_configure_tx(struct igb_adapter *adapter) ...@@ -2722,10 +2782,13 @@ static void igb_configure_tx(struct igb_adapter *adapter)
int igb_setup_rx_resources(struct igb_ring *rx_ring) int igb_setup_rx_resources(struct igb_ring *rx_ring)
{ {
struct device *dev = rx_ring->dev; struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
int size, desc_len; int size, desc_len;
size = sizeof(struct igb_rx_buffer) * rx_ring->count; size = sizeof(struct igb_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size); rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info) if (!rx_ring->rx_buffer_info)
goto err; goto err;
...@@ -2735,10 +2798,17 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) ...@@ -2735,10 +2798,17 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->size = rx_ring->count * desc_len; rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->size = ALIGN(rx_ring->size, 4096);
set_dev_node(dev, rx_ring->numa_node);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size, rx_ring->size,
&rx_ring->dma, &rx_ring->dma,
GFP_KERNEL); GFP_KERNEL);
set_dev_node(dev, orig_node);
if (!rx_ring->desc)
rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size,
&rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) if (!rx_ring->desc)
goto err; goto err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment