Commit de88eeeb authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Allocate rings as part of the q_vector

This patch makes the rings a part of the q_vector directly instead of
indirectly.  Specifically, on x86 systems this helps to avoid cache set
conflicts between the q_vector, the tx_rings, and the rx_rings, since the
critical stride is 4K; to cross that boundary you would need over 15 rings
on a single q_vector.
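
A minimal sketch of the allocation pattern this enables, assuming a single
node-local kzalloc_node() that covers the q_vector plus a zero-length ring
array at its tail (ring_count, txr_count, rxr_count, and node are
illustrative names, not lifted verbatim from the patch):

	struct ixgbe_q_vector *q_vector;
	int ring_count = txr_count + rxr_count;

	/* one allocation covers the q_vector and all of its rings,
	 * placed on the requested NUMA node
	 */
	q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector) +
				(sizeof(struct ixgbe_ring) * ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		return -ENOMEM;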

In addition, this allows for smarter allocations when Flow Director is
enabled.  Previously, Flow Director would set the irq_affinity hints based
on the CPU while still using a node-interleaving approach, which on some
systems would end up with the two values mismatched.  With the new approach
we can set the affinity for the irq_vector and use that CPU to determine
the node value for both the q_vector and its rings.
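
A hedged sketch of the new derivation, assuming the vector index is used as
the CPU when that CPU is online; cpu, node, and v_idx are illustrative
here, and the q_vector field names follow the hunks below:

	int cpu = -1;
	int node = NUMA_NO_NODE;

	/* take the NUMA node from the same CPU that will receive the
	 * IRQ affinity hint, so the two values can no longer disagree
	 */
	if (cpu_online(v_idx)) {
		cpu = v_idx;
		node = cpu_to_node(cpu);
	}

	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);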
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent b9f6ed2b
@@ -254,10 +254,8 @@ struct ixgbe_ring {
 		struct ixgbe_tx_queue_stats tx_stats;
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
-	int numa_node;
 	unsigned int size;		/* length in bytes */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
-	struct rcu_head rcu;
 	struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
 } ____cacheline_internodealigned_in_smp;

@@ -317,8 +315,13 @@ struct ixgbe_q_vector {
 	struct ixgbe_ring_container rx, tx;

 	struct napi_struct napi;
-	cpumask_var_t affinity_mask;
+	cpumask_t affinity_mask;
+	int numa_node;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };

 /*

@@ -514,7 +517,6 @@ struct ixgbe_adapter {
 	u16 eeprom_verl;
 	u16 eeprom_cap;

-	int node;
 	u32 interrupt_event;
 	u32 led_reg;

@@ -1591,7 +1591,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	tx_ring->dev = &adapter->pdev->dev;
 	tx_ring->netdev = adapter->netdev;
 	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
-	tx_ring->numa_node = adapter->node;

 	err = ixgbe_setup_tx_resources(tx_ring);
 	if (err)

@@ -1617,7 +1616,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	rx_ring->netdev = adapter->netdev;
 	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
 	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;
-	rx_ring->numa_node = adapter->node;

 	err = ixgbe_setup_rx_resources(rx_ring);
 	if (err) {
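
With ring[0] embedded at the tail of ixgbe_q_vector, per-ring setup can
walk the trailing array directly and wire up the back-pointer.  A sketch
under the assumption of a txr_count loop counter (not shown in the hunks
above):

	struct ixgbe_ring *ring = q_vector->ring;

	while (txr_count--) {
		/* each embedded ring points back at its host q_vector */
		ring->q_vector = q_vector;
		ring++;
	}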