Commit 0ba82994 authored by Alexander Duyck, committed by Jeff Kirsher

igb: Move ITR related data into work container within the q_vector

This change moves information related to interrupt throttle rate
configuration into a separate q_vector sub-structure called a work
container. A similar change has already been made for ixgbe and this work
is based off of that.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 866cff06
...@@ -409,6 +409,9 @@ ...@@ -409,6 +409,9 @@
#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ #define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */
/* Extended Interrupt Cause Set */ /* Extended Interrupt Cause Set */
/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
/* Transmit Descriptor Control */ /* Transmit Descriptor Control */
/* Enable the counting of descriptors still to be processed. */ /* Enable the counting of descriptors still to be processed. */
......
...@@ -42,8 +42,11 @@ ...@@ -42,8 +42,11 @@
struct igb_adapter; struct igb_adapter;
/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */ /* Interrupt defines */
#define IGB_START_ITR 648 #define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
#define IGB_20K_ITR 196
#define IGB_70K_ITR 56
/* TX/RX descriptor defines */ /* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256 #define IGB_DEFAULT_TXD 256
...@@ -175,16 +178,23 @@ struct igb_rx_queue_stats { ...@@ -175,16 +178,23 @@ struct igb_rx_queue_stats {
u64 alloc_failed; u64 alloc_failed;
}; };
struct igb_ring_container {
struct igb_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
};
struct igb_q_vector { struct igb_q_vector {
struct igb_adapter *adapter; /* backlink */ struct igb_adapter *adapter; /* backlink */
struct igb_ring *rx_ring; int cpu; /* CPU for DCA */
struct igb_ring *tx_ring; u32 eims_value; /* EIMS mask value */
struct napi_struct napi;
u32 eims_value; struct igb_ring_container rx, tx;
u16 cpu;
u16 tx_work_limit;
struct napi_struct napi;
int numa_node; int numa_node;
u16 itr_val; u16 itr_val;
...@@ -215,9 +225,6 @@ struct igb_ring { ...@@ -215,9 +225,6 @@ struct igb_ring {
u16 next_to_clean ____cacheline_aligned_in_smp; u16 next_to_clean ____cacheline_aligned_in_smp;
u16 next_to_use; u16 next_to_use;
unsigned int total_bytes;
unsigned int total_packets;
union { union {
/* TX */ /* TX */
struct { struct {
......
...@@ -2013,8 +2013,8 @@ static int igb_set_coalesce(struct net_device *netdev, ...@@ -2013,8 +2013,8 @@ static int igb_set_coalesce(struct net_device *netdev,
for (i = 0; i < adapter->num_q_vectors; i++) { for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i]; struct igb_q_vector *q_vector = adapter->q_vector[i];
q_vector->tx_work_limit = adapter->tx_work_limit; q_vector->tx.work_limit = adapter->tx_work_limit;
if (q_vector->rx_ring) if (q_vector->rx.ring)
q_vector->itr_val = adapter->rx_itr_setting; q_vector->itr_val = adapter->rx_itr_setting;
else else
q_vector->itr_val = adapter->tx_itr_setting; q_vector->itr_val = adapter->tx_itr_setting;
......
...@@ -764,10 +764,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) ...@@ -764,10 +764,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
int rx_queue = IGB_N0_QUEUE; int rx_queue = IGB_N0_QUEUE;
int tx_queue = IGB_N0_QUEUE; int tx_queue = IGB_N0_QUEUE;
if (q_vector->rx_ring) if (q_vector->rx.ring)
rx_queue = q_vector->rx_ring->reg_idx; rx_queue = q_vector->rx.ring->reg_idx;
if (q_vector->tx_ring) if (q_vector->tx.ring)
tx_queue = q_vector->tx_ring->reg_idx; tx_queue = q_vector->tx.ring->reg_idx;
switch (hw->mac.type) { switch (hw->mac.type) {
case e1000_82575: case e1000_82575:
...@@ -950,15 +950,15 @@ static int igb_request_msix(struct igb_adapter *adapter) ...@@ -950,15 +950,15 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
if (q_vector->rx_ring && q_vector->tx_ring) if (q_vector->rx.ring && q_vector->tx.ring)
sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
q_vector->rx_ring->queue_index); q_vector->rx.ring->queue_index);
else if (q_vector->tx_ring) else if (q_vector->tx.ring)
sprintf(q_vector->name, "%s-tx-%u", netdev->name, sprintf(q_vector->name, "%s-tx-%u", netdev->name,
q_vector->tx_ring->queue_index); q_vector->tx.ring->queue_index);
else if (q_vector->rx_ring) else if (q_vector->rx.ring)
sprintf(q_vector->name, "%s-rx-%u", netdev->name, sprintf(q_vector->name, "%s-rx-%u", netdev->name,
q_vector->rx_ring->queue_index); q_vector->rx.ring->queue_index);
else else
sprintf(q_vector->name, "%s-unused", netdev->name); sprintf(q_vector->name, "%s-unused", netdev->name);
...@@ -1157,8 +1157,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, ...@@ -1157,8 +1157,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
{ {
struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
q_vector->rx_ring = adapter->rx_ring[ring_idx]; q_vector->rx.ring = adapter->rx_ring[ring_idx];
q_vector->rx_ring->q_vector = q_vector; q_vector->rx.ring->q_vector = q_vector;
q_vector->rx.count++;
q_vector->itr_val = adapter->rx_itr_setting; q_vector->itr_val = adapter->rx_itr_setting;
if (q_vector->itr_val && q_vector->itr_val <= 3) if (q_vector->itr_val && q_vector->itr_val <= 3)
q_vector->itr_val = IGB_START_ITR; q_vector->itr_val = IGB_START_ITR;
...@@ -1169,10 +1170,11 @@ static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, ...@@ -1169,10 +1170,11 @@ static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
{ {
struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
q_vector->tx_ring = adapter->tx_ring[ring_idx]; q_vector->tx.ring = adapter->tx_ring[ring_idx];
q_vector->tx_ring->q_vector = q_vector; q_vector->tx.ring->q_vector = q_vector;
q_vector->tx.count++;
q_vector->itr_val = adapter->tx_itr_setting; q_vector->itr_val = adapter->tx_itr_setting;
q_vector->tx_work_limit = adapter->tx_work_limit; q_vector->tx.work_limit = adapter->tx_work_limit;
if (q_vector->itr_val && q_vector->itr_val <= 3) if (q_vector->itr_val && q_vector->itr_val <= 3)
q_vector->itr_val = IGB_START_ITR; q_vector->itr_val = IGB_START_ITR;
} }
...@@ -3826,33 +3828,24 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) ...@@ -3826,33 +3828,24 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
int new_val = q_vector->itr_val; int new_val = q_vector->itr_val;
int avg_wire_size = 0; int avg_wire_size = 0;
struct igb_adapter *adapter = q_vector->adapter; struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *ring;
unsigned int packets; unsigned int packets;
/* For non-gigabit speeds, just fix the interrupt rate at 4000 /* For non-gigabit speeds, just fix the interrupt rate at 4000
* ints/sec - ITR timer value of 120 ticks. * ints/sec - ITR timer value of 120 ticks.
*/ */
if (adapter->link_speed != SPEED_1000) { if (adapter->link_speed != SPEED_1000) {
new_val = 976; new_val = IGB_4K_ITR;
goto set_itr_val; goto set_itr_val;
} }
ring = q_vector->rx_ring; packets = q_vector->rx.total_packets;
if (ring) { if (packets)
packets = ACCESS_ONCE(ring->total_packets); avg_wire_size = q_vector->rx.total_bytes / packets;
if (packets)
avg_wire_size = ring->total_bytes / packets;
}
ring = q_vector->tx_ring; packets = q_vector->tx.total_packets;
if (ring) { if (packets)
packets = ACCESS_ONCE(ring->total_packets); avg_wire_size = max_t(u32, avg_wire_size,
q_vector->tx.total_bytes / packets);
if (packets)
avg_wire_size = max_t(u32, avg_wire_size,
ring->total_bytes / packets);
}
/* if avg_wire_size isn't set no work was done */ /* if avg_wire_size isn't set no work was done */
if (!avg_wire_size) if (!avg_wire_size)
...@@ -3870,9 +3863,11 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) ...@@ -3870,9 +3863,11 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
else else
new_val = avg_wire_size / 2; new_val = avg_wire_size / 2;
/* when in itr mode 3 do not exceed 20K ints/sec */ /* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->rx_itr_setting == 3 && new_val < 196) if (new_val < IGB_20K_ITR &&
new_val = 196; ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
(!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
new_val = IGB_20K_ITR;
set_itr_val: set_itr_val:
if (new_val != q_vector->itr_val) { if (new_val != q_vector->itr_val) {
...@@ -3880,14 +3875,10 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) ...@@ -3880,14 +3875,10 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
q_vector->set_itr = 1; q_vector->set_itr = 1;
} }
clear_counts: clear_counts:
if (q_vector->rx_ring) { q_vector->rx.total_bytes = 0;
q_vector->rx_ring->total_bytes = 0; q_vector->rx.total_packets = 0;
q_vector->rx_ring->total_packets = 0; q_vector->tx.total_bytes = 0;
} q_vector->tx.total_packets = 0;
if (q_vector->tx_ring) {
q_vector->tx_ring->total_bytes = 0;
q_vector->tx_ring->total_packets = 0;
}
} }
/** /**
...@@ -3903,106 +3894,102 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector) ...@@ -3903,106 +3894,102 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
* parameter (see igb_param.c) * parameter (see igb_param.c)
* NOTE: These calculations are only valid when operating in a single- * NOTE: These calculations are only valid when operating in a single-
* queue environment. * queue environment.
* @adapter: pointer to adapter * @q_vector: pointer to q_vector
* @itr_setting: current q_vector->itr_val * @ring_container: ring info to update the itr for
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
**/ **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting, static void igb_update_itr(struct igb_q_vector *q_vector,
int packets, int bytes) struct igb_ring_container *ring_container)
{ {
unsigned int retval = itr_setting; unsigned int packets = ring_container->total_packets;
unsigned int bytes = ring_container->total_bytes;
u8 itrval = ring_container->itr;
/* no packets, exit with status unchanged */
if (packets == 0) if (packets == 0)
goto update_itr_done; return;
switch (itr_setting) { switch (itrval) {
case lowest_latency: case lowest_latency:
/* handle TSO and jumbo frames */ /* handle TSO and jumbo frames */
if (bytes/packets > 8000) if (bytes/packets > 8000)
retval = bulk_latency; itrval = bulk_latency;
else if ((packets < 5) && (bytes > 512)) else if ((packets < 5) && (bytes > 512))
retval = low_latency; itrval = low_latency;
break; break;
case low_latency: /* 50 usec aka 20000 ints/s */ case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) { if (bytes > 10000) {
/* this if handles the TSO accounting */ /* this if handles the TSO accounting */
if (bytes/packets > 8000) { if (bytes/packets > 8000) {
retval = bulk_latency; itrval = bulk_latency;
} else if ((packets < 10) || ((bytes/packets) > 1200)) { } else if ((packets < 10) || ((bytes/packets) > 1200)) {
retval = bulk_latency; itrval = bulk_latency;
} else if ((packets > 35)) { } else if ((packets > 35)) {
retval = lowest_latency; itrval = lowest_latency;
} }
} else if (bytes/packets > 2000) { } else if (bytes/packets > 2000) {
retval = bulk_latency; itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) { } else if (packets <= 2 && bytes < 512) {
retval = lowest_latency; itrval = lowest_latency;
} }
break; break;
case bulk_latency: /* 250 usec aka 4000 ints/s */ case bulk_latency: /* 250 usec aka 4000 ints/s */
if (bytes > 25000) { if (bytes > 25000) {
if (packets > 35) if (packets > 35)
retval = low_latency; itrval = low_latency;
} else if (bytes < 1500) { } else if (bytes < 1500) {
retval = low_latency; itrval = low_latency;
} }
break; break;
} }
update_itr_done: /* clear work counters since we have the values we need */
return retval; ring_container->total_bytes = 0;
ring_container->total_packets = 0;
/* write updated itr to ring container */
ring_container->itr = itrval;
} }
static void igb_set_itr(struct igb_adapter *adapter) static void igb_set_itr(struct igb_q_vector *q_vector)
{ {
struct igb_q_vector *q_vector = adapter->q_vector[0]; struct igb_adapter *adapter = q_vector->adapter;
u16 current_itr;
u32 new_itr = q_vector->itr_val; u32 new_itr = q_vector->itr_val;
u8 current_itr = 0;
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) { if (adapter->link_speed != SPEED_1000) {
current_itr = 0; current_itr = 0;
new_itr = 4000; new_itr = IGB_4K_ITR;
goto set_itr_now; goto set_itr_now;
} }
adapter->rx_itr = igb_update_itr(adapter, igb_update_itr(q_vector, &q_vector->tx);
adapter->rx_itr, igb_update_itr(q_vector, &q_vector->rx);
q_vector->rx_ring->total_packets,
q_vector->rx_ring->total_bytes);
adapter->tx_itr = igb_update_itr(adapter, current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
adapter->tx_itr,
q_vector->tx_ring->total_packets,
q_vector->tx_ring->total_bytes);
current_itr = max(adapter->rx_itr, adapter->tx_itr);
/* conservative mode (itr 3) eliminates the lowest_latency setting */ /* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency) if (current_itr == lowest_latency &&
((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
(!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
current_itr = low_latency; current_itr = low_latency;
switch (current_itr) { switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */ /* counts and packets in update_itr are dependent on these numbers */
case lowest_latency: case lowest_latency:
new_itr = 56; /* aka 70,000 ints/sec */ new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
break; break;
case low_latency: case low_latency:
new_itr = 196; /* aka 20,000 ints/sec */ new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
break; break;
case bulk_latency: case bulk_latency:
new_itr = 980; /* aka 4,000 ints/sec */ new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
break; break;
default: default:
break; break;
} }
set_itr_now: set_itr_now:
q_vector->rx_ring->total_bytes = 0;
q_vector->rx_ring->total_packets = 0;
q_vector->tx_ring->total_bytes = 0;
q_vector->tx_ring->total_packets = 0;
if (new_itr != q_vector->itr_val) { if (new_itr != q_vector->itr_val) {
/* this attempts to bias the interrupt rate towards Bulk /* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is * by adding intermediate steps when interrupt rate is
...@@ -4010,7 +3997,7 @@ static void igb_set_itr(struct igb_adapter *adapter) ...@@ -4010,7 +3997,7 @@ static void igb_set_itr(struct igb_adapter *adapter)
new_itr = new_itr > q_vector->itr_val ? new_itr = new_itr > q_vector->itr_val ?
max((new_itr * q_vector->itr_val) / max((new_itr * q_vector->itr_val) /
(new_itr + (q_vector->itr_val >> 2)), (new_itr + (q_vector->itr_val >> 2)),
new_itr) : new_itr) :
new_itr; new_itr;
/* Don't write the value here; it resets the adapter's /* Don't write the value here; it resets the adapter's
* internal timer, and causes us to delay far longer than * internal timer, and causes us to delay far longer than
...@@ -4830,7 +4817,7 @@ static void igb_write_itr(struct igb_q_vector *q_vector) ...@@ -4830,7 +4817,7 @@ static void igb_write_itr(struct igb_q_vector *q_vector)
if (adapter->hw.mac.type == e1000_82575) if (adapter->hw.mac.type == e1000_82575)
itr_val |= itr_val << 16; itr_val |= itr_val << 16;
else else
itr_val |= 0x8000000; itr_val |= E1000_EITR_CNT_IGNR;
writel(itr_val, q_vector->itr_register); writel(itr_val, q_vector->itr_register);
q_vector->set_itr = 0; q_vector->set_itr = 0;
...@@ -4858,8 +4845,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector) ...@@ -4858,8 +4845,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector)
if (q_vector->cpu == cpu) if (q_vector->cpu == cpu)
goto out_no_update; goto out_no_update;
if (q_vector->tx_ring) { if (q_vector->tx.ring) {
int q = q_vector->tx_ring->reg_idx; int q = q_vector->tx.ring->reg_idx;
u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
if (hw->mac.type == e1000_82575) { if (hw->mac.type == e1000_82575) {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
...@@ -4872,8 +4859,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector) ...@@ -4872,8 +4859,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector)
dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
wr32(E1000_DCA_TXCTRL(q), dca_txctrl); wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
} }
if (q_vector->rx_ring) { if (q_vector->rx.ring) {
int q = q_vector->rx_ring->reg_idx; int q = q_vector->rx.ring->reg_idx;
u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
if (hw->mac.type == e1000_82575) { if (hw->mac.type == e1000_82575) {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
...@@ -5517,16 +5504,14 @@ static irqreturn_t igb_intr(int irq, void *data) ...@@ -5517,16 +5504,14 @@ static irqreturn_t igb_intr(int irq, void *data)
/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
* need for the IMC write */ * need for the IMC write */
u32 icr = rd32(E1000_ICR); u32 icr = rd32(E1000_ICR);
if (!icr)
return IRQ_NONE; /* Not our interrupt */
igb_write_itr(q_vector);
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
* not set, then the adapter didn't send an interrupt */ * not set, then the adapter didn't send an interrupt */
if (!(icr & E1000_ICR_INT_ASSERTED)) if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE; return IRQ_NONE;
igb_write_itr(q_vector);
if (icr & E1000_ICR_DRSTA) if (icr & E1000_ICR_DRSTA)
schedule_work(&adapter->reset_task); schedule_work(&adapter->reset_task);
...@@ -5547,15 +5532,15 @@ static irqreturn_t igb_intr(int irq, void *data) ...@@ -5547,15 +5532,15 @@ static irqreturn_t igb_intr(int irq, void *data)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector) void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{ {
struct igb_adapter *adapter = q_vector->adapter; struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) || if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
(!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) { (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
if (!adapter->msix_entries) if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
igb_set_itr(adapter); igb_set_itr(q_vector);
else else
igb_update_ring_itr(q_vector); igb_update_ring_itr(q_vector);
} }
...@@ -5584,10 +5569,10 @@ static int igb_poll(struct napi_struct *napi, int budget) ...@@ -5584,10 +5569,10 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_dca(q_vector); igb_update_dca(q_vector);
#endif #endif
if (q_vector->tx_ring) if (q_vector->tx.ring)
clean_complete = igb_clean_tx_irq(q_vector); clean_complete = igb_clean_tx_irq(q_vector);
if (q_vector->rx_ring) if (q_vector->rx.ring)
clean_complete &= igb_clean_rx_irq(q_vector, budget); clean_complete &= igb_clean_rx_irq(q_vector, budget);
/* If all work not completed, return budget and keep polling */ /* If all work not completed, return budget and keep polling */
...@@ -5667,11 +5652,11 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, ...@@ -5667,11 +5652,11 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{ {
struct igb_adapter *adapter = q_vector->adapter; struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx_ring; struct igb_ring *tx_ring = q_vector->tx.ring;
struct igb_tx_buffer *tx_buffer; struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc, *eop_desc; union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0; unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = q_vector->tx_work_limit; unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean; unsigned int i = tx_ring->next_to_clean;
if (test_bit(__IGB_DOWN, &adapter->state)) if (test_bit(__IGB_DOWN, &adapter->state))
...@@ -5757,8 +5742,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) ...@@ -5757,8 +5742,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_ring->tx_stats.bytes += total_bytes; tx_ring->tx_stats.bytes += total_bytes;
tx_ring->tx_stats.packets += total_packets; tx_ring->tx_stats.packets += total_packets;
u64_stats_update_end(&tx_ring->tx_syncp); u64_stats_update_end(&tx_ring->tx_syncp);
tx_ring->total_bytes += total_bytes; q_vector->tx.total_bytes += total_bytes;
tx_ring->total_packets += total_packets; q_vector->tx.total_packets += total_packets;
if (tx_ring->detect_tx_hung) { if (tx_ring->detect_tx_hung) {
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
...@@ -5907,7 +5892,7 @@ static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc) ...@@ -5907,7 +5892,7 @@ static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{ {
struct igb_ring *rx_ring = q_vector->rx_ring; struct igb_ring *rx_ring = q_vector->rx.ring;
union e1000_adv_rx_desc *rx_desc; union e1000_adv_rx_desc *rx_desc;
const int current_node = numa_node_id(); const int current_node = numa_node_id();
unsigned int total_bytes = 0, total_packets = 0; unsigned int total_bytes = 0, total_packets = 0;
...@@ -6024,8 +6009,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) ...@@ -6024,8 +6009,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
rx_ring->rx_stats.packets += total_packets; rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes; rx_ring->rx_stats.bytes += total_bytes;
u64_stats_update_end(&rx_ring->rx_syncp); u64_stats_update_end(&rx_ring->rx_syncp);
rx_ring->total_packets += total_packets; q_vector->rx.total_packets += total_packets;
rx_ring->total_bytes += total_bytes; q_vector->rx.total_bytes += total_bytes;
if (cleaned_count) if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count); igb_alloc_rx_buffers(rx_ring, cleaned_count);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment