Commit 742c9875 authored by Jacob Keller, committed by Jeff Kirsher

i40e/i40evf: avoid dynamic ITR updates when polling or low packet rate

The dynamic ITR algorithm depends on a calculation of usecs which
assumes that the interrupts have been firing constantly at the interrupt
throttle rate. This is not guaranteed because we could have a low packet
rate, or have been polling in software.

We'll estimate whether this is the case by using jiffies to determine how
long it has been since the last ITR update. If the jiffies time difference
is larger than the expected interval, the calculation is guaranteed to be
incorrect. If the difference is smaller, we might have been polling for
part of the interval, but the discrepancy shouldn't affect the calculation
too much.

This ensures that we don't get stuck in BULK latency during certain rare
situations where we receive bursts of packets that force us into NAPI
polling.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 0a2c7722
...@@ -961,11 +961,25 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -961,11 +961,25 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
enum i40e_latency_range new_latency_range = rc->latency_range; enum i40e_latency_range new_latency_range = rc->latency_range;
u32 new_itr = rc->itr; u32 new_itr = rc->itr;
int bytes_per_int; int bytes_per_int;
int usecs; unsigned int usecs, estimated_usecs;
if (rc->total_packets == 0 || !rc->itr) if (rc->total_packets == 0 || !rc->itr)
return false; return false;
usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
bytes_per_int = rc->total_bytes / usecs;
/* The calculations in this algorithm depend on interrupts actually
* firing at the ITR rate. This may not happen if the packet rate is
* really low, or if we've been napi polling. Check to make sure
* that's not the case before we continue.
*/
estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
if (estimated_usecs > usecs) {
new_latency_range = I40E_LOW_LATENCY;
goto reset_latency;
}
/* simple throttlerate management /* simple throttlerate management
* 0-10MB/s lowest (50000 ints/s) * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s) * 10-20MB/s low (20000 ints/s)
...@@ -977,9 +991,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -977,9 +991,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
* are in 2 usec increments in the ITR registers, and make sure * are in 2 usec increments in the ITR registers, and make sure
* to use the smoothed values that the countdown timer gives us. * to use the smoothed values that the countdown timer gives us.
*/ */
usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
bytes_per_int = rc->total_bytes / usecs;
switch (new_latency_range) { switch (new_latency_range) {
case I40E_LOWEST_LATENCY: case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10) if (bytes_per_int > 10)
...@@ -998,6 +1009,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -998,6 +1009,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break; break;
} }
reset_latency:
rc->latency_range = new_latency_range; rc->latency_range = new_latency_range;
switch (new_latency_range) { switch (new_latency_range) {
...@@ -1016,12 +1028,12 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -1016,12 +1028,12 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
rc->total_bytes = 0; rc->total_bytes = 0;
rc->total_packets = 0; rc->total_packets = 0;
rc->last_itr_update = jiffies;
if (new_itr != rc->itr) { if (new_itr != rc->itr) {
rc->itr = new_itr; rc->itr = new_itr;
return true; return true;
} }
return false; return false;
} }
......
...@@ -461,6 +461,7 @@ struct i40e_ring_container { ...@@ -461,6 +461,7 @@ struct i40e_ring_container {
struct i40e_ring *ring; struct i40e_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */ unsigned int total_packets; /* total packets processed this int */
unsigned long last_itr_update; /* jiffies of last ITR update */
u16 count; u16 count;
enum i40e_latency_range latency_range; enum i40e_latency_range latency_range;
u16 itr; u16 itr;
......
...@@ -359,11 +359,25 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -359,11 +359,25 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
enum i40e_latency_range new_latency_range = rc->latency_range; enum i40e_latency_range new_latency_range = rc->latency_range;
u32 new_itr = rc->itr; u32 new_itr = rc->itr;
int bytes_per_int; int bytes_per_int;
int usecs; unsigned int usecs, estimated_usecs;
if (rc->total_packets == 0 || !rc->itr) if (rc->total_packets == 0 || !rc->itr)
return false; return false;
usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
bytes_per_int = rc->total_bytes / usecs;
/* The calculations in this algorithm depend on interrupts actually
* firing at the ITR rate. This may not happen if the packet rate is
* really low, or if we've been napi polling. Check to make sure
* that's not the case before we continue.
*/
estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
if (estimated_usecs > usecs) {
new_latency_range = I40E_LOW_LATENCY;
goto reset_latency;
}
/* simple throttlerate management /* simple throttlerate management
* 0-10MB/s lowest (50000 ints/s) * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s) * 10-20MB/s low (20000 ints/s)
...@@ -375,9 +389,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -375,9 +389,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
* are in 2 usec increments in the ITR registers, and make sure * are in 2 usec increments in the ITR registers, and make sure
* to use the smoothed values that the countdown timer gives us. * to use the smoothed values that the countdown timer gives us.
*/ */
usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
bytes_per_int = rc->total_bytes / usecs;
switch (new_latency_range) { switch (new_latency_range) {
case I40E_LOWEST_LATENCY: case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10) if (bytes_per_int > 10)
...@@ -396,6 +407,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -396,6 +407,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
break; break;
} }
reset_latency:
rc->latency_range = new_latency_range; rc->latency_range = new_latency_range;
switch (new_latency_range) { switch (new_latency_range) {
...@@ -414,12 +426,12 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) ...@@ -414,12 +426,12 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
rc->total_bytes = 0; rc->total_bytes = 0;
rc->total_packets = 0; rc->total_packets = 0;
rc->last_itr_update = jiffies;
if (new_itr != rc->itr) { if (new_itr != rc->itr) {
rc->itr = new_itr; rc->itr = new_itr;
return true; return true;
} }
return false; return false;
} }
......
...@@ -432,6 +432,7 @@ struct i40e_ring_container { ...@@ -432,6 +432,7 @@ struct i40e_ring_container {
struct i40e_ring *ring; struct i40e_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */ unsigned int total_packets; /* total packets processed this int */
unsigned long last_itr_update; /* jiffies of last ITR update */
u16 count; u16 count;
enum i40e_latency_range latency_range; enum i40e_latency_range latency_range;
u16 itr; u16 itr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment