Commit d2b464a7 authored by Brett Creeley, committed by Jeff Kirsher

ice: Add more flexibility on how we assign an ITR index

This issue came about when looking at the VF function
ice_vc_cfg_irq_map_msg. Currently we are assigning the itr_setting value
to the itr_idx received from the AVF driver, which is not correct and is
not used for the VF flow anyway. Currently the only way we set the ITR
index for both the PF and VF driver is by hard coding ICE_TX_ITR or
ICE_RX_ITR for the ITR index on each q_vector.

To fix this, add the member itr_idx in struct ice_ring_container. This
can then be used to dynamically program the correct ITR index. This change
also affected the PF driver so make the necessary changes there as well.

Also, remove the itr_setting member in struct ice_ring because it is not
being used meaningfully and is going to be removed in a future patch that
includes dynamic ITR.

On another note, this will be useful moving forward if we decide to split
Rx/Tx rings on different q_vectors instead of sharing them as queue pairs.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 072f0c3d
...@@ -1204,7 +1204,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ...@@ -1204,7 +1204,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi; ring->vsi = vsi;
ring->dev = &pf->pdev->dev; ring->dev = &pf->pdev->dev;
ring->count = vsi->num_desc; ring->count = vsi->num_desc;
ring->itr_setting = ICE_DFLT_TX_ITR;
vsi->tx_rings[i] = ring; vsi->tx_rings[i] = ring;
} }
...@@ -1224,7 +1223,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ...@@ -1224,7 +1223,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev; ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev; ring->dev = &pf->pdev->dev;
ring->count = vsi->num_desc; ring->count = vsi->num_desc;
ring->itr_setting = ICE_DFLT_RX_ITR;
vsi->rx_rings[i] = ring; vsi->rx_rings[i] = ring;
} }
...@@ -1261,6 +1259,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) ...@@ -1261,6 +1259,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v; q_vector->num_ring_tx = tx_rings_per_v;
q_vector->tx.ring = NULL; q_vector->tx.ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
q_base = vsi->num_txq - tx_rings_rem; q_base = vsi->num_txq - tx_rings_rem;
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
...@@ -1276,6 +1275,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) ...@@ -1276,6 +1275,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v; q_vector->num_ring_rx = rx_rings_per_v;
q_vector->rx.ring = NULL; q_vector->rx.ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
q_base = vsi->num_rxq - rx_rings_rem; q_base = vsi->num_rxq - rx_rings_rem;
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
...@@ -1683,6 +1683,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) ...@@ -1683,6 +1683,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
return 0; return 0;
} }
/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 * @vector: HW vector index to apply the interrupt throttling to
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in. Only containers that
 * actually have rings attached (num_ring_rx/num_ring_tx non-zero) are
 * programmed; the per-container itr_idx selects which GLINT_ITR register
 * of the vector receives the throttle value.
 */
static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
{
/* ITR register granularity (usecs per register tick) comes from the HW
 * capabilities; used to scale the default usec values below.
 */
u8 itr_gran = hw->itr_gran;
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
/* convert the default Rx ITR (usecs) into register units */
rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
rc->latency_range = ICE_LOW_LATENCY;
/* program the Rx container's ITR index for this vector */
wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
}
if (q_vector->num_ring_tx) {
struct ice_ring_container *rc = &q_vector->tx;
/* convert the default Tx ITR (usecs) into register units */
rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
rc->latency_range = ICE_LOW_LATENCY;
/* program the Tx container's ITR index for this vector */
wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
}
}
/** /**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured * @vsi: the VSI being configured
...@@ -1693,31 +1724,13 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) ...@@ -1693,31 +1724,13 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
u16 vector = vsi->hw_base_vector; u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
u32 txq = 0, rxq = 0; u32 txq = 0, rxq = 0;
int i, q, itr; int i, q;
u8 itr_gran;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) { for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i]; struct ice_q_vector *q_vector = vsi->q_vectors[i];
itr_gran = hw->itr_gran; ice_cfg_itr(hw, q_vector, vector);
q_vector->intrl = ICE_DFLT_INTRL;
if (q_vector->num_ring_rx) {
q_vector->rx.itr =
ITR_TO_REG(vsi->rx_rings[rxq]->itr_setting,
itr_gran);
q_vector->rx.latency_range = ICE_LOW_LATENCY;
}
if (q_vector->num_ring_tx) {
q_vector->tx.itr =
ITR_TO_REG(vsi->tx_rings[txq]->itr_setting,
itr_gran);
q_vector->tx.latency_range = ICE_LOW_LATENCY;
}
wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
wr32(hw, GLINT_RATE(vector), wr32(hw, GLINT_RATE(vector),
ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
...@@ -1733,32 +1746,32 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) ...@@ -1733,32 +1746,32 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF. * tracked for this PF.
*/ */
for (q = 0; q < q_vector->num_ring_tx; q++) { for (q = 0; q < q_vector->num_ring_tx; q++) {
int itr_idx = q_vector->tx.itr_idx;
u32 val; u32 val;
itr = ICE_ITR_NONE;
if (vsi->type == ICE_VSI_VF) if (vsi->type == ICE_VSI_VF)
val = QINT_TQCTL_CAUSE_ENA_M | val = QINT_TQCTL_CAUSE_ENA_M |
(itr << QINT_TQCTL_ITR_INDX_S) | (itr_idx << QINT_TQCTL_ITR_INDX_S) |
((i + 1) << QINT_TQCTL_MSIX_INDX_S); ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
else else
val = QINT_TQCTL_CAUSE_ENA_M | val = QINT_TQCTL_CAUSE_ENA_M |
(itr << QINT_TQCTL_ITR_INDX_S) | (itr_idx << QINT_TQCTL_ITR_INDX_S) |
(vector << QINT_TQCTL_MSIX_INDX_S); (vector << QINT_TQCTL_MSIX_INDX_S);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
txq++; txq++;
} }
for (q = 0; q < q_vector->num_ring_rx; q++) { for (q = 0; q < q_vector->num_ring_rx; q++) {
int itr_idx = q_vector->rx.itr_idx;
u32 val; u32 val;
itr = ICE_ITR_NONE;
if (vsi->type == ICE_VSI_VF) if (vsi->type == ICE_VSI_VF)
val = QINT_RQCTL_CAUSE_ENA_M | val = QINT_RQCTL_CAUSE_ENA_M |
(itr << QINT_RQCTL_ITR_INDX_S) | (itr_idx << QINT_RQCTL_ITR_INDX_S) |
((i + 1) << QINT_RQCTL_MSIX_INDX_S); ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
else else
val = QINT_RQCTL_CAUSE_ENA_M | val = QINT_RQCTL_CAUSE_ENA_M |
(itr << QINT_RQCTL_ITR_INDX_S) | (itr_idx << QINT_RQCTL_ITR_INDX_S) |
(vector << QINT_RQCTL_MSIX_INDX_S); (vector << QINT_RQCTL_MSIX_INDX_S);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq++; rxq++;
...@@ -2157,8 +2170,8 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) ...@@ -2157,8 +2170,8 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++, vector++) { for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i]; struct ice_q_vector *q_vector = vsi->q_vectors[i];
wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) { for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
txq++; txq++;
......
...@@ -105,8 +105,9 @@ enum ice_rx_dtype { ...@@ -105,8 +105,9 @@ enum ice_rx_dtype {
#define ICE_TX_ITR ICE_IDX_ITR1 #define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define ICE_ITR_8K 125 #define ICE_ITR_8K 125
#define ICE_DFLT_TX_ITR ICE_ITR_8K #define ICE_ITR_20K 50
#define ICE_DFLT_RX_ITR ICE_ITR_8K #define ICE_DFLT_TX_ITR ICE_ITR_20K
#define ICE_DFLT_RX_ITR ICE_ITR_20K
/* apply ITR granularity translation to program the register. itr_gran is either /* apply ITR granularity translation to program the register. itr_gran is either
* 2 or 4 usecs so we need to divide by 2 first then shift by that value * 2 or 4 usecs so we need to divide by 2 first then shift by that value
*/ */
...@@ -135,13 +136,6 @@ struct ice_ring { ...@@ -135,13 +136,6 @@ struct ice_ring {
u16 q_index; /* Queue number of ring */ u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */ u32 txq_teid; /* Added Tx queue TEID */
/* high bit set means dynamic, use accessor routines to read/write.
* hardware supports 4us/2us resolution for the ITR registers.
* these values always store the USER setting, and must be converted
* before programming to a register.
*/
u16 itr_setting;
u16 count; /* Number of descriptors */ u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */ u16 reg_idx; /* HW register index of the ring */
...@@ -178,6 +172,7 @@ struct ice_ring_container { ...@@ -178,6 +172,7 @@ struct ice_ring_container {
unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */ unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range; enum ice_latency_range latency_range;
int itr_idx; /* index in the interrupt vector */
u16 itr; u16 itr;
}; };
......
...@@ -1678,26 +1678,30 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) ...@@ -1678,26 +1678,30 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* lookout for the invalid queue index */ /* lookout for the invalid queue index */
qmap = map->rxq_map; qmap = map->rxq_map;
for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
struct ice_q_vector *q_vector;
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
aq_ret = ICE_ERR_PARAM; aq_ret = ICE_ERR_PARAM;
goto error_param; goto error_param;
} }
vsi->q_vectors[i]->num_ring_rx++; q_vector = vsi->q_vectors[i];
vsi->rx_rings[vsi_q_id]->itr_setting = q_vector->num_ring_rx++;
map->rxitr_idx; q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = vsi->q_vectors[i]; vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
} }
qmap = map->txq_map; qmap = map->txq_map;
for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
struct ice_q_vector *q_vector;
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
aq_ret = ICE_ERR_PARAM; aq_ret = ICE_ERR_PARAM;
goto error_param; goto error_param;
} }
vsi->q_vectors[i]->num_ring_tx++; q_vector = vsi->q_vectors[i];
vsi->tx_rings[vsi_q_id]->itr_setting = q_vector->num_ring_tx++;
map->txitr_idx; q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = vsi->q_vectors[i]; vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment