Commit e75d1b2c authored by Maciej Fijalkowski, committed by Jeff Kirsher

ice: get rid of per-tc flow in Tx queue configuration routines

There's no reason to treat DCB as a first-class citizen when configuring
the Tx queues by going through TCs. Reverse the logic and base the
configuration on the rings, which are the objects of interest anyway.
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent eff380aa
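The gist of the change, sketched as a standalone toy program before the diff itself: the old flow iterates TCs and then queues within each TC, the new flow iterates the VSI's Tx rings directly. The TC layout and queue counts below are made-up for illustration, not driver values; both loops visit the same queues, but the flat one needs no per-TC bookkeeping.

#include <stdio.h>
#include <stdint.h>

/* Toy VSI: 2 traffic classes, TC0 owns queues 0-15, TC1 owns queues 16-19. */
#define NUM_TC	2
static const uint16_t qcount_tx[NUM_TC] = { 16, 4 };
#define NUM_TXQ	20

int main(void)
{
	uint16_t q_idx = 0;

	/* Old flow: TC-first, nested loop, running absolute queue index. */
	for (uint8_t tc = 0; tc < NUM_TC; tc++)
		for (uint16_t i = 0; i < qcount_tx[tc]; i++)
			printf("old: configure queue %u (TC %u, index %u within TC)\n",
			       q_idx++, tc, i);

	/* New flow: ring-first, one flat loop over the VSI's Tx queues. */
	for (q_idx = 0; q_idx < NUM_TXQ; q_idx++)
		printf("new: configure queue %u\n", q_idx);

	return 0;
}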
@@ -190,6 +190,21 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
 	wr32(hw, GLINT_CTL, regval);
 }
 
+/**
+ * ice_calc_q_handle - calculate the queue handle
+ * @vsi: VSI that ring belongs to
+ * @ring: ring to get the absolute queue index
+ * @tc: traffic class number
+ */
+static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+{
+	/* Idea here for the calculation: subtract the TC's queue offset from
+	 * the ring's absolute queue index, and the result is the queue's
+	 * index within that TC.
+	 */
+	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
+}
+
 /**
  * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
  * @ring: The Tx ring to configure
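The helper above replaces the caller-supplied tc_q_idx. The same arithmetic as a worked, standalone example; the struct names and qoffset values here are simplified stand-ins for the driver's ice_vsi/ice_ring bookkeeping, chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the driver's TC bookkeeping and ring. */
struct tc_info { uint16_t qoffset; };	/* first absolute queue of the TC */
struct ring    { uint16_t q_index; uint8_t tc; };

/* Mirrors ice_calc_q_handle(): absolute queue index minus the TC's offset
 * gives the queue's index within its TC.
 */
static uint16_t calc_q_handle(const struct tc_info *tcs, const struct ring *r)
{
	return r->q_index - tcs[r->tc].qoffset;
}

int main(void)
{
	/* TC0 starts at queue 0, TC1 starts at queue 16. */
	struct tc_info tcs[] = { { 0 }, { 16 } };
	struct ring r = { .q_index = 20, .tc = 1 };

	/* Queue 20 is the fifth queue of TC1, so its handle within the TC is 4. */
	printf("q_handle = %u\n", calc_q_handle(tcs, &r));
	return 0;
}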
@@ -522,13 +537,11 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
  * ice_vsi_cfg_txq - Configure single Tx queue
  * @vsi: the VSI that queue belongs to
  * @ring: Tx ring to be configured
- * @tc_q_idx: queue index within given TC
  * @qg_buf: queue group buffer
- * @tc: TC that Tx ring belongs to
  */
 int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
-		struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+		struct ice_aqc_add_tx_qgrp *qg_buf)
 {
 	struct ice_tlan_ctx tlan_ctx = { 0 };
 	struct ice_aqc_add_txqs_perq *txq;
@@ -536,6 +549,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
 	u8 buf_len = sizeof(*qg_buf);
 	enum ice_status status;
 	u16 pf_q;
+	u8 tc;
 
 	pf_q = ring->reg_idx;
 	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
@@ -549,10 +563,15 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
 	 */
 	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
 
+	if (IS_ENABLED(CONFIG_DCB))
+		tc = ring->dcb_tc;
+	else
+		tc = 0;
+
 	/* Add unique software queue handle of the Tx queue per
 	 * TC into the VSI Tx ring
 	 */
-	ring->q_handle = tc_q_idx;
+	ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
 
 	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
 				 1, qg_buf, buf_len, NULL);
...
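Note that the TC lookup added above is guarded with IS_ENABLED(CONFIG_DCB), which resolves at compile time, so non-DCB builds get a constant tc = 0 and the compiler can discard the dead branch. A minimal userspace sketch of that pattern; IS_ENABLED_DCB is a hand-rolled stand-in for the kernel macro, not the real thing:

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(CONFIG_DCB): a plain compile-time
 * constant, so the untaken branch below is dead code.
 */
#ifdef CONFIG_DCB
#define IS_ENABLED_DCB 1
#else
#define IS_ENABLED_DCB 0
#endif

int main(void)
{
	unsigned char dcb_tc = 3;	/* TC the ring would carry under DCB */
	unsigned char tc;

	if (IS_ENABLED_DCB)
		tc = dcb_tc;
	else
		tc = 0;

	printf("tc = %u\n", tc);	/* prints 0 unless built with -DCONFIG_DCB */
	return 0;
}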
@@ -13,8 +13,8 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
 void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
 int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
-		struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc);
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+		struct ice_aqc_add_tx_qgrp *qg_buf);
 void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
 void
 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
...
@@ -1225,42 +1225,31 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 /**
  * ice_vsi_cfg_txqs - Configure the VSI for Tx
  * @vsi: the VSI being configured
  * @rings: Tx ring array to be configured
- * @offset: offset within vsi->txq_map
  *
  * Return 0 on success and a negative value on error
  * Configure the Tx VSI for operation.
  */
 static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
 {
 	struct ice_aqc_add_tx_qgrp *qg_buf;
-	struct ice_pf *pf = vsi->back;
-	u16 q_idx = 0, i;
+	u16 q_idx = 0;
 	int err = 0;
-	u8 tc;
 
-	qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
+	qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
 	if (!qg_buf)
 		return -ENOMEM;
 
 	qg_buf->num_txqs = 1;
 
-	/* set up and configure the Tx queues for each enabled TC */
-	ice_for_each_traffic_class(tc) {
-		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
-			break;
-
-		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
-			err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
-					      qg_buf, tc);
-			if (err)
-				goto err_cfg_txqs;
-
-			q_idx++;
-		}
+	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+		if (err)
+			goto err_cfg_txqs;
 	}
 
 err_cfg_txqs:
-	devm_kfree(&pf->pdev->dev, qg_buf);
+	kfree(qg_buf);
 	return err;
 }
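A side effect of dropping the pf pointer above is that the queue-group buffer no longer needs device-managed allocation: it lives only for the duration of the function and is freed on every exit path, so plain kzalloc()/kfree() suffices. The same scoped pattern as a standalone sketch; calloc/free stand in for the kernel allocators, the buffer size is arbitrary, and cfg_one_txq is a hypothetical stub for ice_vsi_cfg_txq():

#include <stdlib.h>

/* Hypothetical stand-in for ice_vsi_cfg_txq(): configure one queue. */
static int cfg_one_txq(unsigned int q, unsigned char *qg_buf)
{
	(void)q; (void)qg_buf;
	return 0;
}

/* Scoped allocation: the buffer's lifetime is this call, so tying it to a
 * device's lifetime (devm_*) buys nothing. A single exit label keeps the
 * free on every path, mirroring err_cfg_txqs above.
 */
static int cfg_all_txqs(unsigned int num_txq)
{
	unsigned char *qg_buf = calloc(1, 192);	/* ~ kzalloc(sizeof(*qg_buf)) */
	int err = 0;

	if (!qg_buf)
		return -1;			/* ~ -ENOMEM */

	for (unsigned int q = 0; q < num_txq; q++) {
		err = cfg_one_txq(q, qg_buf);
		if (err)
			goto out;
	}

out:
	free(qg_buf);				/* ~ kfree(qg_buf) */
	return err;
}

int main(void) { return cfg_all_txqs(4) ? 1 : 0; }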
@@ -1273,7 +1262,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
  */
 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
 {
-	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
+	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
 }
 
 /**
@@ -1463,34 +1452,24 @@ static int
 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 		      u16 rel_vmvf_num, struct ice_ring **rings)
 {
-	u16 i, q_idx = 0;
-	int status;
-	u8 tc;
+	u16 q_idx;
 
 	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
 		return -EINVAL;
 
-	/* set up the Tx queue list to be disabled for each enabled TC */
-	ice_for_each_traffic_class(tc) {
-		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
-			break;
-
-		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
-			struct ice_txq_meta txq_meta = { };
-
-			if (!rings || !rings[q_idx])
-				return -EINVAL;
-
-			ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
-			status = ice_vsi_stop_tx_ring(vsi, rst_src,
-						      rel_vmvf_num,
-						      rings[q_idx], &txq_meta);
-
-			if (status)
-				return status;
-
-			q_idx++;
-		}
+	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+		struct ice_txq_meta txq_meta = { };
+		int status;
+
+		if (!rings || !rings[q_idx])
+			return -EINVAL;
+
+		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
+		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
+					      rings[q_idx], &txq_meta);
+
+		if (status)
+			return status;
 	}
 
 	return 0;
...