Commit 8ede0178 authored by Anirudh Venkataramanan, committed by Jeff Kirsher

ice: Update VSI and queue management code to handle VF VSI

Until now, all the VSI and queue management code supported only the PF
VSI type (ICE_VSI_PF). Update these flows to handle the VF VSI type
(ICE_VSI_VF) as well.
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent ddf30f7f
@@ -202,6 +202,8 @@ struct ice_vsi {
 	/* Interrupt thresholds */
 	u16 work_lmt;
 
+	s16 vf_id;		/* VF ID for SR-IOV VSIs */
+
 	/* RSS config */
 	u16 rss_table_size;	/* HW RSS table size */
 	u16 rss_size;		/* Allocated RSS queues */
......
@@ -312,6 +312,7 @@
 #define GLV_UPTCH(_i)			(0x0030A004 + ((_i) * 8))
 #define GLV_UPTCL(_i)			(0x0030A000 + ((_i) * 8))
 #define VSIQF_HKEY_MAX_INDEX		12
+#define VSIQF_HLUT_MAX_INDEX		15
 #define VFINT_DYN_CTLN(_i)		(0x00003800 + ((_i) * 4))
 #define VFINT_DYN_CTLN_CLEARPBA_M	BIT(1)
......
@@ -418,6 +418,7 @@ struct ice_tlan_ctx {
 	u8  pf_num;
 	u16 vmvf_num;
 	u8  vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VF	0
 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ	1
 #define ICE_TLAN_CTX_VMVF_TYPE_PF	2
 	u16 src_vsi;
......
@@ -68,18 +68,20 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Enable Flexible Descriptors in the queue context which
 	 * allows this driver to select a specific receive descriptor format
 	 */
-	regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
-	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
-		QRXFLXP_CNTXT_RXDID_IDX_M;
+	if (vsi->type != ICE_VSI_VF) {
+		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+			QRXFLXP_CNTXT_RXDID_IDX_M;
 
-	/* increasing context priority to pick up profile id;
-	 * default is 0x01; setting to 0x03 to ensure profile
-	 * is programming if prev context is of same priority
-	 */
-	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
-		QRXFLXP_CNTXT_RXDID_PRIO_M;
+		/* increasing context priority to pick up profile id;
+		 * default is 0x01; setting to 0x03 to ensure profile
+		 * is programming if prev context is of same priority
+		 */
+		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+			QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+	}
 
 	/* Absolute queue number out of 2K needs to be passed */
 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -90,6 +92,9 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 		return -EIO;
 	}
 
+	if (vsi->type == ICE_VSI_VF)
+		return 0;
+
 	/* init queue specific tail register */
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
 	writel(0, ring->tail);
@@ -132,6 +137,11 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
 	case ICE_VSI_PF:
 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
 		break;
+	case ICE_VSI_VF:
+		/* Firmware expects vmvf_num to be absolute VF id */
+		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+		break;
 	default:
 		return;
 	}
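Note (not part of the patch): the vmvf_num computation in the ICE_VSI_VF case above is plain offset arithmetic; firmware wants the device-wide VF number, which is the function's VF base offset plus the PF-relative VF index. A standalone sketch with assumed example values:

```c
#include <stdio.h>

int main(void)
{
	/* Assumed example values: vf_base_id comes from the device's
	 * function capabilities, vf_id is the PF-relative VF index.
	 */
	unsigned int vf_base_id = 64;
	unsigned int vf_id = 3;
	unsigned int vmvf_num = vf_base_id + vf_id;

	/* Prints 67: the absolute VF number written into the Tx queue
	 * context (and into ctxt.vf_num in ice_vsi_init further below).
	 */
	printf("absolute VF number: %u\n", vmvf_num);
	return 0;
}
```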
@@ -285,6 +295,16 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 		vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
 		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
 		break;
+	case ICE_VSI_VF:
+		vsi->alloc_txq = pf->num_vf_qps;
+		vsi->alloc_rxq = pf->num_vf_qps;
+		/* pf->num_vf_msix includes (VF miscellaneous vector +
+		 * data queue interrupts). Since vsi->num_q_vectors is the
+		 * number of queue vectors, subtract 1 from the original
+		 * vector count.
+		 */
+		vsi->num_q_vectors = pf->num_vf_msix - 1;
+		break;
 	default:
 		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
 			 vsi->type);
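Note (illustration only, with an assumed per-VF MSI-X count): the subtraction above reflects that one of the pf->num_vf_msix vectors is the VF's miscellaneous/mailbox interrupt, so only the remainder can back queue vectors.

```c
#include <stdio.h>

int main(void)
{
	unsigned int num_vf_msix = 5;	/* assumed: 1 misc + 4 data interrupts */
	unsigned int num_q_vectors = num_vf_msix - 1;

	printf("MSI-X vectors per VF: %u, queue vectors for the VF VSI: %u\n",
	       num_vf_msix, num_q_vectors);
	return 0;
}
```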
@@ -331,6 +351,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
 	struct ice_vsi_ctx ctxt;
 	enum ice_status status;
 
+	if (vsi->type == ICE_VSI_VF)
+		ctxt.vf_num = vsi->vf_id;
 	ctxt.vsi_num = vsi->vsi_num;
 	memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
@@ -466,6 +488,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
 		/* Setup default MSIX irq handler for VSI */
 		vsi->irq_handler = ice_msix_clean_rings;
 		break;
+	case ICE_VSI_VF:
+		if (ice_vsi_alloc_arrays(vsi, true))
+			goto err_rings;
+		break;
 	default:
 		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
 		goto unlock_pf;
@@ -685,6 +711,15 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
 			      BIT(cap->rss_table_entry_width));
 		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 		break;
+	case ICE_VSI_VF:
+		/* VF VSI will get a small RSS table.
+		 * For VSI_LUT, LUT size should be set to 64 bytes.
+		 */
+		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+		vsi->rss_size = min_t(int, num_online_cpus(),
+				      BIT(cap->rss_table_entry_width));
+		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+		break;
 	default:
 		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
 			 vsi->type);
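Note (sketch, not from the patch): the 64-byte figure in the comment follows from the VSIQF_HLUT_MAX_INDEX register index added in ice_hw_autogen.h above, on the assumption that each of the 16 VSIQF_HLUT registers carries four one-byte LUT entries; that is what the ICE_VSIQF_HLUT_ARRAY_SIZE macro at the end of this patch encodes.

```c
#include <stdio.h>

#define VSIQF_HLUT_MAX_INDEX		15
/* (MAX_INDEX + 1) registers times 4 LUT entries per register */
#define ICE_VSIQF_HLUT_ARRAY_SIZE	((VSIQF_HLUT_MAX_INDEX + 1) * 4)

int main(void)
{
	/* Prints 64, matching the "64 bytes" noted for the VSI LUT */
	printf("per-VSI RSS LUT size: %d bytes\n", ICE_VSIQF_HLUT_ARRAY_SIZE);
	return 0;
}
```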
@@ -773,17 +808,17 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	 * Setup number and offset of Rx queues for all TCs for the VSI
 	 */
 
+	qcount = numq_tc;
 	/* qcount will change if RSS is enabled */
 	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
-		if (vsi->type == ICE_VSI_PF)
-			max_rss = ICE_MAX_LG_RSS_QS;
-		else
-			max_rss = ICE_MAX_SMALL_RSS_QS;
-
-		qcount = min_t(int, numq_tc, max_rss);
-		qcount = min_t(int, qcount, vsi->rss_size);
-	} else {
-		qcount = numq_tc;
+		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
+			if (vsi->type == ICE_VSI_PF)
+				max_rss = ICE_MAX_LG_RSS_QS;
+			else
+				max_rss = ICE_MAX_SMALL_RSS_QS;
+			qcount = min_t(int, numq_tc, max_rss);
+			qcount = min_t(int, qcount, vsi->rss_size);
+		}
 	}
 
 	/* find the (rounded up) power-of-2 of qcount */
@@ -813,6 +848,14 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	vsi->num_txq = qcount_tx;
 	vsi->num_rxq = offset;
 
+	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+		/* since there is a chance that num_rxq could have been changed
+		 * in the above for loop, make num_txq equal to num_rxq.
+		 */
+		vsi->num_txq = vsi->num_rxq;
+	}
+
 	/* Rx queue mapping */
 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
 	/* q_mapping buffer holds the info for the first queue allocated for
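Note (standalone sketch with assumed values, not driver code): for a VF VSI with RSS enabled, qcount starts at numq_tc and is clamped first by the small-VSI RSS cap and then by the allocated RSS size, before the driver rounds it up to a power of two for the queue map.

```c
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int numq_tc = 16;		/* assumed queues for this TC */
	unsigned int max_small_rss_qs = 8;	/* assumed ICE_MAX_SMALL_RSS_QS */
	unsigned int rss_size = 12;		/* assumed vsi->rss_size */
	unsigned int qcount = numq_tc;

	/* mirrors the RSS-enabled VF branch above */
	qcount = min_u(qcount, max_small_rss_qs);
	qcount = min_u(qcount, rss_size);

	printf("qcount used for the VF queue map: %u\n", qcount);	/* 8 */
	return 0;
}
```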
@@ -838,6 +881,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
 		break;
+	case ICE_VSI_VF:
+		/* VF VSI will get a small RSS table which is a VSI LUT type */
+		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+		break;
 	default:
 		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
 			 vsi->type);
@@ -868,6 +916,11 @@ static int ice_vsi_init(struct ice_vsi *vsi)
 	case ICE_VSI_PF:
 		ctxt.flags = ICE_AQ_VSI_TYPE_PF;
 		break;
+	case ICE_VSI_VF:
+		ctxt.flags = ICE_AQ_VSI_TYPE_VF;
+		/* VF number here is the absolute VF number (0-255) */
+		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+		break;
 	default:
 		return -ENODEV;
 	}
@@ -961,6 +1014,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
 	q_vector->vsi = vsi;
 	q_vector->v_idx = v_idx;
+	if (vsi->type == ICE_VSI_VF)
+		goto out;
 	/* only set affinity_mask if the CPU is online */
 	if (cpu_online(v_idx))
 		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
@@ -973,6 +1028,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
 	netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
 		       NAPI_POLL_WEIGHT);
 
+out:
 	/* tie q_vector and VSI together */
 	vsi->q_vectors[v_idx] = q_vector;
@@ -1067,6 +1123,13 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
 		vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
 						  num_q_vectors, vsi->idx);
 		break;
+	case ICE_VSI_VF:
+		/* take VF misc vector and data vectors into account */
+		num_q_vectors = pf->num_vf_msix;
+		/* For VF VSI, reserve slots only from HW interrupts */
+		vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+						  num_q_vectors, vsi->idx);
+		break;
 	default:
 		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
 			 vsi->type);
@@ -1077,9 +1140,11 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
 		dev_err(&pf->pdev->dev,
 			"Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
 			num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
-		ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
-			     vsi->idx);
-		pf->num_avail_sw_msix += num_q_vectors;
+		if (vsi->type != ICE_VSI_VF) {
+			ice_free_res(vsi->back->sw_irq_tracker,
+				     vsi->sw_base_vector, vsi->idx);
+			pf->num_avail_sw_msix += num_q_vectors;
+		}
 		return -ENOENT;
 	}
@@ -1512,6 +1577,9 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 	int err = 0;
 	u16 i;
 
+	if (vsi->type == ICE_VSI_VF)
+		goto setup_rings;
+
 	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
 		vsi->max_frame = vsi->netdev->mtu +
 			ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
@@ -1519,6 +1587,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 		vsi->max_frame = ICE_RXBUF_2048;
 	vsi->rx_buf_len = ICE_RXBUF_2048;
 
+setup_rings:
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_rxq && !err; i++)
 		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
@@ -1667,9 +1736,14 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
 		u32 val;
 
 		itr = ICE_ITR_NONE;
-		val = QINT_TQCTL_CAUSE_ENA_M |
-		      (itr << QINT_TQCTL_ITR_INDX_S) |
-		      (vector << QINT_TQCTL_MSIX_INDX_S);
+		if (vsi->type == ICE_VSI_VF)
+			val = QINT_TQCTL_CAUSE_ENA_M |
+			      (itr << QINT_TQCTL_ITR_INDX_S) |
+			      ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+		else
+			val = QINT_TQCTL_CAUSE_ENA_M |
+			      (itr << QINT_TQCTL_ITR_INDX_S) |
+			      (vector << QINT_TQCTL_MSIX_INDX_S);
 		wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
 		txq++;
 	}
@@ -1678,9 +1752,14 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
 		u32 val;
 
 		itr = ICE_ITR_NONE;
-		val = QINT_RQCTL_CAUSE_ENA_M |
-		      (itr << QINT_RQCTL_ITR_INDX_S) |
-		      (vector << QINT_RQCTL_MSIX_INDX_S);
+		if (vsi->type == ICE_VSI_VF)
+			val = QINT_RQCTL_CAUSE_ENA_M |
+			      (itr << QINT_RQCTL_ITR_INDX_S) |
+			      ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+		else
+			val = QINT_RQCTL_CAUSE_ENA_M |
+			      (itr << QINT_RQCTL_ITR_INDX_S) |
+			      (vector << QINT_RQCTL_MSIX_INDX_S);
 		wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
 		rxq++;
 	}
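Note (illustration only): the (i + 1) MSI-X index used for VF VSIs is consistent with the vector accounting earlier in this patch, where the first vector in the VF's space is its miscellaneous interrupt; queue interrupts therefore start at VF-relative index 1 rather than at the PF-relative 'vector' value.

```c
#include <stdio.h>

int main(void)
{
	unsigned int num_q_vectors = 4;	/* assumed, i.e. num_vf_msix - 1 */
	unsigned int i;

	/* VF-relative index 0 is assumed to be the miscellaneous/mailbox
	 * vector, so the Tx/Rx queue causes point at indices 1..num_q_vectors.
	 */
	for (i = 0; i < num_q_vectors; i++)
		printf("q_vector %u -> VF MSI-X index %u\n", i, i + 1);
	return 0;
}
```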
@@ -1937,7 +2016,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
  */
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
-	      enum ice_vsi_type type, u16 __always_unused vf_id)
+	      enum ice_vsi_type type, u16 vf_id)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct device *dev = &pf->pdev->dev;
@@ -1952,6 +2031,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	vsi->port_info = pi;
 	vsi->vsw = pf->first_sw;
+	if (vsi->type == ICE_VSI_VF)
+		vsi->vf_id = vf_id;
 
 	if (ice_vsi_get_qs(vsi)) {
 		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
@@ -1990,6 +2071,34 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 			ice_vsi_cfg_rss_lut_key(vsi);
 		break;
+	case ICE_VSI_VF:
+		/* VF driver will take care of creating netdev for this type and
+		 * map queues to vectors through Virtchnl, PF driver only
+		 * creates a VSI and corresponding structures for bookkeeping
+		 * purpose
+		 */
+		ret = ice_vsi_alloc_q_vectors(vsi);
+		if (ret)
+			goto unroll_vsi_init;
+
+		ret = ice_vsi_alloc_rings(vsi);
+		if (ret)
+			goto unroll_alloc_q_vector;
+
+		/* Setup Vector base only during VF init phase or when VF asks
+		 * for more vectors than assigned number. In all other cases,
+		 * assign hw_base_vector to the value given earlier.
+		 */
+		if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
+			ret = ice_vsi_setup_vector_base(vsi);
+			if (ret)
+				goto unroll_vector_base;
+		} else {
+			vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
+		}
+		pf->q_left_tx -= vsi->alloc_txq;
+		pf->q_left_rx -= vsi->alloc_rxq;
+		break;
 	default:
 		/* if VSI type is not recognized, clean up the resources and
 		 * exit
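Note (hypothetical caller sketch, not part of this patch): with the ICE_VSI_VF case in place, SR-IOV setup code can create one VSI per VF by calling ice_vsi_setup() with the PF-relative VF index. The wrapper name below is made up for illustration, and it assumes pf->hw.port_info is the port the VF VSI should attach to.

```c
/* Hypothetical wrapper for illustration only */
static struct ice_vsi *ice_vf_vsi_setup_sketch(struct ice_pf *pf, u16 vf_id)
{
	return ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_VF, vf_id);
}
```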
@@ -2080,6 +2189,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
 		return;
 
 	ice_vsi_release_msix(vsi);
+	if (vsi->type == ICE_VSI_VF)
+		return;
 
 	vsi->irqs_ready = false;
 	for (i = 0; i < vsi->num_q_vectors; i++) {
@@ -2320,10 +2431,12 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
 int ice_vsi_release(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf;
+	struct ice_vf *vf;
 
 	if (!vsi->back)
 		return -ENODEV;
 	pf = vsi->back;
+	vf = &pf->vf[vsi->vf_id];
 
 	/* do not unregister and free netdevs while driver is in the reset
 	 * recovery pending state. Since reset/rebuild happens through PF
 	 * service task workqueue, its not a good idea to unregister netdev
@@ -2345,10 +2458,23 @@ int ice_vsi_release(struct ice_vsi *vsi)
 		ice_vsi_close(vsi);
 
 	/* reclaim interrupt vectors back to PF */
-	ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
-	pf->num_avail_sw_msix += vsi->num_q_vectors;
-	ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
-	pf->num_avail_hw_msix += vsi->num_q_vectors;
+	if (vsi->type != ICE_VSI_VF) {
+		/* reclaim SW interrupts back to the common pool */
+		ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
+			     vsi->idx);
+		pf->num_avail_sw_msix += vsi->num_q_vectors;
+		/* reclaim HW interrupts back to the common pool */
+		ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
+			     vsi->idx);
+		pf->num_avail_hw_msix += vsi->num_q_vectors;
+	} else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
+		/* Reclaim VF resources back only while freeing all VFs or
+		 * vector reassignment is requested
+		 */
+		ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+			     vsi->idx);
+		pf->num_avail_hw_msix += pf->num_vf_msix;
+	}
 
 	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
 	ice_vsi_delete(vsi);
@@ -2417,6 +2543,22 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 		ice_vsi_map_rings_to_vectors(vsi);
 		break;
+	case ICE_VSI_VF:
+		ret = ice_vsi_alloc_q_vectors(vsi);
+		if (ret)
+			goto err_rings;
+
+		ret = ice_vsi_setup_vector_base(vsi);
+		if (ret)
+			goto err_vectors;
+
+		ret = ice_vsi_alloc_rings(vsi);
+		if (ret)
+			goto err_vectors;
+
+		vsi->back->q_left_tx -= vsi->alloc_txq;
+		vsi->back->q_left_rx -= vsi->alloc_rxq;
+		break;
 	default:
 		break;
 	}
......
@@ -19,6 +19,7 @@ struct ice_vsi_ctx {
 	struct ice_aqc_vsi_props info;
 	struct ice_sched_vsi_info sched;
 	u8 alloc_from_pool;
+	u8 vf_num;
 };
 
 enum ice_sw_fwd_act_type {
......
@@ -443,4 +443,7 @@ struct ice_hw_port_stats {
 #define ICE_SR_SECTOR_SIZE_IN_WORDS	0x800
 #define ICE_SR_WORDS_IN_1KB		512
 
+/* Hash redirection LUT for VSI - maximum array size */
+#define ICE_VSIQF_HLUT_ARRAY_SIZE	((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+
 #endif /* _ICE_TYPE_H_ */