Commit 5743020d authored by Akeem G Abodunrin, committed by Jeff Kirsher

ice: Fix issue reconfiguring VF queues

When a VF requests a change to its number of queues, we need to update the
LAN Tx queue configuration with the correct number of VF queue pairs and
re-allocate VF resources based on this newly requested number of queues,
which is constrained to the maximum number of queues supported per VF.
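
For reference, the granting rule applied in ice_vc_request_qs_msg() can be
summarized with the small standalone sketch below. This is illustration only,
not driver code: resolve_vf_queue_request() is a hypothetical helper and the
value used for ICE_MAX_BASE_QS_PER_VF is an assumption.

    /*
     * Standalone sketch (user space) of the queue-granting rule in
     * ice_vc_request_qs_msg(). Names and the constant value below are
     * assumptions for illustration.
     */
    #include <stdio.h>

    #define ICE_MAX_BASE_QS_PER_VF	16	/* assumed per-VF queue ceiling */

    static int min_int(int a, int b)
    {
    	return a < b ? a : b;
    }

    /* Number of queue pairs the PF would report back for a VF request */
    static int resolve_vf_queue_request(int req_queues, int cur_queues,
    				    int tx_rx_queue_left)
    {
    	int max_allowed_vf_queues = tx_rx_queue_left + cur_queues;

    	if (req_queues <= 0)
    		return cur_queues;		/* bogus request, ignore */
    	if (req_queues > ICE_MAX_BASE_QS_PER_VF)
    		return ICE_MAX_BASE_QS_PER_VF;	/* cap at per-VF maximum */
    	if (req_queues - cur_queues > tx_rx_queue_left)
    		return min_int(max_allowed_vf_queues, ICE_MAX_BASE_QS_PER_VF);
    	return req_queues;			/* grant the request */
    }

    int main(void)
    {
    	/* VF with 4 queues asks for 8 while only 2 queue pairs remain */
    	printf("granted %d queue pairs\n",
    	       resolve_vf_queue_request(8, 4, 2));	/* prints 6 */
    	return 0;
    }

A successful request then takes effect through VF reset, with
ice_alloc_vf_res() picking up vf->num_req_qs, as in the hunks below.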
Signed-off-by: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 23d21c3d
@@ -297,13 +297,19 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
 /**
  * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
  * @vsi: the VSI being configured
+ * @vf_id: Id of the VF being configured
  *
  * Return 0 on success and a negative value on error
  */
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 {
 	struct ice_pf *pf = vsi->back;
+	struct ice_vf *vf = NULL;
+
+	if (vsi->type == ICE_VSI_VF)
+		vsi->vf_id = vf_id;

 	switch (vsi->type) {
 	case ICE_VSI_PF:
 		vsi->alloc_txq = pf->num_lan_tx;
@@ -311,8 +317,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
 		break;
 	case ICE_VSI_VF:
-		vsi->alloc_txq = pf->num_vf_qps;
-		vsi->alloc_rxq = pf->num_vf_qps;
+		vf = &pf->vf[vsi->vf_id];
+		vsi->alloc_txq = vf->num_vf_qs;
+		vsi->alloc_rxq = vf->num_vf_qs;
 		/* pf->num_vf_msix includes (VF miscellaneous vector +
 		 * data queue interrupts). Since vsi->num_q_vectors is number
 		 * of queues vectors, subtract 1 from the original vector
@@ -472,10 +479,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
  * ice_vsi_alloc - Allocates the next available struct VSI in the PF
  * @pf: board private structure
  * @type: type of VSI
+ * @vf_id: Id of the VF being configured
  *
  * returns a pointer to a VSI on success, NULL on failure.
  */
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+static struct ice_vsi *
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
 {
 	struct ice_vsi *vsi = NULL;

@@ -501,7 +510,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
 	vsi->idx = pf->next_vsi;
 	vsi->work_lmt = ICE_DFLT_IRQ_WORK;

-	ice_vsi_set_num_qs(vsi);
+	if (type == ICE_VSI_VF)
+		ice_vsi_set_num_qs(vsi, vf_id);
+	else
+		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

 	switch (vsi->type) {
 	case ICE_VSI_PF:
@@ -2171,7 +2183,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	struct ice_vsi *vsi;
 	int ret, i;

-	vsi = ice_vsi_alloc(pf, type);
+	if (type == ICE_VSI_VF)
+		vsi = ice_vsi_alloc(pf, type, vf_id);
+	else
+		vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+
 	if (!vsi) {
 		dev_err(dev, "could not allocate VSI\n");
 		return NULL;
@@ -2691,7 +2707,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_arrays(vsi, false);
 	ice_dev_onetime_setup(&vsi->back->hw);
-	ice_vsi_set_num_qs(vsi);
+	if (vsi->type == ICE_VSI_VF)
+		ice_vsi_set_num_qs(vsi, vf->vf_id);
+	else
+		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
 	ice_vsi_set_tc_cfg(vsi);

 	/* Initialize VSI struct elements and create VSI in FW */
@@ -495,6 +495,8 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
  */
 static int ice_alloc_vf_res(struct ice_vf *vf)
 {
+	struct ice_pf *pf = vf->pf;
+	int tx_rx_queue_left;
 	int status;

 	/* setup VF VSI and necessary resources */
@@ -502,6 +504,15 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
 	if (status)
 		goto ice_alloc_vf_res_exit;

+	/* Update number of VF queues, in case VF had requested for queue
+	 * changes
+	 */
+	tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+	tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
+	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
+	    vf->num_req_qs != vf->num_vf_qs)
+		vf->num_vf_qs = vf->num_req_qs;
+
 	if (vf->trusted)
 		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 	else
@@ -835,8 +846,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
 	usleep_range(10000, 20000);

 	/* free VF resources to begin resetting the VSI state */
-	for (v = 0; v < pf->num_alloc_vfs; v++)
-		ice_free_vf_res(&pf->vf[v]);
+	for (v = 0; v < pf->num_alloc_vfs; v++) {
+		vf = &pf->vf[v];
+
+		ice_free_vf_res(vf);
+
+		/* Free VF queues as well, and reallocate later.
+		 * If a given VF has different number of queues
+		 * configured, the request for update will come
+		 * via mailbox communication.
+		 */
+		vf->num_vf_qs = 0;
+	}

 	if (ice_check_avail_res(pf)) {
 		dev_err(&pf->pdev->dev,
@@ -845,8 +866,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
 	}

 	/* Finish the reset on each VF */
-	for (v = 0; v < pf->num_alloc_vfs; v++)
-		ice_cleanup_and_realloc_vf(&pf->vf[v]);
+	for (v = 0; v < pf->num_alloc_vfs; v++) {
+		vf = &pf->vf[v];
+
+		vf->num_vf_qs = pf->num_vf_qps;
+		dev_dbg(&pf->pdev->dev,
+			"VF-id %d has %d queues configured\n",
+			vf->vf_id, vf->num_vf_qs);
+		ice_cleanup_and_realloc_vf(vf);
+	}

 	ice_flush(hw);
 	clear_bit(__ICE_VF_DIS, pf->state);
@@ -1766,6 +1794,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 	struct virtchnl_vsi_queue_config_info *qci =
 	    (struct virtchnl_vsi_queue_config_info *)msg;
 	struct virtchnl_queue_pair_info *qpi;
+	struct ice_pf *pf = vf->pf;
 	enum ice_status aq_ret = 0;
 	struct ice_vsi *vsi;
 	int i;
@@ -1786,6 +1815,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		goto error_param;
 	}

+	if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
+		dev_err(&pf->pdev->dev,
+			"VF-%d requesting more than supported number of queues: %d\n",
+			vf->vf_id, qci->num_queue_pairs);
+		aq_ret = ICE_ERR_PARAM;
+		goto error_param;
+	}
+
 	for (i = 0; i < qci->num_queue_pairs; i++) {
 		qpi = &qci->qpair[i];
 		if (qpi->txq.vsi_id != qci->vsi_id ||
@@ -2013,6 +2050,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 	int req_queues = vfres->num_queue_pairs;
 	enum ice_status aq_ret = 0;
 	struct ice_pf *pf = vf->pf;
+	int max_allowed_vf_queues;
 	int tx_rx_queue_left;
 	int cur_queues;
@@ -2021,22 +2059,24 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 		goto error_param;
 	}

-	cur_queues = pf->num_vf_qps;
+	cur_queues = vf->num_vf_qs;
 	tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
 	if (req_queues <= 0) {
 		dev_err(&pf->pdev->dev,
 			"VF %d tried to request %d queues. Ignoring.\n",
 			vf->vf_id, req_queues);
-	} else if (req_queues > ICE_MAX_QS_PER_VF) {
+	} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
 		dev_err(&pf->pdev->dev,
 			"VF %d tried to request more than %d queues.\n",
-			vf->vf_id, ICE_MAX_QS_PER_VF);
-		vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+			vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
+		vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
 	} else if (req_queues - cur_queues > tx_rx_queue_left) {
 		dev_warn(&pf->pdev->dev,
 			 "VF %d requested %d more queues, but only %d left.\n",
 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
-		vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+		vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
+					       ICE_MAX_BASE_QS_PER_VF);
 	} else {
 		/* request is successful, then reset VF */
 		vf->num_req_qs = req_queues;
@@ -70,6 +70,7 @@ struct ice_vf {
 	u8 spoofchk;
 	u16 num_mac;
 	u16 num_vlan;
+	u16 num_vf_qs;			/* num of queue configured per VF */
 	u8 num_req_qs;			/* num of queue pairs requested by VF */
 };