Commit be2af714 authored by Przemyslaw Patynowski, committed by Tony Nguyen

ice: Fix queue config fail handling

Disable the VF's RX/TX queues when VIRTCHNL_OP_CONFIG_VSI_QUEUES fails.
Without this, the PF driver may leave the VF queues enabled even though
queue configuration of the VF's VSI failed; in that scenario the VF
should not have RX/TX queues enabled. If the PF fails to set up the
VF's queues, the VF will reset due to TX timeouts in the VF driver.
Initialize the iterator 'i' to -1 so that an error occurring before any
queue is configured does not cause the error path to disable queue 0.
The loop that configures the queues uses the same iterator, so the
error path disables only the queues that were actually configured.

Fixes: 77ca27c4 ("ice: add support for virtchnl_queue_select.[tx|rx]_queues bitmap")
Suggested-by: Slawomir Laba <slawomirx.laba@intel.com>
Signed-off-by: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 9542ef4f
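
For context, the change boils down to a standard partial-configuration unwind pattern. The sketch below is a minimal, self-contained illustration of that pattern only (hypothetical names such as validate_request, cfg_queue and dis_queue, not the ice driver API): the iterator starts at -1 so a failure before the loop unwinds nothing, and a failure inside the loop unwinds only the queue indices configured so far.

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

/* Hypothetical stand-in for request validation. */
static bool validate_request(void)
{
	return true;
}

/* Hypothetical stand-in for per-queue configuration; queue 2 "fails". */
static bool cfg_queue(int q)
{
	return q != 2;
}

/* Hypothetical stand-in for disabling a queue on the error path. */
static void dis_queue(int q)
{
	printf("disabling queue %d\n", q);
}

static int cfg_all_queues(void)
{
	int i = -1;	/* -1: an early failure must not disable queue 0 */

	if (!validate_request())
		goto error_param;	/* i is still -1, unwind loop is a no-op */

	for (i = 0; i < NUM_QUEUES; i++)
		if (!cfg_queue(i))
			goto error_param;

	return 0;	/* success: every queue configured */

error_param:
	/* disable only the queues that were actually touched (i down to 0) */
	for (; i >= 0; i--)
		dis_queue(i);
	return -1;
}

int main(void)
{
	return cfg_all_queues() ? 1 : 0;
}

In the patch itself, the unwind additionally disables both the RX ring (ice_vsi_ctrl_one_rx_ring()) and the TX queue (ice_vf_vsi_dis_single_txq()) for each index and then reports VIRTCHNL_STATUS_ERR_PARAM back to the VF, as shown in the diff below.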
@@ -1569,35 +1569,27 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
  */
 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 {
-	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
 	struct virtchnl_vsi_queue_config_info *qci =
 	    (struct virtchnl_vsi_queue_config_info *)msg;
 	struct virtchnl_queue_pair_info *qpi;
 	struct ice_pf *pf = vf->pf;
 	struct ice_vsi *vsi;
-	int i, q_idx;
+	int i = -1, q_idx;
 
-	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
-		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
 		goto error_param;
-	}
 
-	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
-		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
 		goto error_param;
-	}
 
 	vsi = ice_get_vf_vsi(vf);
-	if (!vsi) {
-		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+	if (!vsi)
 		goto error_param;
-	}
 
 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
-		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 		goto error_param;
 	}
 
@@ -1610,7 +1602,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
 		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
-			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 			goto error_param;
 		}
 
@@ -1620,7 +1611,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		 * for selected "vsi"
 		 */
 		if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
-			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 			goto error_param;
 		}
 
@@ -1630,14 +1620,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 			vsi->tx_rings[i]->count = qpi->txq.ring_len;
 
 			/* Disable any existing queue first */
-			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
-				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+			if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
 				goto error_param;
-			}
 
 			/* Configure a queue with the requested settings */
 			if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
-				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+					 vf->vf_id, i);
 				goto error_param;
 			}
 		}
@@ -1651,17 +1640,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 
 			if (qpi->rxq.databuffer_size != 0 &&
 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
-			     qpi->rxq.databuffer_size < 1024)) {
-				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+			     qpi->rxq.databuffer_size < 1024))
 				goto error_param;
-			}
 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
 			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
 			if (qpi->rxq.max_pkt_size > max_frame_size ||
-			    qpi->rxq.max_pkt_size < 64) {
-				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+			    qpi->rxq.max_pkt_size < 64)
 				goto error_param;
-			}
 
 			vsi->max_frame = qpi->rxq.max_pkt_size;
 			/* add space for the port VLAN since the VF driver is
@@ -1672,16 +1657,30 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 				vsi->max_frame += VLAN_HLEN;
 
 			if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
-				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+					 vf->vf_id, i);
 				goto error_param;
 			}
 		}
 	}
 
+	/* send the response to the VF */
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
 error_param:
+	/* disable whatever we can */
+	for (; i >= 0; i--) {
+		if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+				vf->vf_id, i);
+		if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+			dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+				vf->vf_id, i);
+	}
+
 	/* send the response to the VF */
-	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
-				     NULL, 0);
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+				     VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
 }
 
 /**