Commit 005881bc authored by Brett Creeley, committed by Jeff Kirsher

ice: Add ice_for_each_vf() macro

Currently we do "for (i = 0; i < pf->num_alloc_vfs; i++)" all over the
place. Many other places use macros to contain this repeated for loop,
so create the macro ice_for_each_vf(pf, i) that does the same thing.

There were a couple places we were using one loop variable and a VF
iterator, which were changed to using a local variable within the
ice_for_each_vf() macro.

Also in ice_alloc_vfs() we were setting pf->num_alloc_vfs after doing
"for (i = 0; i < num_alloc_vfs; i++)". Instead assign pf->num_alloc_vfs
right after allocating memory for the pf->vf array.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent fc0f39bc
...@@ -283,12 +283,15 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, ...@@ -283,12 +283,15 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
*/ */
static bool ice_active_vfs(struct ice_pf *pf) static bool ice_active_vfs(struct ice_pf *pf)
{ {
struct ice_vf *vf = pf->vf;
int i; int i;
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
return true; return true;
}
return false; return false;
} }
......
...@@ -476,7 +476,7 @@ ice_prepare_for_reset(struct ice_pf *pf) ...@@ -476,7 +476,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_vc_notify_reset(pf); ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */ /* Disable VFs until reset is completed */
for (i = 0; i < pf->num_alloc_vfs; i++) ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]); ice_set_vf_state_qs_dis(&pf->vf[i]);
/* clear SW filtering DB */ /* clear SW filtering DB */
...@@ -1295,7 +1295,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) ...@@ -1295,7 +1295,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
} }
/* check to see if one of the VFs caused the MDD */ /* check to see if one of the VFs caused the MDD */
for (i = 0; i < pf->num_alloc_vfs; i++) { ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i]; struct ice_vf *vf = &pf->vf[i];
bool vf_mdd_detected = false; bool vf_mdd_detected = false;
......
...@@ -78,10 +78,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, ...@@ -78,10 +78,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{ {
struct ice_hw *hw = &pf->hw; struct ice_hw *hw = &pf->hw;
struct ice_vf *vf = pf->vf;
int i; int i;
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
/* Not all vfs are enabled so skip the ones that are not */ /* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
...@@ -331,7 +332,7 @@ void ice_free_vfs(struct ice_pf *pf) ...@@ -331,7 +332,7 @@ void ice_free_vfs(struct ice_pf *pf)
usleep_range(1000, 2000); usleep_range(1000, 2000);
/* Avoid wait time by stopping all VFs at the same time */ /* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) ice_for_each_vf(pf, i)
if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
ice_dis_vf_qs(&pf->vf[i]); ice_dis_vf_qs(&pf->vf[i]);
...@@ -1077,7 +1078,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf) ...@@ -1077,7 +1078,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
ice_irq_dynamic_ena(hw, NULL, NULL); ice_irq_dynamic_ena(hw, NULL, NULL);
/* Finish resetting each VF and allocate resources */ /* Finish resetting each VF and allocate resources */
for (v = 0; v < pf->num_alloc_vfs; v++) { ice_for_each_vf(pf, v) {
struct ice_vf *vf = &pf->vf[v]; struct ice_vf *vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps; vf->num_vf_qs = pf->num_vf_qps;
...@@ -1120,10 +1121,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ...@@ -1120,10 +1121,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
return false; return false;
/* Begin reset on all VFs at once */ /* Begin reset on all VFs at once */
for (v = 0; v < pf->num_alloc_vfs; v++) ice_for_each_vf(pf, v)
ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
for (v = 0; v < pf->num_alloc_vfs; v++) { ice_for_each_vf(pf, v) {
struct ice_vsi *vsi; struct ice_vsi *vsi;
vf = &pf->vf[v]; vf = &pf->vf[v];
...@@ -1168,7 +1169,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ...@@ -1168,7 +1169,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_warn(dev, "VF reset check timeout\n"); dev_warn(dev, "VF reset check timeout\n");
/* free VF resources to begin resetting the VSI state */ /* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++) { ice_for_each_vf(pf, v) {
vf = &pf->vf[v]; vf = &pf->vf[v];
ice_free_vf_res(vf); ice_free_vf_res(vf);
...@@ -1308,7 +1309,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf) ...@@ -1308,7 +1309,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf)
{ {
int i; int i;
for (i = 0; i < pf->num_alloc_vfs; i++) ice_for_each_vf(pf, i)
ice_vc_notify_vf_link_state(&pf->vf[i]); ice_vc_notify_vf_link_state(&pf->vf[i]);
} }
...@@ -1392,9 +1393,10 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) ...@@ -1392,9 +1393,10 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
goto err_pci_disable_sriov; goto err_pci_disable_sriov;
} }
pf->vf = vfs; pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
/* apply default profile */ /* apply default profile */
for (i = 0; i < num_alloc_vfs; i++) { ice_for_each_vf(pf, i) {
vfs[i].pf = pf; vfs[i].pf = pf;
vfs[i].vf_sw_id = pf->first_sw; vfs[i].vf_sw_id = pf->first_sw;
vfs[i].vf_id = i; vfs[i].vf_id = i;
...@@ -1403,7 +1405,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) ...@@ -1403,7 +1405,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true; vfs[i].spoofchk = true;
} }
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated with initialization */ /* VF resources get allocated with initialization */
if (!ice_config_res_vfs(pf)) { if (!ice_config_res_vfs(pf)) {
...@@ -1542,7 +1543,7 @@ void ice_process_vflr_event(struct ice_pf *pf) ...@@ -1542,7 +1543,7 @@ void ice_process_vflr_event(struct ice_pf *pf)
!pf->num_alloc_vfs) !pf->num_alloc_vfs)
return; return;
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id]; struct ice_vf *vf = &pf->vf[vf_id];
u32 reg_idx, bit_idx; u32 reg_idx, bit_idx;
......
...@@ -40,6 +40,9 @@ ...@@ -40,6 +40,9 @@
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) #define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_WAIT 15 #define ICE_MAX_VF_RESET_WAIT 15
/* Iterate index @i over every allocated VF (0 .. pf->num_alloc_vfs - 1) */
#define ice_for_each_vf(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
/* Specific VF states */ /* Specific VF states */
enum ice_vf_states { enum ice_vf_states {
ICE_VF_STATE_INIT = 0, /* PF is initializing VF */ ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment