Commit 000773c0 authored by Jacob Keller, committed by Tony Nguyen

ice: factor VF variables to separate structure

We maintain a number of values for VFs within the ice_pf structure. This
includes the VF table, the number of allocated VFs, the maximum number
of supported SR-IOV VFs, the number of queue pairs per VF, the number of
MSI-X vectors per VF, and a bitmap of the VFs with detected MDD events.

We're about to add a few more variables to this list. Clean this up
first by extracting these members out into a new ice_vfs structure
defined in ice_virtchnl_pf.h.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent c4c2c7db
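
The hunks below rename the PF-level VF fields as they move into the new structure. As a quick orientation, here is a minimal, hypothetical sketch (not part of this commit) of how a call site reads after the change; the field names (table, num_alloc, num_supported, num_qps_per, num_msix_per) are taken from the struct ice_vfs definition added in ice_virtchnl_pf.h, while ice_print_vf_summary() itself is invented for illustration and ice_pf_to_dev()/dev_info() are existing helpers.

/* Illustration only: all VF bookkeeping is now reached through pf->vfs
 * instead of loose members on struct ice_pf.
 */
static void ice_print_vf_summary(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;

	dev_info(ice_pf_to_dev(pf),
		 "%u of %u supported VFs allocated, %u queue pairs and %u MSI-X vectors per VF\n",
		 vfs->num_alloc, vfs->num_supported,
		 vfs->num_qps_per, vfs->num_msix_per);
}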
drivers/net/ethernet/intel/ice/ice.h
@@ -528,15 +528,7 @@ struct ice_pf {
 	struct ice_vsi **vsi;		/* VSIs created by the driver */
 	struct ice_sw *first_sw;	/* first switch created by firmware */
 	u16 eswitch_mode;		/* current mode of eswitch */
-	/* Virtchnl/SR-IOV config info */
-	struct ice_vf *vf;
-	u16 num_alloc_vfs;		/* actual number of VFs allocated */
-	u16 num_vfs_supported;		/* num VFs supported for this PF */
-	u16 num_qps_per_vf;
-	u16 num_msix_per_vf;
-	/* used to ratelimit the MDD event logging */
-	unsigned long last_printed_mdd_jiffies;
-	DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+	struct ice_vfs vfs;
 	DECLARE_BITMAP(features, ICE_F_MAX);
 	DECLARE_BITMAP(state, ICE_STATE_NBITS);
 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -176,10 +176,20 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
 	int q_id;
 
 	ice_for_each_txq(vsi, q_id) {
-		struct ice_repr *repr = pf->vf[q_id].repr;
-		struct ice_q_vector *q_vector = repr->q_vector;
-		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
-		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+		struct ice_q_vector *q_vector;
+		struct ice_tx_ring *tx_ring;
+		struct ice_rx_ring *rx_ring;
+		struct ice_repr *repr;
+		struct ice_vf *vf;
+
+		if (WARN_ON(q_id >= pf->vfs.num_alloc))
+			continue;
+
+		vf = &pf->vfs.table[q_id];
+		repr = vf->repr;
+		q_vector = repr->q_vector;
+		tx_ring = vsi->tx_rings[q_id];
+		rx_ring = vsi->rx_rings[q_id];
 
 		q_vector->vsi = vsi;
 		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@@ -525,7 +535,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
 	if (pf->eswitch_mode == mode)
 		return 0;
 
-	if (pf->num_alloc_vfs) {
+	if (pf->vfs.num_alloc) {
 		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
 		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
 		return -EOPNOTSUPP;
drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1297,7 +1297,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 	}
 
 	if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
-	    pf->num_alloc_vfs) {
+	    pf->vfs.num_alloc) {
 		dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
 		/* toggle bit back to previous state */
 		change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
drivers/net/ethernet/intel/ice/ice_lib.c
@@ -215,8 +215,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
 		/* The number of queues for ctrl VSI is equal to number of VFs.
 		 * Each ring is associated to the corresponding VF_PR netdev.
 		 */
-		vsi->alloc_txq = pf->num_alloc_vfs;
-		vsi->alloc_rxq = pf->num_alloc_vfs;
+		vsi->alloc_txq = pf->vfs.num_alloc;
+		vsi->alloc_rxq = pf->vfs.num_alloc;
 		vsi->num_q_vectors = 1;
 		break;
 	case ICE_VSI_VF:
@@ -224,12 +224,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
 			vf->num_vf_qs = vf->num_req_qs;
 		vsi->alloc_txq = vf->num_vf_qs;
 		vsi->alloc_rxq = vf->num_vf_qs;
-		/* pf->num_msix_per_vf includes (VF miscellaneous vector +
+		/* pf->vfs.num_msix_per includes (VF miscellaneous vector +
 		 * data queue interrupts). Since vsi->num_q_vectors is number
 		 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
 		 * original vector count
 		 */
-		vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
+		vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
 		break;
 	case ICE_VSI_CTRL:
 		vsi->alloc_txq = 1;
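
To make the comment above concrete, a hedged worked example with assumed numbers (not taken from this commit): if a VF were granted 17 MSI-X vectors in total and ICE_NONQ_VECS_VF were 1 (the single non-queue/mailbox vector), the VF VSI would end up with 16 queue vectors.

	/* Illustration only; the values are assumed, the subtraction mirrors the hunk above */
	u16 num_msix_per = 17;					/* hypothetical per-VF MSI-X budget */
	u16 num_q_vectors = num_msix_per - ICE_NONQ_VECS_VF;	/* 16 if ICE_NONQ_VECS_VF == 1 */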
drivers/net/ethernet/intel/ice/ice_main.c
@@ -3712,7 +3712,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
 	if (func_caps->common_cap.sr_iov_1_1) {
 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
-		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
+		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
 					      ICE_MAX_VF_COUNT);
 	}
 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -50,8 +50,8 @@
  * Use vf->vf_id to get the id number if needed.
  */
 #define ice_for_each_vf(pf, bkt, entry) \
-	for ((bkt) = 0, (entry) = &(pf)->vf[0]; \
-	     (bkt) < (pf)->num_alloc_vfs; \
+	for ((bkt) = 0, (entry) = &(pf)->vfs.table[0]; \
+	     (bkt) < (pf)->vfs.num_alloc; \
 	     (bkt)++, (entry)++)
 
 /* Specific VF states */
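
A short usage sketch for the updated iterator (hypothetical, not from this commit): the macro now walks pf->vfs.table up to pf->vfs.num_alloc, so a caller only supplies an index and an entry pointer, and can still use vf->vf_id as noted in the comment above. The next hunk, in the same ice_virtchnl_pf.h, adds the struct ice_vfs definition that backs it.

	/* Illustration only: iterate every allocated VF; pf is a struct ice_pf * in scope */
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, bkt, vf)
		dev_dbg(ice_pf_to_dev(pf), "VF %u is entry %u in pf->vfs.table\n",
			vf->vf_id, bkt);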
@@ -116,6 +116,17 @@ struct ice_vc_vf_ops {
 	int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
 };
 
+/* Virtchnl/SR-IOV config info */
+struct ice_vfs {
+	struct ice_vf *table;			/* table of VF entries */
+	u16 num_alloc;				/* number of allocated VFs */
+	u16 num_supported;			/* max supported VFs on this PF */
+	u16 num_qps_per;			/* number of queue pairs per VF */
+	u16 num_msix_per;			/* number of MSI-X vectors per VF */
+	unsigned long last_printed_mdd_jiffies;	/* MDD message rate limit */
+	DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); /* malicious VF indicator */
+};
+
 /* VF information structure */
 struct ice_vf {
 	struct ice_pf *pf;
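
For quick reference, a comment-only sketch of how the old struct ice_pf members map to their new homes; this is derived from the ice.h hunk above and the struct ice_vfs definition, not stated verbatim in the commit.

/*
 * Old member on struct ice_pf		New member behind pf->vfs
 * ---------------------------		-------------------------
 * pf->vf				pf->vfs.table
 * pf->num_alloc_vfs			pf->vfs.num_alloc
 * pf->num_vfs_supported		pf->vfs.num_supported
 * pf->num_qps_per_vf			pf->vfs.num_qps_per
 * pf->num_msix_per_vf			pf->vfs.num_msix_per
 * pf->last_printed_mdd_jiffies		pf->vfs.last_printed_mdd_jiffies
 * pf->malvfs				pf->vfs.malvfs
 */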