Commit fff292b4 authored by Michal Swiatkowski, committed by Tony Nguyen

ice: add VF representors one by one

Implement adding representors one by one. Always set up the switchdev
environment when the first representor is being added, and clear the
environment when the last one is being removed.

Basic switchdev configuration remains the same. Code related to creating
and configuring representor was changed.

Instead of setting up all representors in one function, handle only one
representor in the setup function. The same applies to removing representors.

Stop the representors when a new one is being added or removed. Stopping
means disabling napi, stopping traffic and removing the slow path rule.
This is needed because ::q_id will change after remapping, so each
representor will need a new rule.

When the representors are stopped, rebuild the control plane VSI with one
more or one less queue: one more if a new representor is being added, one
less if a representor is being removed.

Bridge port is removed during unregister_netdev() call on PR, so there
is no need to call it from driver side.

After that, remap the new queues to the correct vectors. At the end, start
all representors (napi enable, start queues, add slow path rule).
Reviewed-by: Piotr Raczynski <piotr.raczynski@intel.com>
Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 5995ef88
......@@ -7,8 +7,9 @@
#include <net/devlink.h>
#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_release(struct ice_pf *pf);
int ice_eswitch_configure(struct ice_pf *pf);
void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
int ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
......@@ -26,7 +27,13 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
#else /* CONFIG_ICE_SWITCHDEV */
static inline void ice_eswitch_release(struct ice_pf *pf) { }
static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
static inline int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
return -EOPNOTSUPP;
}
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
......
......@@ -287,44 +287,26 @@ static void ice_repr_remove_node(struct devlink_port *devlink_port)
/**
* ice_repr_rem - remove representor from VF
* @reprs: xarray storing representors
* @repr: pointer to representor structure
*/
static void ice_repr_rem(struct xarray *reprs, struct ice_repr *repr)
static void ice_repr_rem(struct ice_repr *repr)
{
xa_erase(reprs, repr->id);
kfree(repr->q_vector);
free_netdev(repr->netdev);
kfree(repr);
}
static void ice_repr_rem_vf(struct ice_vf *vf)
{
struct ice_repr *repr = xa_load(&vf->pf->eswitch.reprs, vf->repr_id);
if (!repr)
return;
ice_repr_remove_node(&repr->vf->devlink_port);
unregister_netdev(repr->netdev);
ice_repr_rem(&vf->pf->eswitch.reprs, repr);
ice_devlink_destroy_vf_port(vf);
ice_virtchnl_set_dflt_ops(vf);
}
/**
* ice_repr_rem_from_all_vfs - remove port representor for all VFs
* @pf: pointer to PF structure
* ice_repr_rem_vf - remove representor from VF
* @repr: pointer to representor structure
*/
void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
void ice_repr_rem_vf(struct ice_repr *repr)
{
struct ice_vf *vf;
unsigned int bkt;
lockdep_assert_held(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf)
ice_repr_rem_vf(vf);
ice_repr_remove_node(&repr->vf->devlink_port);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
ice_repr_rem(repr);
}
static void ice_repr_set_tx_topology(struct ice_pf *pf)
......@@ -374,19 +356,12 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
goto err_alloc_q_vector;
}
repr->q_vector = q_vector;
err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
XA_LIMIT(1, INT_MAX), GFP_KERNEL);
if (err)
goto err_xa_alloc;
repr->q_id = repr->id;
ether_addr_copy(repr->parent_mac, parent_mac);
return repr;
err_xa_alloc:
kfree(repr->q_vector);
err_alloc_q_vector:
free_netdev(repr->netdev);
err_alloc:
......@@ -394,7 +369,7 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
return ERR_PTR(err);
}
static struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
{
struct ice_repr *repr;
struct ice_vsi *vsi;
......@@ -414,7 +389,6 @@ static struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
goto err_repr_add;
}
vf->repr_id = repr->id;
repr->vf = vf;
repr->netdev->min_mtu = ETH_MIN_MTU;
......@@ -432,49 +406,12 @@ static struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
return repr;
err_netdev:
ice_repr_rem(&vf->pf->eswitch.reprs, repr);
ice_repr_rem(repr);
err_repr_add:
ice_devlink_destroy_vf_port(vf);
return ERR_PTR(err);
}
/**
* ice_repr_add_for_all_vfs - add port representor for all VFs
* @pf: pointer to PF structure
*/
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
struct devlink *devlink;
struct ice_repr *repr;
struct ice_vf *vf;
unsigned int bkt;
int err;
lockdep_assert_held(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf) {
repr = ice_repr_add_vf(vf);
if (IS_ERR(repr)) {
err = PTR_ERR(repr);
goto err;
}
}
/* only export if ADQ and DCB disabled */
if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
return 0;
devlink = priv_to_devlink(pf);
ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
return 0;
err:
ice_repr_rem_from_all_vfs(pf);
return err;
}
struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
{
if (!vsi->vf)
......
......@@ -22,8 +22,8 @@ struct ice_repr {
#endif
};
int ice_repr_add_for_all_vfs(struct ice_pf *pf);
void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
void ice_repr_rem_vf(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
......
......@@ -174,11 +174,10 @@ void ice_free_vfs(struct ice_pf *pf)
mutex_lock(&vfs->table_lock);
ice_eswitch_release(pf);
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
ice_eswitch_detach(pf, vf);
ice_dis_vf_qs(vf);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
......@@ -614,6 +613,14 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
retval = ice_eswitch_attach(pf, vf);
if (retval) {
dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
vf->vf_id, retval);
ice_vf_vsi_release(vf);
goto teardown;
}
set_bit(ICE_VF_STATE_INIT, vf->vf_states);
ice_ena_vf_mappings(vf);
wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
......@@ -932,12 +939,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
clear_bit(ICE_VF_DIS, pf->state);
ret = ice_eswitch_configure(pf);
if (ret) {
dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
goto err_unroll_sriov;
}
/* rearm global interrupts */
if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
ice_irq_dynamic_ena(hw, NULL, NULL);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment