Commit 03803083 authored by Paolo Abeni

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-07-14 (ice)

This series contains updates to ice driver only.

Petr Oros removes duplicate unregister calls for the netdev and
devlink_port.

Michal fixes a NULL pointer dereference that can occur during reload.
====================

Link: https://lore.kernel.org/r/20230714201041.1717834-1-anthony.l.nguyen@intel.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 162d626f b3e7b3a6
@@ -800,6 +800,8 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
 
 	ice_for_each_q_vector(vsi, v_idx)
 		ice_free_q_vector(vsi, v_idx);
+
+	vsi->num_q_vectors = 0;
 }
 
 /**
@@ -2681,8 +2681,13 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
 
 	ring->rx_max_pending = ICE_MAX_NUM_DESC;
 	ring->tx_max_pending = ICE_MAX_NUM_DESC;
-	ring->rx_pending = vsi->rx_rings[0]->count;
-	ring->tx_pending = vsi->tx_rings[0]->count;
+	if (vsi->tx_rings && vsi->rx_rings) {
+		ring->rx_pending = vsi->rx_rings[0]->count;
+		ring->tx_pending = vsi->tx_rings[0]->count;
+	} else {
+		ring->rx_pending = 0;
+		ring->tx_pending = 0;
+	}
 
 	/* Rx mini and jumbo rings are not supported */
 	ring->rx_mini_max_pending = 0;
@@ -2716,6 +2721,10 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
 		return -EINVAL;
 	}
 
+	/* Return if there is no rings (device is reloading) */
+	if (!vsi->tx_rings || !vsi->rx_rings)
+		return -EBUSY;
+
 	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
 	if (new_tx_cnt != ring->tx_pending)
 		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
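Editor's note: the two ethtool hunks above apply a simple defensive pattern for the reload fix described in the cover letter: the VSI's ring arrays can be torn down by a devlink reload while the ethtool callbacks are still reachable, so the read path reports zeroes and the write path refuses with -EBUSY instead of dereferencing missing rings. Below is a minimal, standalone sketch of that pattern; the struct and function names are illustrative stand-ins, not the ice driver's real definitions.

/* Standalone illustration of the guard added in ice_get_ringparam() and
 * ice_set_ringparam(); every name here is a hypothetical stand-in.
 */
#include <errno.h>
#include <stdio.h>

struct demo_ring { int count; };

struct demo_vsi {
	/* both arrays are NULL while the device is being reloaded */
	struct demo_ring **tx_rings;
	struct demo_ring **rx_rings;
};

/* "get" path: report zeroes instead of dereferencing missing rings */
static void demo_get_ringparam(const struct demo_vsi *vsi, int *rx, int *tx)
{
	if (vsi->tx_rings && vsi->rx_rings) {
		*rx = vsi->rx_rings[0]->count;
		*tx = vsi->tx_rings[0]->count;
	} else {
		*rx = 0;
		*tx = 0;
	}
}

/* "set" path: refuse the request outright while the rings are absent */
static int demo_set_ringparam(struct demo_vsi *vsi, int rx, int tx)
{
	if (!vsi->tx_rings || !vsi->rx_rings)
		return -EBUSY;

	vsi->rx_rings[0]->count = rx;
	vsi->tx_rings[0]->count = tx;
	return 0;
}

int main(void)
{
	struct demo_vsi vsi = { .tx_rings = NULL, .rx_rings = NULL };
	int rx, tx;

	/* while "reloading", both calls stay safe */
	demo_get_ringparam(&vsi, &rx, &tx);
	printf("get while reloading: rx=%d tx=%d\n", rx, tx);
	printf("set while reloading: %d\n", demo_set_ringparam(&vsi, 512, 512));
	return 0;
}

Keeping the read path harmless while rejecting writes until the reload finishes mirrors what the hunks above do in the driver.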
@@ -2972,39 +2972,12 @@ int ice_vsi_release(struct ice_vsi *vsi)
 		return -ENODEV;
 	pf = vsi->back;
 
-	/* do not unregister while driver is in the reset recovery pending
-	 * state. Since reset/rebuild happens through PF service task workqueue,
-	 * it's not a good idea to unregister netdev that is associated to the
-	 * PF that is running the work queue items currently. This is done to
-	 * avoid check_flush_dependency() warning on this wq
-	 */
-	if (vsi->netdev && !ice_is_reset_in_progress(pf->state) &&
-	    (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) {
-		unregister_netdev(vsi->netdev);
-		clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
-	}
-
-	if (vsi->type == ICE_VSI_PF)
-		ice_devlink_destroy_pf_port(pf);
-
 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 		ice_rss_clean(vsi);
 
 	ice_vsi_close(vsi);
 	ice_vsi_decfg(vsi);
 
-	if (vsi->netdev) {
-		if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
-			unregister_netdev(vsi->netdev);
-			clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
-		}
-		if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) {
-			free_netdev(vsi->netdev);
-			vsi->netdev = NULL;
-			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
-		}
-	}
-
 	/* retain SW VSI data structure since it is needed to unregister and
 	 * free VSI netdev when PF is not in reset recovery pending state,\
 	 * for ex: during rmmod.
@@ -4430,9 +4430,9 @@ static int ice_start_eth(struct ice_vsi *vsi)
 	if (err)
 		return err;
 
-	rtnl_lock();
 	err = ice_vsi_open(vsi);
-	rtnl_unlock();
+	if (err)
+		ice_fltr_remove_all(vsi);
 
 	return err;
 }
@@ -4895,6 +4895,7 @@ int ice_load(struct ice_pf *pf)
 	params = ice_vsi_to_params(vsi);
 	params.flags = ICE_VSI_FLAG_INIT;
 
+	rtnl_lock();
 	err = ice_vsi_cfg(vsi, &params);
 	if (err)
 		goto err_vsi_cfg;
@@ -4902,6 +4903,7 @@ int ice_load(struct ice_pf *pf)
 	err = ice_start_eth(ice_get_main_vsi(pf));
 	if (err)
 		goto err_start_eth;
+	rtnl_unlock();
 
 	err = ice_init_rdma(pf);
 	if (err)
@@ -4916,9 +4918,11 @@ int ice_load(struct ice_pf *pf)
 
 err_init_rdma:
 	ice_vsi_close(ice_get_main_vsi(pf));
+	rtnl_lock();
 err_start_eth:
 	ice_vsi_decfg(ice_get_main_vsi(pf));
 err_vsi_cfg:
+	rtnl_unlock();
 	ice_deinit_dev(pf);
 	return err;
 }
@@ -4931,8 +4935,10 @@ void ice_unload(struct ice_pf *pf)
 {
 	ice_deinit_features(pf);
 	ice_deinit_rdma(pf);
+	rtnl_lock();
 	ice_stop_eth(ice_get_main_vsi(pf));
 	ice_vsi_decfg(ice_get_main_vsi(pf));
+	rtnl_unlock();
 	ice_deinit_dev(pf);
 }
 
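Editor's note: the ice_load()/ice_unload() hunks above move rtnl locking out of ice_start_eth() and up into the load/unload paths, so the lock is held across the whole configure + start (and stop + deconfigure) sequence rather than only around the open call. The sketch below shows that lock-scope change schematically, with a pthread mutex standing in for rtnl_lock(); the names and the stated motivation are illustrative assumptions, not taken from the commit messages.

/* Schematic of widening a lock's scope from an inner helper to the
 * caller; every name is a hypothetical stand-in for the ice code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_rtnl = PTHREAD_MUTEX_INITIALIZER;
static int demo_configured;

static int demo_start_eth(void)
{
	/* previously this helper took and dropped the lock itself */
	printf("open rings (caller already holds the lock)\n");
	return 0;
}

static int demo_load(void)
{
	int err;

	pthread_mutex_lock(&demo_rtnl);	/* lock now covers cfg + start */
	demo_configured = 1;
	err = demo_start_eth();
	pthread_mutex_unlock(&demo_rtnl);
	return err;
}

static void demo_unload(void)
{
	pthread_mutex_lock(&demo_rtnl);	/* lock now covers stop + decfg */
	printf("stop rings\n");
	demo_configured = 0;
	pthread_mutex_unlock(&demo_rtnl);
}

int main(void)
{
	if (!demo_load())
		demo_unload();
	return 0;
}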