Commit 912cebf4 authored by Leon Romanovsky

net/mlx5e: Connect ethernet part to auxiliary bus

Reuse the auxiliary bus to perform device management of the
ethernet part of the mlx5 driver.
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
parent 74c9729d
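For readers unfamiliar with the auxiliary bus, the skeleton below illustrates the pattern this commit moves to: the parent PCI driver (mlx5_core) exposes auxiliary devices, and sub-drivers bind to them through an auxiliary_driver with an id_table. This is a minimal, hypothetical sketch and not code from this commit; the "example" names are invented, and the match string assumes MLX5_ADEV_NAME expands to the parent module name "mlx5_core".

/* Minimal auxiliary-driver skeleton (illustration only, hypothetical names). */
#include <linux/module.h>
#include <linux/auxiliary_bus.h>

static int example_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	/* The parent driver created adev; sub-driver state is usually
	 * reached via container_of() on a wrapping struct, as mlx5 does
	 * with struct mlx5_adev.
	 */
	dev_info(&adev->dev, "bound to %s\n", id->name);
	return 0;
}

static void example_remove(struct auxiliary_device *adev)
{
	dev_info(&adev->dev, "unbound\n");
}

/* Devices are matched as "<parent module>.<suffix>", e.g. "mlx5_core.eth". */
static const struct auxiliary_device_id example_id_table[] = {
	{ .name = "mlx5_core.eth" },
	{},
};
MODULE_DEVICE_TABLE(auxiliary, example_id_table);

static struct auxiliary_driver example_driver = {
	.name = "eth",
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_id_table,
};
module_auxiliary_driver(example_driver);

MODULE_LICENSE("GPL");

The mlx5e conversion in the diff below additionally wires .suspend and .resume callbacks into the auxiliary_driver and registers a second driver ("eth-rep") for the switchdev representor path.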
@@ -31,6 +31,7 @@
*/
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include "mlx5_core.h"
@@ -52,6 +53,75 @@ enum {
MLX5_INTERFACE_ATTACHED,
};
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
return false;
if (!MLX5_ESWITCH_MANAGER(dev))
return false;
if (mlx5_eswitch_mode(dev->priv.eswitch) != MLX5_ESWITCH_OFFLOADS)
return false;
return true;
}
static bool is_eth_supported(struct mlx5_core_dev *dev)
{
if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
return false;
if (is_eth_rep_supported(dev))
return false;
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return false;
if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
return false;
}
if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
return false;
}
if (!MLX5_CAP_ETH(dev, csum_cap)) {
mlx5_core_warn(dev, "Missing csum_cap capability\n");
return false;
}
if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
return false;
}
if (!MLX5_CAP_ETH(dev, vlan_cap)) {
mlx5_core_warn(dev, "Missing vlan_cap capability\n");
return false;
}
if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
return false;
}
if (MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.max_ft_level) < 3) {
mlx5_core_warn(dev, "max_ft_level < 3\n");
return false;
}
if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
mlx5_core_warn(dev, "Self loop back prevention is not supported\n");
if (!MLX5_CAP_GEN(dev, cq_moderation))
mlx5_core_warn(dev, "CQ moderation is not supported\n");
return true;
}
static bool is_vnet_supported(struct mlx5_core_dev *dev)
{
if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
@@ -80,6 +150,10 @@ static const struct mlx5_adev_device {
} mlx5_adev_devices[] = {
[MLX5_INTERFACE_PROTOCOL_VDPA] = { .suffix = "vnet",
.is_supported = &is_vnet_supported },
[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
.is_supported = &is_eth_supported },
[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
.is_supported = &is_eth_rep_supported },
};
int mlx5_adev_idx_alloc(void)
......
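The mlx5_adev_devices[] table in the hunk above pairs each device suffix with an is_supported() check, and the core only exposes an auxiliary device when that check passes. As a rough illustration of the parent-device side of the auxiliary bus, creating one such device looks roughly like the sketch below; this is not the actual mlx5_core implementation and the names are hypothetical.

/* Hypothetical sketch of creating one auxiliary device on the parent side. */
#include <linux/auxiliary_bus.h>
#include <linux/err.h>
#include <linux/slab.h>

static void example_adev_release(struct device *dev)
{
	struct auxiliary_device *adev =
		container_of(dev, struct auxiliary_device, dev);

	kfree(adev);
}

static struct auxiliary_device *example_add_adev(struct device *parent,
						 const char *suffix, u32 id)
{
	struct auxiliary_device *adev;
	int err;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return ERR_PTR(-ENOMEM);

	adev->name = suffix;		/* e.g. "eth" or "eth-rep" */
	adev->id = id;			/* unique per parent device */
	adev->dev.parent = parent;
	adev->dev.release = example_adev_release;

	err = auxiliary_device_init(adev);
	if (err) {
		kfree(adev);
		return ERR_PTR(err);
	}

	/* Publishes "<module>.<suffix>.<id>"; drivers match on "<module>.<suffix>". */
	err = auxiliary_device_add(adev);
	if (err) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(err);
	}

	return adev;
}

In mlx5 the auxiliary_device is embedded in struct mlx5_adev together with a back-pointer to the mlx5_core_dev, which is why the probe handlers further down recover mdev via container_of(adev, struct mlx5_adev, adev).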
@@ -4597,31 +4597,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_get_devlink_port = mlx5e_get_devlink_port,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return -EOPNOTSUPP;
if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
!MLX5_CAP_GEN(mdev, nic_flow_table) ||
!MLX5_CAP_ETH(mdev, csum_cap) ||
!MLX5_CAP_ETH(mdev, max_lso_cap) ||
!MLX5_CAP_ETH(mdev, vlan_cap) ||
!MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
MLX5_CAP_FLOWTABLE(mdev,
flow_table_properties_nic_receive.max_ft_level)
< 3) {
mlx5_core_warn(mdev,
"Not creating net device, some required device capabilities are missing\n");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
if (!MLX5_CAP_GEN(mdev, cq_moderation))
mlx5_core_warn(mdev, "CQ moderation is not supported\n");
return 0;
}
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels)
{
@@ -5441,13 +5416,12 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
free_netdev(netdev);
}
/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
* hardware contexts and to connect it to the current netdev.
*/
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
static int mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5e_priv *priv = vpriv;
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
int err;
if (netif_device_present(netdev))
@@ -5466,109 +5440,111 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
return 0;
}
static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
struct mlx5e_priv *priv = vpriv;
struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
struct net_device *netdev = priv->netdev;
#ifdef CONFIG_MLX5_ESWITCH
if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
return;
#endif
struct mlx5_core_dev *mdev = priv->mdev;
if (!netif_device_present(netdev))
return;
return -ENODEV;
mlx5e_detach_netdev(priv);
mlx5e_destroy_mdev_resources(mdev);
return 0;
}
static void *mlx5e_add(struct mlx5_core_dev *mdev)
static int mlx5e_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5_core_dev *mdev = edev->mdev;
struct net_device *netdev;
pm_message_t state = {};
void *priv;
int err;
int nch;
err = mlx5e_check_required_hca_cap(mdev);
if (err)
return NULL;
#ifdef CONFIG_MLX5_ESWITCH
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
mlx5e_rep_register_vport_reps(mdev);
return mdev;
}
#endif
nch = mlx5e_get_max_num_channels(mdev);
netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return NULL;
return -ENOMEM;
}
dev_net_set(netdev, mlx5_core_net(mdev));
priv = netdev_priv(netdev);
dev_set_drvdata(&adev->dev, priv);
err = mlx5e_attach(mdev, priv);
err = mlx5e_resume(adev);
if (err) {
mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
goto err_destroy_netdev;
}
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
goto err_detach;
goto err_resume;
}
mlx5e_devlink_port_type_eth_set(priv);
mlx5e_dcbnl_init_app(priv);
return priv;
return 0;
err_detach:
mlx5e_detach(mdev, priv);
err_resume:
mlx5e_suspend(adev, state);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
return NULL;
return err;
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
static void mlx5e_remove(struct auxiliary_device *adev)
{
struct mlx5e_priv *priv;
struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
pm_message_t state = {};
#ifdef CONFIG_MLX5_ESWITCH
if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
mlx5e_rep_unregister_vport_reps(mdev);
return;
}
#endif
priv = vpriv;
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_suspend(adev, state);
mlx5e_destroy_netdev(priv);
}
static struct mlx5_interface mlx5e_interface = {
.add = mlx5e_add,
.remove = mlx5e_remove,
.attach = mlx5e_attach,
.detach = mlx5e_detach,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
static const struct auxiliary_device_id mlx5e_id_table[] = {
{ .name = MLX5_ADEV_NAME ".eth", },
{},
};
void mlx5e_init(void)
MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);
static struct auxiliary_driver mlx5e_driver = {
.name = "eth",
.probe = mlx5e_probe,
.remove = mlx5e_remove,
.suspend = mlx5e_suspend,
.resume = mlx5e_resume,
.id_table = mlx5e_id_table,
};
int mlx5e_init(void)
{
int ret;
mlx5e_ipsec_build_inverse_table();
mlx5e_build_ptys2ethtool_map();
mlx5_register_interface(&mlx5e_interface);
ret = mlx5e_rep_init();
if (ret)
return ret;
ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)
mlx5e_rep_cleanup();
return ret;
}
void mlx5e_cleanup(void)
{
mlx5_unregister_interface(&mlx5e_interface);
auxiliary_driver_unregister(&mlx5e_driver);
mlx5e_rep_cleanup();
}
@@ -1315,16 +1315,48 @@ static const struct mlx5_eswitch_rep_ops rep_ops = {
.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
static int mlx5e_rep_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5_eswitch *esw;
esw = mdev->priv.eswitch;
mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
return 0;
}
void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
static void mlx5e_rep_remove(struct auxiliary_device *adev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
struct mlx5_core_dev *mdev = vdev->mdev;
struct mlx5_eswitch *esw;
esw = mdev->priv.eswitch;
mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}
static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
{ .name = MLX5_ADEV_NAME ".eth-rep", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);
static struct auxiliary_driver mlx5e_rep_driver = {
.name = "eth-rep",
.probe = mlx5e_rep_probe,
.remove = mlx5e_rep_remove,
.id_table = mlx5e_rep_id_table,
};
int mlx5e_rep_init(void)
{
return auxiliary_driver_register(&mlx5e_rep_driver);
}
void mlx5e_rep_cleanup(void)
{
auxiliary_driver_unregister(&mlx5e_rep_driver);
}
@@ -203,8 +203,8 @@ struct mlx5e_rep_sq {
struct list_head list;
};
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
int mlx5e_rep_init(void);
void mlx5e_rep_cleanup(void);
int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv);
int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
@@ -232,6 +232,8 @@ static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
static inline int mlx5e_rep_init(void) { return 0; };
static inline void mlx5e_rep_cleanup(void) {};
#endif
static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv)
......
@@ -1614,7 +1614,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
if (mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_rescan_drivers(esw->dev);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
err = esw_offloads_enable(esw);
}
@@ -1635,7 +1635,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
if (mode == MLX5_ESWITCH_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_rescan_drivers(esw->dev);
}
esw_destroy_tsar(esw);
return err;
@@ -1699,7 +1699,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
if (old_mode == MLX5_ESWITCH_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_rescan_drivers(esw->dev);
}
esw_destroy_tsar(esw);
......
@@ -596,6 +596,8 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (err)
mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
err);
return;
}
/* Must be called with intf_mutex held */
......
@@ -1676,7 +1676,11 @@ static int __init init(void)
goto err_debug;
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_init();
err = mlx5e_init();
if (err) {
pci_unregister_driver(&mlx5_core_driver);
goto err_debug;
}
#endif
return 0;
......
@@ -217,7 +217,7 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
u32 *running_ver, u32 *stored_ver);
void mlx5e_init(void);
int mlx5e_init(void);
void mlx5e_cleanup(void);
static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
......