Commit fe998a3c authored by Shay Drory, committed by Saeed Mahameed

net/mlx5: Enable management PF initialization

Enable initialization of the DPU Management PF, which is a new loopback PF
designed for communication with the BMC.
For now the Management PF neither supports nor requires most upper layer
protocols, so avoid them.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Eran Ben Elisha <eranbe@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 0e414518
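
The hunks below extend the per-protocol is_supported checks that the mlx5 core consults before creating auxiliary devices for upper layer protocols. A minimal sketch of that gating pattern, assuming hypothetical names (example_adev, example_register_adevs) in place of the driver's real auxiliary-device table:

	struct example_adev {
		const char *name;
		bool (*is_supported)(struct mlx5_core_dev *dev);
	};

	/* Hypothetical table; the real driver keeps its own list of optional
	 * auxiliary devices (Ethernet, RDMA, ...). */
	static const struct example_adev example_adevs[] = {
		{ .name = "eth",  .is_supported = mlx5_eth_supported },
		{ .name = "rdma", .is_supported = mlx5_rdma_supported },
	};

	static void example_register_adevs(struct mlx5_core_dev *dev)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(example_adevs); i++) {
			/* With this patch, both callbacks return false for the
			 * Management PF, so no upper layer protocol device is
			 * created for it. */
			if (!example_adevs[i].is_supported(dev))
				continue;
			/* ... allocate and add the auxiliary device here ... */
		}
	}
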
@@ -59,6 +59,9 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev)
 	if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
 		return false;
 
+	if (mlx5_core_is_management_pf(dev))
+		return false;
+
 	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
 		return false;
 
@@ -198,6 +201,9 @@ bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
 	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
 		return false;
 
+	if (mlx5_core_is_management_pf(dev))
+		return false;
+
 	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
 		return false;
@@ -75,6 +75,10 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
 	if (!mlx5_core_is_ecpf(dev))
		return 0;
 
+	/* Management PF don't have a peer PF */
+	if (mlx5_core_is_management_pf(dev))
+		return 0;
+
 	return mlx5_host_pf_init(dev);
 }
@@ -85,6 +89,10 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
 	if (!mlx5_core_is_ecpf(dev))
 		return;
 
+	/* Management PF don't have a peer PF */
+	if (mlx5_core_is_management_pf(dev))
+		return;
+
 	mlx5_host_pf_cleanup(dev);
 
 	err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
@@ -1488,7 +1488,7 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
 	void *hca_caps;
 	int err;
 
-	if (!mlx5_core_is_ecpf(dev)) {
+	if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) {
 		*max_sfs = 0;
 		return 0;
 	}
@@ -1202,6 +1202,11 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
 	return dev->coredev_type == MLX5_COREDEV_VF;
 }
 
+static inline bool mlx5_core_is_management_pf(const struct mlx5_core_dev *dev)
+{
+	return MLX5_CAP_GEN(dev, num_ports) == 1 && !MLX5_CAP_GEN(dev, native_port_num);
+}
+
 static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
 {
 	return dev->caps.embedded_cpu;
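
The new helper identifies the Management PF from HCA capabilities alone: a single port (num_ports == 1) together with a zero native_port_num. A hedged usage sketch, with example_setup_ulp() as a hypothetical caller (the real gating sites are the hunks above):

	static int example_setup_ulp(struct mlx5_core_dev *dev)
	{
		/* The Management PF is a loopback function used only for BMC
		 * traffic, so skip upper layer protocol setup entirely. */
		if (mlx5_core_is_management_pf(dev))
			return 0;

		/* ... regular upper layer protocol initialization ... */
		return 0;
	}
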