Commit 09591595 authored by Petr Machata, committed by Jakub Kicinski

mlxsw: core, pci: Add plumbing related to CFF mode

CFF mode, for Compressed FID Flooding, is a way of organizing flood vectors
in the PGT table. The bus module determines whether CFF is supported, can
configure flood mode to CFF if it is, and knows what flood mode has been
configured. Therefore add a bus callback to determine the configured flood
mode. Also add to core an API to query it.
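
For illustration, a driver above the bus layer could branch on the configured mode roughly as follows. This is a minimal sketch: the helper name and the CFF enum member are assumptions, since this patch adds no consumer of the API.

```c
/* Hypothetical consumer of the new core API. Assumes an enum member
 * MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF; nothing in this patch
 * invokes the query yet.
 */
static bool mlxsw_sp_flood_mode_is_cff(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core_flood_mode(mlxsw_core) ==
	       MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF;
}
```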

Since, after this patch, we rely on mlxsw_pci->flood_mode being set, it
becomes a coding error if a driver invokes mlxsw_pci_config_profile() with a
profile that leaves the flood mode uninitialized. Warn and bail out in that
case.
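
The guard distills to the following pattern (this mirrors the
mlxsw_pci_config_profile() hunk in the pci.c diff below):

```c
	if (profile->used_flood_mode) {
		/* ... program the mailbox ... */
		mlxsw_pci->flood_mode = profile->flood_mode;
	} else {
		/* The caller must initialize the flood mode; anything
		 * else is a coding error in the driver.
		 */
		WARN_ON(1);
		return -EINVAL;
	}
```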

The CFF mode is not used as of this patch. The code to actually use it will
be added later.
Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Link: https://lore.kernel.org/r/889d58759dd40f5037f2206b9fc4a78a9240da80.1700503644.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 6b10371c
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -211,6 +211,13 @@ mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core)
 }
 EXPORT_SYMBOL(mlxsw_core_lag_mode);
 
+enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_core_flood_mode(struct mlxsw_core *mlxsw_core)
+{
+	return mlxsw_core->bus->flood_mode(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_flood_mode);
+
 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
 {
 	return mlxsw_core->driver_priv;
...
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -38,6 +38,8 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
 int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
 enum mlxsw_cmd_mbox_config_profile_lag_mode
 mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core);
+enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_core_flood_mode(struct mlxsw_core *mlxsw_core);
 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
@@ -489,6 +491,7 @@ struct mlxsw_bus {
 	u32 (*read_utc_sec)(void *bus_priv);
 	u32 (*read_utc_nsec)(void *bus_priv);
 	enum mlxsw_cmd_mbox_config_profile_lag_mode (*lag_mode)(void *bus_priv);
+	enum mlxsw_cmd_mbox_config_profile_flood_mode (*flood_mode)(void *priv);
 	u8 features;
 };
...
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -106,7 +106,9 @@ struct mlxsw_pci {
 	u64 utc_sec_offset;
 	u64 utc_nsec_offset;
 	bool lag_mode_support;
+	bool cff_support;
 	enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
+	enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
 	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
 	u32 doorbell_offset;
 	struct mlxsw_core *core;
@@ -1251,6 +1253,10 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			mbox, 1);
 		mlxsw_cmd_mbox_config_profile_flood_mode_set(
 			mbox, profile->flood_mode);
+		mlxsw_pci->flood_mode = profile->flood_mode;
+	} else {
+		WARN_ON(1);
+		return -EINVAL;
 	}
 	if (profile->used_max_ib_mc) {
 		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
@@ -1654,6 +1660,9 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
 	mlxsw_pci->lag_mode_support =
 		mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
+	mlxsw_pci->cff_support =
+		mlxsw_cmd_mbox_query_fw_cff_support_get(mbox);
+
 	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
 	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
 	if (err)
@@ -1970,6 +1979,14 @@ mlxsw_pci_lag_mode(void *bus_priv)
 	return mlxsw_pci->lag_mode;
 }
 
+static enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_pci_flood_mode(void *bus_priv)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+	return mlxsw_pci->flood_mode;
+}
+
 static const struct mlxsw_bus mlxsw_pci_bus = {
 	.kind = "pci",
 	.init = mlxsw_pci_init,
@@ -1982,6 +1999,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
 	.read_utc_sec = mlxsw_pci_read_utc_sec,
 	.read_utc_nsec = mlxsw_pci_read_utc_nsec,
 	.lag_mode = mlxsw_pci_lag_mode,
+	.flood_mode = mlxsw_pci_flood_mode,
 	.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
 };
...
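
For completeness, a hedged sketch of how a driver's config profile would feed
this plumbing. The used_flood_mode and flood_mode fields are real members of
struct mlxsw_config_profile, but the CFF enum member is an assumption here,
as no profile selects it in this patch:

```c
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
	/* Tell mlxsw_pci_config_profile() that flood_mode is valid... */
	.used_flood_mode	= 1,
	/* ...and request CFF; mlxsw_core_flood_mode() then reports it. */
	.flood_mode		= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF,
};
```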