Commit 12dbc04d authored by Doug Ledford

Merge remote-tracking branch 'mlx5-next/mlx5-next' into HEAD

Take mlx5-next so we can apply a dependent two-patch series next.
Signed-off-by: Doug Ledford <dledford@redhat.com>
parents 7608bf40 82b11f07
@@ -60,7 +60,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	if (!__mlx5_ib_add(ibdev, profile))
 		return -EINVAL;
-	rep->rep_if[REP_IB].priv = ibdev;
+	rep->rep_data[REP_IB].priv = ibdev;
 	return 0;
 }
@@ -70,13 +70,13 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
 	struct mlx5_ib_dev *dev;
-	if (!rep->rep_if[REP_IB].priv ||
+	if (!rep->rep_data[REP_IB].priv ||
 	    rep->vport != MLX5_VPORT_UPLINK)
 		return;
 	dev = mlx5_ib_rep_to_dev(rep);
 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-	rep->rep_if[REP_IB].priv = NULL;
+	rep->rep_data[REP_IB].priv = NULL;
 }
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
@@ -84,16 +84,17 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return mlx5_ib_rep_to_dev(rep);
 }
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+	.load = mlx5_ib_vport_rep_load,
+	.unload = mlx5_ib_vport_rep_unload,
+	.get_proto_dev = mlx5_ib_vport_get_proto_dev,
+};
 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
-	rep_if.load = mlx5_ib_vport_rep_load;
-	rep_if.unload = mlx5_ib_vport_rep_unload;
-	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
-	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
 }
 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
......
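The hunks above capture the whole API shift: callbacks move out of each rep instance into one shared, const ops table, and per-rep state shrinks to rep_data[].priv. A new eswitch consumer would follow the same shape; the sketch below is hypothetical (the my_* names are stand-ins, REP_IB reused purely for illustration):

static int my_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	/* allocate and stash the driver-private object for this vport rep */
	rep->rep_data[REP_IB].priv = NULL; /* e.g. a kzalloc'ed state object */
	return 0;
}

static void my_rep_unload(struct mlx5_eswitch_rep *rep)
{
	/* free the private object and clear the slot */
	rep->rep_data[REP_IB].priv = NULL;
}

static void *my_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return rep->rep_data[REP_IB].priv;
}

static const struct mlx5_eswitch_rep_ops my_rep_ops = {
	.load = my_rep_load,
	.unload = my_rep_unload,
	.get_proto_dev = my_rep_get_proto_dev,
};

/* registration: mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_IB); */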
@@ -72,6 +72,6 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 static inline
 struct mlx5_ib_dev *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
 {
-	return (struct mlx5_ib_dev *)rep->rep_if[REP_IB].priv;
+	return rep->rep_data[REP_IB].priv;
 }
 #endif /* __MLX5_IB_REP_H__ */
@@ -920,6 +920,7 @@ struct mlx5_ib_lb_state {
 };
 struct mlx5_ib_pf_eq {
+	struct notifier_block irq_nb;
 	struct mlx5_ib_dev *dev;
 	struct mlx5_eq *core;
 	struct work_struct work;
......
@@ -1485,9 +1485,11 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
 	mlx5_eq_update_ci(eq->core, cc, 1);
 }
-static irqreturn_t mlx5_ib_eq_pf_int(int irq, void *eq_ptr)
+static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
+			     void *data)
 {
-	struct mlx5_ib_pf_eq *eq = eq_ptr;
+	struct mlx5_ib_pf_eq *eq =
+		container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
 	unsigned long flags;
 	if (spin_trylock_irqsave(&eq->lock, flags)) {
@@ -1550,20 +1552,26 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 		goto err_mempool;
 	}
+	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 	param = (struct mlx5_eq_param) {
-		.index = MLX5_EQ_PFAULT_IDX,
+		.irq_index = 0,
 		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 		.nent = MLX5_IB_NUM_PF_EQE,
-		.context = eq,
-		.handler = mlx5_ib_eq_pf_int
 	};
-	eq->core = mlx5_eq_create_generic(dev->mdev, "mlx5_ib_page_fault_eq", &param);
+	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
 	if (IS_ERR(eq->core)) {
 		err = PTR_ERR(eq->core);
 		goto err_wq;
 	}
+	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
+	if (err) {
+		mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
+		goto err_eq;
+	}
 	return 0;
+err_eq:
+	mlx5_eq_destroy_generic(dev->mdev, eq->core);
 err_wq:
 	destroy_workqueue(eq->wq);
 err_mempool:
@@ -1576,6 +1584,7 @@ mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 {
 	int err;
+	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
 	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
 	cancel_work_sync(&eq->work);
 	destroy_workqueue(eq->wq);
......
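The ODP hunks above are the template for the new generic-EQ contract: the ULP keeps a notifier_block next to its EQ, creates the EQ without a handler, and only then attaches the notifier via mlx5_eq_enable(); teardown runs mlx5_eq_disable() before destroy. A condensed sketch of that lifecycle, with hypothetical my_* names:

struct my_eq {
	struct mlx5_eq *core;
	struct notifier_block irq_nb;
};

static int my_eq_handler(struct notifier_block *nb, unsigned long type,
			 void *data)
{
	struct my_eq *my = container_of(nb, struct my_eq, irq_nb);

	/* consume EQEs from my->core via mlx5_eq_get_eqe()/mlx5_eq_update_ci() */
	return NOTIFY_OK;
}

static int my_eq_open(struct mlx5_core_dev *mdev, struct my_eq *my)
{
	struct mlx5_eq_param param = {
		.irq_index = 0,
		.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
		.nent = 64,
	};

	my->irq_nb.notifier_call = my_eq_handler;
	my->core = mlx5_eq_create_generic(mdev, &param);
	if (IS_ERR(my->core))
		return PTR_ERR(my->core);

	return mlx5_eq_enable(mdev, my->core, &my->irq_nb);
}

/* teardown mirrors setup: mlx5_eq_disable(mdev, my->core, &my->irq_nb);
 * then mlx5_eq_destroy_generic(mdev, my->core);
 */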
@@ -13,7 +13,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
 #
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
-		transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+		transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
 		fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
 		lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o
......
@@ -316,7 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
 	case MLX5_CMD_OP_DEALLOC_MEMIC:
 	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
-	case MLX5_CMD_OP_QUERY_HOST_PARAMS:
+	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 		return MLX5_CMD_STAT_OK;
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -628,7 +628,7 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
 	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
 	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
-	MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS);
+	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
 	default: return "unknown command opcode";
 	}
 }
......
@@ -83,30 +83,3 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
 	mlx5_peer_pf_cleanup(dev);
 }
-static int mlx5_query_host_params_context(struct mlx5_core_dev *dev,
-					  u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {};
-	MLX5_SET(query_host_params_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_HOST_PARAMS);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{
-	u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {};
-	int err;
-	err = mlx5_query_host_params_context(dev, out, sizeof(out));
-	if (err)
-		return err;
-	*num_vf = MLX5_GET(query_host_params_out, out,
-			   host_params_context.host_num_of_vfs);
-	mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf);
-	return 0;
-}
@@ -16,7 +16,6 @@ enum {
 bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
 int mlx5_ec_init(struct mlx5_core_dev *dev);
 void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
-int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf);
 #else /* CONFIG_MLX5_ESWITCH */
@@ -24,9 +23,6 @@ static inline bool
 mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; }
 static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {}
-static inline int
-mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf)
-{ return -EOPNOTSUPP; }
 #endif /* CONFIG_MLX5_ESWITCH */
......
@@ -1752,7 +1752,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	}
 	rpriv->netdev = netdev;
-	rep->rep_if[REP_ETH].priv = rpriv;
+	rep->rep_data[REP_ETH].priv = rpriv;
 	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 	if (rep->vport == MLX5_VPORT_UPLINK) {
@@ -1826,16 +1826,17 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return rpriv->netdev;
 }
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+	.load = mlx5e_vport_rep_load,
+	.unload = mlx5e_vport_rep_unload,
+	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
+};
 void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
-	rep_if.load = mlx5e_vport_rep_load;
-	rep_if.unload = mlx5e_vport_rep_unload;
-	rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
-	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH);
+	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
 }
 void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
......
@@ -91,7 +91,7 @@ struct mlx5e_rep_priv {
 static inline
 struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
 {
-	return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+	return rep->rep_data[REP_ETH].priv;
 }
 struct mlx5e_neigh {
......
@@ -1686,13 +1686,23 @@ static int eswitch_vport_event(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
+int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+	MLX5_SET(query_esw_functions_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
-	int vf_nvports = 0, total_nvports = 0;
 	struct mlx5_vport *vport;
+	int total_nvports = 0;
 	int err;
 	int i, enabled_events;
@@ -1711,16 +1721,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
 	if (mode == SRIOV_OFFLOADS) {
-		if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-			err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
-			if (err)
-				return err;
+		if (mlx5_core_is_ecpf_esw_manager(esw->dev))
 			total_nvports = esw->total_vports;
-		} else {
-			vf_nvports = nvfs;
+		else
 			total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
-		}
 	}
 	esw->mode = mode;
@@ -1733,7 +1738,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	} else {
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-		err = esw_offloads_init(esw, vf_nvports, total_nvports);
+		err = esw_offloads_init(esw, nvfs, total_nvports);
 	}
 	if (err)
@@ -2452,6 +2457,17 @@ u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
+enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
+{
+	struct mlx5_eswitch *esw;
+	esw = dev->priv.eswitch;
+	return ESW_ALLOWED(esw) ? esw->offloads.encap :
+				  DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
 bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
 {
 	if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
......
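mlx5_esw_query_functions() replaces the old host-params query and hands back the raw query_esw_functions_out mailbox, so each caller extracts the field it cares about with MLX5_GET. A hedged sketch (query_host_vf_count is a hypothetical helper, not part of this diff):

static int query_host_vf_count(struct mlx5_core_dev *dev, u16 *num_vfs)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
	int err;

	err = mlx5_esw_query_functions(dev, out, sizeof(out));
	if (err)
		return err;

	*num_vfs = MLX5_GET(query_esw_functions_out, out,
			    host_params_context.host_num_of_vfs);
	return 0;
}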
@@ -173,9 +173,10 @@ struct mlx5_esw_offload {
 	struct mutex peer_mutex;
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
 	u8 inline_mode;
 	u64 num_flows;
-	u8 encap;
+	enum devlink_eswitch_encap_mode encap;
 };
 /* E-Switch MC FDB table hash node */
@@ -190,7 +191,7 @@ struct mlx5_host_work {
 	struct mlx5_eswitch *esw;
 };
-struct mlx5_host_info {
+struct mlx5_esw_functions {
 	struct mlx5_nb nb;
 	u16 num_vfs;
 };
@@ -219,7 +220,7 @@ struct mlx5_eswitch {
 	int mode;
 	int nvports;
 	u16 manager_vport;
-	struct mlx5_host_info host_info;
+	struct mlx5_esw_functions esw_funcs;
 };
 void esw_offloads_cleanup(struct mlx5_eswitch *esw);
@@ -356,9 +357,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
 					 struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
+					enum devlink_eswitch_encap_mode encap,
 					struct netlink_ext_ack *extack);
-int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
+					enum devlink_eswitch_encap_mode *encap);
 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -386,6 +389,8 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
 			       struct mlx5_core_dev *dev1);
+int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen);
 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
 #define esw_info(__dev, format, ...) \
@@ -404,6 +409,18 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
 		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
 }
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
+{
+	/* Ideally the device should have the functions-changed capability
+	 * set wherever such events must be processed, i.e. on the eswitch
+	 * manager device, whether that is the ECPF or the PF. However,
+	 * some ECPF-based devices might not have the capability set, so
+	 * OR in the ECPF check to cover them.
+	 */
+	return MLX5_CAP_ESW(dev, esw_functions_changed) ||
+	       mlx5_core_is_ecpf_esw_manager(dev);
+}
 static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
 {
 	/* The uplink is always the last element of the array. */
@@ -498,6 +515,12 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
+static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline int
+mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
+{
+	return -EOPNOTSUPP;
+}
 #define FDB_MAX_CHAIN 1
 #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
......
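mlx5_eswitch_is_funcs_handler() decides which function listens for the renamed event. A sketch of how a listener would be wired up, assuming mlx5's MLX5_NB_INIT/mlx5_eq_notifier_register helpers (not shown in this diff) and a hypothetical callback:

static int esw_funcs_changed_handler(struct notifier_block *nb,
				     unsigned long type, void *data)
{
	/* re-run mlx5_esw_query_functions() and rescale vport state here */
	return NOTIFY_OK;
}

static void esw_funcs_listen(struct mlx5_eswitch *esw)
{
	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
		return;

	MLX5_NB_INIT(&esw->esw_funcs.nb, esw_funcs_changed_handler,
		     ESW_FUNCTIONS_CHANGED);
	mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
}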
@@ -108,8 +108,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_STALL_EVENT";
 	case MLX5_EVENT_TYPE_CMD:
 		return "MLX5_EVENT_TYPE_CMD";
-	case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE:
-		return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE";
+	case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
+		return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
 	case MLX5_EVENT_TYPE_PAGE_REQUEST:
 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
 	case MLX5_EVENT_TYPE_PAGE_FAULT:
......
@@ -147,6 +147,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 {
 	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
 	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
 	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
 	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
 	struct mlx5_core_dev *dev = ns->dev;
@@ -167,6 +168,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 		 en_decap);
 	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
 		 en_encap);
+	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
+		 term);
 	switch (ft->op_mod) {
 	case FS_FT_OP_MOD_NORMAL:
......
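With the flag plumbed through fs_cmd, a termination table is requested purely via flow-table attributes. A hedged sketch assuming the standard mlx5_create_flow_table() attr API and an fdb_ns namespace pointer already in hand; the sizing is illustrative:

static int create_termination_table(struct mlx5_flow_namespace *fdb_ns,
				    struct mlx5_flow_table **ft_out)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ft_attr.max_fte = 1;			/* illustrative sizing */
	ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
	ft = mlx5_create_flow_table(fdb_ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	*ft_out = ft;
	return 0;
}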
@@ -7,7 +7,6 @@
 #include <linux/mlx5/eq.h>
 #include <linux/mlx5/cq.h>
-#define MLX5_MAX_IRQ_NAME (32)
 #define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
 struct mlx5_eq_tasklet {
@@ -36,8 +35,14 @@ struct mlx5_eq {
 	struct mlx5_rsc_debug *dbg;
 };
+struct mlx5_eq_async {
+	struct mlx5_eq core;
+	struct notifier_block irq_nb;
+};
 struct mlx5_eq_comp {
-	struct mlx5_eq core; /* Must be first */
+	struct mlx5_eq core;
+	struct notifier_block irq_nb;
 	struct mlx5_eq_tasklet tasklet_ctx;
 	struct list_head list;
 };
......
@@ -169,18 +169,28 @@ static struct mlx5_profile profile[] = {
 #define FW_INIT_TIMEOUT_MILI		2000
 #define FW_INIT_WAIT_MS			2
-#define FW_PRE_INIT_TIMEOUT_MILI	10000
+#define FW_PRE_INIT_TIMEOUT_MILI	120000
+#define FW_INIT_WARN_MESSAGE_INTERVAL	20000
-static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
+			u32 warn_time_mili)
 {
+	unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
 	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
 	int err = 0;
+	BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);
 	while (fw_initializing(dev)) {
 		if (time_after(jiffies, end)) {
 			err = -EBUSY;
 			break;
 		}
+		if (warn_time_mili && time_after(jiffies, warn)) {
+			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
+				       jiffies_to_msecs(end - warn) / 1000);
+			warn = jiffies + msecs_to_jiffies(warn_time_mili);
+		}
 		msleep(FW_INIT_WAIT_MS);
 	}
@@ -794,10 +804,16 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 		goto err_devcom;
 	}
+	err = mlx5_irq_table_init(dev);
+	if (err) {
+		mlx5_core_err(dev, "failed to initialize irq table\n");
+		goto err_devcom;
+	}
 	err = mlx5_eq_table_init(dev);
 	if (err) {
 		mlx5_core_err(dev, "failed to initialize eq\n");
-		goto err_devcom;
+		goto err_irq_cleanup;
 	}
 	err = mlx5_events_init(dev);
@@ -834,32 +850,32 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 		goto err_rl_cleanup;
 	}
-	err = mlx5_eswitch_init(dev);
+	err = mlx5_sriov_init(dev);
 	if (err) {
-		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
+		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
 		goto err_mpfs_cleanup;
 	}
-	err = mlx5_sriov_init(dev);
+	err = mlx5_eswitch_init(dev);
 	if (err) {
-		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
-		goto err_eswitch_cleanup;
+		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
+		goto err_sriov_cleanup;
 	}
 	err = mlx5_fpga_init(dev);
 	if (err) {
 		mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
-		goto err_sriov_cleanup;
+		goto err_eswitch_cleanup;
 	}
 	dev->tracer = mlx5_fw_tracer_create(dev);
 	return 0;
-err_sriov_cleanup:
-	mlx5_sriov_cleanup(dev);
 err_eswitch_cleanup:
 	mlx5_eswitch_cleanup(dev->priv.eswitch);
+err_sriov_cleanup:
+	mlx5_sriov_cleanup(dev);
 err_mpfs_cleanup:
 	mlx5_mpfs_cleanup(dev);
 err_rl_cleanup:
@@ -873,6 +889,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 	mlx5_events_cleanup(dev);
 err_eq_cleanup:
 	mlx5_eq_table_cleanup(dev);
+err_irq_cleanup:
+	mlx5_irq_table_cleanup(dev);
 err_devcom:
 	mlx5_devcom_unregister_device(dev->priv.devcom);
@@ -883,8 +901,8 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 {
 	mlx5_fw_tracer_destroy(dev->tracer);
 	mlx5_fpga_cleanup(dev);
-	mlx5_sriov_cleanup(dev);
 	mlx5_eswitch_cleanup(dev->priv.eswitch);
+	mlx5_sriov_cleanup(dev);
 	mlx5_mpfs_cleanup(dev);
 	mlx5_cleanup_rl_table(dev);
 	mlx5_vxlan_destroy(dev->vxlan);
@@ -895,6 +913,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_cq_debugfs_cleanup(dev);
 	mlx5_events_cleanup(dev);
 	mlx5_eq_table_cleanup(dev);
+	mlx5_irq_table_cleanup(dev);
 	mlx5_devcom_unregister_device(dev->priv.devcom);
 }
@@ -911,7 +930,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 	/* wait for firmware to accept initialization segments configurations
 	 */
-	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
 	if (err) {
 		mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
 			      FW_PRE_INIT_TIMEOUT_MILI);
@@ -924,7 +943,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 		return err;
 	}
-	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
 	if (err) {
 		mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
 			      FW_INIT_TIMEOUT_MILI);
@@ -1028,6 +1047,12 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 	mlx5_events_start(dev);
 	mlx5_pagealloc_start(dev);
+	err = mlx5_irq_table_create(dev);
+	if (err) {
+		mlx5_core_err(dev, "Failed to alloc IRQs\n");
+		goto err_irq_table;
+	}
 	err = mlx5_eq_table_create(dev);
 	if (err) {
 		mlx5_core_err(dev, "Failed to create EQs\n");
@@ -1099,6 +1124,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 err_fw_tracer:
 	mlx5_eq_table_destroy(dev);
 err_eq_table:
+	mlx5_irq_table_destroy(dev);
+err_irq_table:
 	mlx5_pagealloc_stop(dev);
 	mlx5_events_stop(dev);
 	mlx5_put_uars_page(dev, dev->priv.uar);
@@ -1115,6 +1142,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
 	mlx5_fpga_device_stop(dev);
 	mlx5_fw_tracer_cleanup(dev->tracer);
 	mlx5_eq_table_destroy(dev);
+	mlx5_irq_table_destroy(dev);
 	mlx5_pagealloc_stop(dev);
 	mlx5_events_stop(dev);
 	mlx5_put_uars_page(dev, dev->priv.uar);
......
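Two invariants fall out of the main.c hunks: the IRQ table brackets the EQ table in both the init/cleanup and load/unload paths, and teardown always runs in reverse order of setup. A condensed sketch of the load-side pairing, using only the functions this diff introduces (the my_* wrappers are hypothetical):

static int my_load(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_irq_table_create(dev);	/* vectors + request_irq() */
	if (err)
		return err;

	err = mlx5_eq_table_create(dev);	/* EQs attach notifiers to vectors */
	if (err)
		mlx5_irq_table_destroy(dev);
	return err;
}

static void my_unload(struct mlx5_core_dev *dev)
{
	mlx5_eq_table_destroy(dev);		/* detach notifiers first */
	mlx5_irq_table_destroy(dev);		/* then free the vectors */
}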
@@ -153,6 +153,19 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
 void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
 void mlx5_lag_remove(struct mlx5_core_dev *dev);
+int mlx5_irq_table_init(struct mlx5_core_dev *dev);
+void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
+int mlx5_irq_table_create(struct mlx5_core_dev *dev);
+void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+		       struct notifier_block *nb);
+int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
+		       struct notifier_block *nb);
+struct cpumask *
+mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
+struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
+int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
 int mlx5_events_init(struct mlx5_core_dev *dev);
 void mlx5_events_cleanup(struct mlx5_core_dev *dev);
 void mlx5_events_start(struct mlx5_core_dev *dev);
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_MAX_IRQ_NAME (32)

struct mlx5_irq {
	struct atomic_notifier_head nh;
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
};

struct mlx5_irq_table {
	struct mlx5_irq *irq;
	int nvec;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap;
#endif
};

int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *irq_table;

	irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
	if (!irq_table)
		return -ENOMEM;

	dev->priv.irq_table = irq_table;
	return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
	kvfree(dev->priv.irq_table);
}

int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
{
	return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
}

static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
{
	struct mlx5_irq_table *irq_table = dev->priv.irq_table;

	return &irq_table->irq[vecidx];
}

int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
		       struct notifier_block *nb)
{
	struct mlx5_irq *irq;

	irq = &irq_table->irq[vecidx];
	return atomic_notifier_chain_register(&irq->nh, nb);
}

int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
		       struct notifier_block *nb)
{
	struct mlx5_irq *irq;

	irq = &irq_table->irq[vecidx];
	return atomic_notifier_chain_unregister(&irq->nh, nb);
}

static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
{
	atomic_notifier_call_chain(nh, 0, NULL);
	return IRQ_HANDLED;
}

static void irq_set_name(char *name, int vecidx)
{
	if (vecidx == 0) {
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
		return;
	}

	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
}

static int request_irqs(struct mlx5_core_dev *dev, int nvec)
{
	char name[MLX5_MAX_IRQ_NAME];
	int err;
	int i;

	for (i = 0; i < nvec; i++) {
		struct mlx5_irq *irq = mlx5_irq_get(dev, i);
		int irqn = pci_irq_vector(dev->pdev, i);

		irq_set_name(name, i);
		ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
		snprintf(irq->name, MLX5_MAX_IRQ_NAME,
			 "%s@pci:%s", name, pci_name(dev->pdev));
		err = request_irq(irqn, mlx5_irq_int_handler, 0, irq->name,
				  &irq->nh);
		if (err) {
			mlx5_core_err(dev, "Failed to request irq\n");
			goto err_request_irq;
		}
	}
	return 0;

err_request_irq:
	/* vector i failed request_irq(); free only vectors 0..i-1 */
	while (i--) {
		struct mlx5_irq *irq = mlx5_irq_get(dev, i);
		int irqn = pci_irq_vector(dev->pdev, i);

		free_irq(irqn, &irq->nh);
	}
	return err;
}

static void irq_clear_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
	struct mlx5_irq_table *irq_table = dev->priv.irq_table;

	free_irq_cpu_rmap(irq_table->rmap);
#endif
}

static int irq_set_rmap(struct mlx5_core_dev *mdev)
{
	int err = 0;
#ifdef CONFIG_RFS_ACCEL
	struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
	int num_affinity_vec;
	int vecidx;

	num_affinity_vec = mlx5_irq_get_num_comp(irq_table);
	irq_table->rmap = alloc_irq_cpu_rmap(num_affinity_vec);
	if (!irq_table->rmap) {
		err = -ENOMEM;
		mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
		goto err_out;
	}

	vecidx = MLX5_IRQ_VEC_COMP_BASE;
	for (; vecidx < irq_table->nvec; vecidx++) {
		err = irq_cpu_rmap_add(irq_table->rmap,
				       pci_irq_vector(mdev->pdev, vecidx));
		if (err) {
			mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
				      err);
			goto err_irq_cpu_rmap_add;
		}
	}
	return 0;

err_irq_cpu_rmap_add:
	irq_clear_rmap(mdev);
err_out:
#endif
	return err;
}

/* Completion IRQ vectors */

static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
	struct mlx5_irq *irq;
	int irqn;

	irq = mlx5_irq_get(mdev, vecidx);
	irqn = pci_irq_vector(mdev->pdev, vecidx);
	if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, mdev->priv.numa_node),
			irq->mask);
	if (IS_ENABLED(CONFIG_SMP) &&
	    irq_set_affinity_hint(irqn, irq->mask))
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
			       irqn);

	return 0;
}

static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
	struct mlx5_irq *irq;
	int irqn;

	irq = mlx5_irq_get(mdev, vecidx);
	irqn = pci_irq_vector(mdev->pdev, vecidx);
	irq_set_affinity_hint(irqn, NULL);
	free_cpumask_var(irq->mask);
}

static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
{
	int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
	int err;
	int i;

	for (i = 0; i < nvec; i++) {
		err = set_comp_irq_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}
	return 0;

err_out:
	for (i--; i >= 0; i--)
		clear_comp_irq_affinity_hint(mdev, i);
	return err;
}

static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
{
	int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
	int i;

	for (i = 0; i < nvec; i++)
		clear_comp_irq_affinity_hint(mdev, i);
}

struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
{
	return irq_table->irq[vecidx].mask;
}

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *irq_table)
{
	return irq_table->rmap;
}
#endif

static void unrequest_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;
	int i;

	for (i = 0; i < table->nvec; i++)
		free_irq(pci_irq_vector(dev->pdev, i),
			 &mlx5_irq_get(dev, i)->nh);
}

int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_irq_table *table = priv->irq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int err;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_IRQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
		return -ENOMEM;

	table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
	if (!table->irq)
		return -ENOMEM;

	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
				     nvec, PCI_IRQ_MSIX);
	if (nvec < 0) {
		err = nvec;
		goto err_free_irq;
	}

	table->nvec = nvec;

	err = irq_set_rmap(dev);
	if (err)
		goto err_set_rmap;

	err = request_irqs(dev, nvec);
	if (err)
		goto err_request_irqs;

	err = set_comp_irq_affinity_hints(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
		goto err_set_affinity;
	}

	return 0;

err_set_affinity:
	unrequest_irqs(dev);
err_request_irqs:
	irq_clear_rmap(dev);
err_set_rmap:
	pci_free_irq_vectors(dev->pdev);
err_free_irq:
	kfree(table->irq);
	return err;
}

void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;
	int i;

	/* free_irq() requires that affinity hints and the rmap are cleared
	 * before it is called. Hence the asymmetry with irq_set_rmap(),
	 * which must run after pci_alloc_irq_vectors() but before
	 * request_irq().
	 */
	irq_clear_rmap(dev);
	clear_comp_irqs_affinity_hints(dev);
	for (i = 0; i < table->nvec; i++)
		free_irq(pci_irq_vector(dev->pdev, i),
			 &mlx5_irq_get(dev, i)->nh);
	pci_free_irq_vectors(dev->pdev);
	kfree(table->irq);
}
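From a consumer's point of view the new file means no more request_irq(): a vector is shared by attaching an atomic notifier that mlx5_irq_int_handler() fires in hard-irq context. A minimal, hypothetical sketch:

static int my_vec_handler(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	/* invoked from mlx5_irq_int_handler() in hard-irq context */
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_vec_handler };

static int my_attach(struct mlx5_core_dev *dev)
{
	/* vector 0 is the "mlx5_async" vector; detach symmetrically with
	 * mlx5_irq_detach_nb() when done
	 */
	return mlx5_irq_attach_nb(dev->priv.irq_table, 0, &my_nb);
}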
@@ -106,10 +106,10 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
 	return 0;
-destroy_flow_table:
-	mlx5_destroy_flow_table(ft);
 destroy_flow_group:
 	mlx5_destroy_flow_group(fg);
+destroy_flow_table:
+	mlx5_destroy_flow_table(ft);
 free:
 	kvfree(spec);
 	kvfree(flow_group_in);
......
@@ -208,6 +208,27 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
 	mlx5_device_disable_sriov(dev);
 }
+static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
+{
+	u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
+	u16 host_total_vfs;
+	int err;
+	if (mlx5_core_is_ecpf_esw_manager(dev)) {
+		err = mlx5_esw_query_functions(dev, out, sizeof(out));
+		host_total_vfs = MLX5_GET(query_esw_functions_out, out,
+					  host_params_context.host_total_vfs);
+		/* Old FW doesn't support getting total_vfs from esw func
+		 * but supports getting it from pci_sriov.
+		 */
+		if (!err && host_total_vfs)
+			return host_total_vfs;
+	}
+	return pci_sriov_get_totalvfs(dev->pdev);
+}
 int mlx5_sriov_init(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
@@ -218,6 +239,7 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
 		return 0;
 	total_vfs = pci_sriov_get_totalvfs(pdev);
+	sriov->max_vfs = mlx5_get_max_vfs(dev);
 	sriov->num_vfs = pci_num_vf(pdev);
 	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
 	if (!sriov->vfs_ctx)
......
@@ -342,7 +342,7 @@ enum mlx5_event {
 	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
 	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
-	MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe,
+	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
 	MLX5_EVENT_TYPE_DCT_DRAINED        = 0x1c,
......
@@ -107,6 +107,7 @@ enum {
 	MLX5_REG_FPGA_CAP	 = 0x4022,
 	MLX5_REG_FPGA_CTRL	 = 0x4023,
 	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
+	MLX5_REG_CORE_DUMP	 = 0x402e,
 	MLX5_REG_PCAP		 = 0x5001,
 	MLX5_REG_PMTU		 = 0x5003,
 	MLX5_REG_PTYS		 = 0x5004,
@@ -469,6 +470,7 @@ struct mlx5_core_sriov {
 	struct mlx5_vf_context	*vfs_ctx;
 	int			num_vfs;
 	int			enabled_vfs;
+	u16			max_vfs;
 };
 struct mlx5_fc_stats {
@@ -490,6 +492,7 @@ struct mlx5_eswitch;
 struct mlx5_lag;
 struct mlx5_devcom;
 struct mlx5_eq_table;
+struct mlx5_irq_table;
 struct mlx5_rate_limit {
 	u32 rate;
@@ -519,6 +522,8 @@ struct mlx5_core_roce {
 };
 struct mlx5_priv {
+	/* IRQ table valid only for real pci devices PF or VF */
+	struct mlx5_irq_table	*irq_table;
 	struct mlx5_eq_table	*eq_table;
 	/* pages stuff */
@@ -1102,13 +1107,9 @@ static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev)
 	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
 }
-#define MLX5_HOST_PF_MAX_VFS	(127u)
 static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev)
 {
-	if (mlx5_core_is_ecpf_esw_manager(dev))
-		return MLX5_HOST_PF_MAX_VFS;
-	else
-		return pci_sriov_get_totalvfs(dev->pdev);
+	return dev->priv.sriov.max_vfs;
 }
 static inline int mlx5_get_gid_table_len(u16 param)
......
@@ -4,17 +4,7 @@
 #ifndef MLX5_CORE_EQ_H
 #define MLX5_CORE_EQ_H
-enum {
-	MLX5_EQ_PAGEREQ_IDX        = 0,
-	MLX5_EQ_CMD_IDX            = 1,
-	MLX5_EQ_ASYNC_IDX          = 2,
-	/* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */
-	MLX5_EQ_PFAULT_IDX         = 3,
-	MLX5_EQ_MAX_ASYNC_EQS,
-	/* completion eqs vector indices start here */
-	MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS,
-};
+#define MLX5_IRQ_VEC_COMP_BASE 1
 #define MLX5_NUM_CMD_EQE (32)
 #define MLX5_NUM_ASYNC_EQE (0x1000)
 #define MLX5_NUM_SPARE_EQE (0x80)
@@ -23,18 +13,19 @@ struct mlx5_eq;
 struct mlx5_core_dev;
 struct mlx5_eq_param {
-	u8 index;
+	u8 irq_index;
 	int nent;
 	u64 mask;
-	void *context;
-	irq_handler_t handler;
 };
 struct mlx5_eq *
-mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
-		       struct mlx5_eq_param *param);
+mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
 int
 mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+		   struct notifier_block *nb);
+void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+		     struct notifier_block *nb);
 struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
 void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
......
@@ -7,6 +7,7 @@
 #define _MLX5_ESWITCH_
 #include <linux/mlx5/driver.h>
+#include <net/devlink.h>
 #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
@@ -29,17 +30,19 @@ enum {
 };
 struct mlx5_eswitch_rep;
-struct mlx5_eswitch_rep_if {
-	int (*load)(struct mlx5_core_dev *dev,
-		    struct mlx5_eswitch_rep *rep);
+struct mlx5_eswitch_rep_ops {
+	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
 	void (*unload)(struct mlx5_eswitch_rep *rep);
 	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+};
+struct mlx5_eswitch_rep_data {
 	void *priv;
 	atomic_t state;
 };
 struct mlx5_eswitch_rep {
-	struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
+	struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
 	u16 vport;
 	u8 hw_id[ETH_ALEN];
 	u16 vlan;
@@ -47,7 +50,7 @@ struct mlx5_eswitch_rep {
 };
 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
-				      struct mlx5_eswitch_rep_if *rep_if,
+				      const struct mlx5_eswitch_rep_ops *ops,
 				      u8 rep_type);
 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
@@ -60,4 +63,15 @@ u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
 struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
 				    int vport, u32 sqn);
+#ifdef CONFIG_MLX5_ESWITCH
+enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
+#else  /* CONFIG_MLX5_ESWITCH */
+static inline enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
+{
+	return DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
 #endif
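The exported getter is the piece code outside the eswitch can call without seeing its internals, and the stub keeps !CONFIG_MLX5_ESWITCH builds honest by degrading to NONE. A hedged caller sketch (my_encap_offload_on is hypothetical):

static bool my_encap_offload_on(const struct mlx5_core_dev *dev)
{
	/* true when basic encap offload was enabled through devlink */
	return mlx5_eswitch_get_encap_mode(dev) ==
	       DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
}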
@@ -47,6 +47,7 @@ enum {
 enum {
 	MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
 	MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
+	MLX5_FLOW_TABLE_TERMINATION = BIT(2),
 };
 #define LEFTOVERS_RULE_NUM 2
......
@@ -155,7 +155,7 @@ enum {
 	MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY     = 0x725,
 	MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY       = 0x726,
 	MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS        = 0x727,
-	MLX5_CMD_OP_QUERY_HOST_PARAMS             = 0x740,
+	MLX5_CMD_OP_QUERY_ESW_FUNCTIONS           = 0x740,
 	MLX5_CMD_OP_QUERY_VPORT_STATE             = 0x750,
 	MLX5_CMD_OP_MODIFY_VPORT_STATE            = 0x751,
 	MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT       = 0x752,
@@ -382,7 +382,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8 reformat_and_modify_action[0x1];
 	u8 reserved_at_15[0x2];
 	u8 table_miss_action_domain[0x1];
-	u8 reserved_at_18[0x8];
+	u8 termination_table[0x1];
+	u8 reserved_at_19[0x7];
 	u8 reserved_at_20[0x2];
 	u8 log_max_ft_size[0x6];
 	u8 log_max_modify_header_context[0x8];
@@ -664,7 +665,9 @@ struct mlx5_ifc_e_switch_cap_bits {
 	u8 vport_svlan_insert[0x1];
 	u8 vport_cvlan_insert_if_not_exist[0x1];
 	u8 vport_cvlan_insert_overwrite[0x1];
-	u8 reserved_at_5[0x16];
+	u8 reserved_at_5[0x14];
+	u8 esw_functions_changed[0x1];
+	u8 reserved_at_1a[0x1];
 	u8 ecpf_vport_exists[0x1];
 	u8 counter_eswitch_affinity[0x1];
 	u8 merged_eswitch[0x1];
@@ -715,7 +718,9 @@ struct mlx5_ifc_qos_cap_bits {
 };
 struct mlx5_ifc_debug_cap_bits {
-	u8 reserved_at_0[0x20];
+	u8 core_dump_general[0x1];
+	u8 core_dump_qp[0x1];
+	u8 reserved_at_2[0x1e];
 	u8 reserved_at_20[0x2];
 	u8 stall_detect[0x1];
@@ -2531,6 +2536,7 @@ union mlx5_ifc_hca_cap_union_bits {
 	struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
 	struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
 	struct mlx5_ifc_qos_cap_bits qos_cap;
+	struct mlx5_ifc_debug_cap_bits debug_cap;
 	struct mlx5_ifc_fpga_cap_bits fpga_cap;
 	u8 reserved_at_0[0x8000];
 };
@@ -7236,7 +7242,8 @@ struct mlx5_ifc_create_flow_table_out_bits {
 struct mlx5_ifc_flow_table_context_bits {
 	u8 reformat_en[0x1];
 	u8 decap_en[0x1];
-	u8 reserved_at_2[0x2];
+	u8 reserved_at_2[0x1];
+	u8 termination_table[0x1];
 	u8 table_miss_action[0x4];
 	u8 level[0x8];
 	u8 reserved_at_10[0x8];
@@ -8546,6 +8553,18 @@ struct mlx5_ifc_qcam_reg_bits {
 	u8 reserved_at_1c0[0x80];
 };
+struct mlx5_ifc_core_dump_reg_bits {
+	u8 reserved_at_0[0x18];
+	u8 core_dump_type[0x8];
+	u8 reserved_at_20[0x30];
+	u8 vhca_id[0x10];
+	u8 reserved_at_60[0x8];
+	u8 qpn[0x18];
+	u8 reserved_at_80[0x180];
+};
 struct mlx5_ifc_pcap_reg_bits {
 	u8 reserved_at_0[0x8];
 	u8 local_port[0x8];
@@ -9692,7 +9711,7 @@ struct mlx5_ifc_host_params_context_bits {
 	u8 reserved_at_8[0x8];
 	u8 host_num_of_vfs[0x10];
-	u8 reserved_at_20[0x10];
+	u8 host_total_vfs[0x10];
 	u8 host_pci_bus[0x10];
 	u8 reserved_at_40[0x10];
@@ -9704,7 +9723,7 @@ struct mlx5_ifc_host_params_context_bits {
 	u8 reserved_at_80[0x180];
 };
-struct mlx5_ifc_query_host_params_in_bits {
+struct mlx5_ifc_query_esw_functions_in_bits {
 	u8 opcode[0x10];
 	u8 reserved_at_10[0x10];
@@ -9714,7 +9733,7 @@ struct mlx5_ifc_query_host_params_in_bits {
 	u8 reserved_at_40[0x40];
 };
-struct mlx5_ifc_query_host_params_out_bits {
+struct mlx5_ifc_query_esw_functions_out_bits {
 	u8 status[0x8];
 	u8 reserved_at_8[0x18];
......
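The new core_dump_reg layout pairs with the MLX5_REG_CORE_DUMP id added to driver.h; driving it would go through the usual access-register path. A heavily hedged sketch (the type value and write semantics are assumptions, not taken from this diff):

static int trigger_core_dump(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(core_dump_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(core_dump_reg)];

	/* 0 = general dump is an assumption; consult the PRM for real values */
	MLX5_SET(core_dump_reg, in, core_dump_type, 0);
	return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_CORE_DUMP, 0, 1);
}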
@@ -530,8 +530,10 @@ struct devlink_ops {
 	int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
 	int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode,
 				       struct netlink_ext_ack *extack);
-	int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
-	int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode,
+	int (*eswitch_encap_mode_get)(struct devlink *devlink,
+				      enum devlink_eswitch_encap_mode *p_encap_mode);
+	int (*eswitch_encap_mode_set)(struct devlink *devlink,
+				      enum devlink_eswitch_encap_mode encap_mode,
 				      struct netlink_ext_ack *extack);
 	int (*info_get)(struct devlink *devlink, struct devlink_info_req *req,
 			struct netlink_ext_ack *extack);
......
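For drivers implementing these devlink ops, only the parameter types change. A sketch of an adjusted pair, backed by hypothetical driver state:

static int my_encap_mode_get(struct devlink *devlink,
			     enum devlink_eswitch_encap_mode *p_encap_mode)
{
	*p_encap_mode = DEVLINK_ESWITCH_ENCAP_MODE_NONE; /* from driver state */
	return 0;
}

static int my_encap_mode_set(struct devlink *devlink,
			     enum devlink_eswitch_encap_mode encap_mode,
			     struct netlink_ext_ack *extack)
{
	if (encap_mode != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    encap_mode != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;
	return 0; /* persist into driver state */
}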
@@ -1552,7 +1552,8 @@ static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
 				   u32 seq, int flags)
 {
 	const struct devlink_ops *ops = devlink->ops;
-	u8 inline_mode, encap_mode;
+	enum devlink_eswitch_encap_mode encap_mode;
+	u8 inline_mode;
 	void *hdr;
 	int err = 0;
 	u16 mode;
@@ -1628,7 +1629,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 {
 	struct devlink *devlink = info->user_ptr[0];
 	const struct devlink_ops *ops = devlink->ops;
-	u8 inline_mode, encap_mode;
+	enum devlink_eswitch_encap_mode encap_mode;
+	u8 inline_mode;
 	int err = 0;
 	u16 mode;
......