Commit 94229d45 authored by David S. Miller

Merge tag 'mlx5-updates-2020-03-13' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-03-13

Misc updates to mlx5 core and E-Switch driver:

1) BlueField: update VF vports config when the number of VFs changes

From Bodong, various misc cleanups and refactoring of the vport
enabling/disabling routines so they can be called dynamically, not only
on E-Switch load.

This allows the ECPF (ConnectX BlueField SmartNIC) to support dynamic
changes to the number of VFs, along with dynamic vport creation and
configuration, as introduced in the "Update VF vports config when num of
VFs changed" patch; see sketch 1 below.

2) From Parav and Mark, trivial clean-ups.

3) From Alex, software steering support for using a flow table id as a
destination (see sketch 2 below), plus a clean-up patch removing
unnecessary function stubs.
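
The two sketches below are illustrative only and are not part of the patch
set: the e-switch and software-steering functions they call are the ones
added in the diff, while the surrounding handler/caller code, variable
names and the way values are obtained are assumptions.

  /* Sketch 1: reacting to a VF-count change on an ECPF (BlueField) host
   * with the new per-vport load/unload helpers. The handler name and the
   * source of new_num_vfs are hypothetical.
   */
  static void example_vf_count_changed(struct mlx5_eswitch *esw, u16 new_num_vfs)
  {
          if (esw->esw_funcs.num_vfs > 0) {
                  /* VFs went away: unload their reps and disable their vports. */
                  mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
          } else if (new_num_vfs > 0) {
                  /* VFs appeared: enable each VF vport and load its rep. */
                  if (mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
                                                  MLX5_VPORT_UC_ADDR_CHANGE))
                          return;
          }
          esw->esw_funcs.num_vfs = new_num_vfs;
  }

  /* Sketch 2: a software-steering action that forwards to an FDB flow
   * table referenced by number; 'dmn' and 'ft_num' are assumed to come
   * from the caller.
   */
  struct mlx5dr_action *dest;

  dest = mlx5dr_action_create_dest_table_num(dmn, ft_num);
  if (!dest)
          return -ENOMEM;
  /* Pass 'dest' in the actions[] array given to mlx5dr_rule_create() and
   * release it with mlx5dr_action_destroy() once the rule is removed.
   */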
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 44ef976a bc1a0288
@@ -175,28 +175,20 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
enum mlx5e_traffic_types tt;
int err = 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
goto out;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
tt = arfs_get_tt(type);
if (tt == -EINVAL) {
netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
__func__, type);
err = -EINVAL;
goto out;
return -EINVAL;
}
dest.tir_num = tir[tt].tirn;
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
@@ -205,8 +197,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
__func__, type);
}
out:
kvfree(spec);
return err;
}
......
@@ -1334,7 +1334,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach drop flow counter */
@@ -1346,7 +1345,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
dest_num++;
}
vport->ingress.legacy.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
mlx5_add_flow_rules(vport->ingress.acl, NULL,
&flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.legacy.drop_rule)) {
err = PTR_ERR(vport->ingress.legacy.drop_rule);
@@ -1409,7 +1408,6 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int dest_num = 0;
int err = 0;
@@ -1438,11 +1436,6 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
if (err)
return err;
/* Drop others rule (star rule) */
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
goto out;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
@@ -1454,7 +1447,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
dest_num++;
}
vport->egress.legacy.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
mlx5_add_flow_rules(vport->egress.acl, NULL,
&flow_act, dst, dest_num);
if (IS_ERR(vport->egress.legacy.drop_rule)) {
err = PTR_ERR(vport->egress.legacy.drop_rule);
@@ -1463,8 +1456,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
vport->vport, err);
vport->egress.legacy.drop_rule = NULL;
}
out:
kvfree(spec);
return err;
}
@@ -1670,34 +1662,6 @@ static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
((u8 *)node_guid)[0] = mac[5];
}
static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
int flags;
if (mlx5_esw_is_manager_vport(esw, vport_num))
return;
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
vport->info.link_state);
/* Host PF has its own mac/guid. */
if (vport_num) {
mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
vport->info.mac);
mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
vport->info.node_guid);
}
flags = (vport->info.vlan || vport->info.qos) ?
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
flags);
}
static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
@@ -1707,8 +1671,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return 0;
if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(vport->ingress.legacy.drop_counter)) {
esw_warn(esw->dev,
@@ -1722,8 +1685,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (ret)
goto ingress_err;
if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(vport->egress.legacy.drop_counter)) {
esw_warn(esw->dev,
@@ -1784,29 +1746,75 @@ static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
enum mlx5_eswitch_vport_event enabled_events)
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
int flags;
int err;
err = esw_vport_setup_acl(esw, vport);
if (err)
return err;
/* Attach vport to the eswitch rate limiter */
esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);
if (mlx5_esw_is_manager_vport(esw, vport_num))
return 0;
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
vport->info.link_state);
/* Host PF has its own mac/guid. */
if (vport_num) {
mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
vport->info.mac);
mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
vport->info.node_guid);
}
flags = (vport->info.vlan || vport->info.qos) ?
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
vport->info.qos, flags);
return 0;
}
/* Don't cleanup vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
if (!mlx5_esw_is_manager_vport(esw, vport_num))
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
MLX5_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_qos(esw, vport);
esw_vport_cleanup_acl(esw, vport);
}
static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
int ret;
vport = mlx5_eswitch_get_vport(esw, vport_num);
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
ret = esw_vport_setup_acl(esw, vport);
ret = esw_vport_setup(esw, vport);
if (ret)
goto done;
/* Attach vport to the eswitch rate limiter */
if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
vport->qos.bw_share))
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true;
@@ -1827,10 +1835,11 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
return ret;
}
static void esw_disable_vport(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 vport_num = vport->vport;
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
mutex_lock(&esw->state_lock);
if (!vport->enabled)
@@ -1848,16 +1857,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport);
if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
esw->mode == MLX5_ESWITCH_LEGACY)
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
MLX5_VPORT_ADMIN_STATE_DOWN);
esw_vport_cleanup_acl(esw, vport);
esw_vport_cleanup(esw, vport);
esw->enabled_vports--;
done:
@@ -1945,6 +1945,59 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
{
int err;
err = esw_enable_vport(esw, vport_num, enabled_events);
if (err)
return err;
err = esw_offloads_load_rep(esw, vport_num);
if (err)
goto err_rep;
return err;
err_rep:
esw_disable_vport(esw, vport_num);
return err;
}
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
esw_disable_vport(esw, vport_num);
}
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
int i;
mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
mlx5_eswitch_unload_vport(esw, i);
}
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
int err;
int i;
mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
err = mlx5_eswitch_load_vport(esw, i, enabled_events);
if (err)
goto vf_err;
}
return 0;
vf_err:
mlx5_eswitch_unload_vf_vports(esw, i - 1);
return err;
}
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
@@ -1952,46 +2005,33 @@ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
int num_vfs;
int ret;
int i;
/* Enable PF vport */
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
ret = esw_enable_vport(esw, vport, enabled_events);
ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
if (ret)
return ret;
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
ret = esw_enable_vport(esw, vport, enabled_events);
ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
}
/* Enable VF vports */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
ret = esw_enable_vport(esw, vport, enabled_events);
ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
enabled_events);
if (ret)
goto vf_err;
}
return 0;
vf_err:
num_vfs = i - 1;
mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
esw_disable_vport(esw, vport);
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
esw_disable_vport(esw, vport);
}
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
esw_disable_vport(esw, vport);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -2000,11 +2040,12 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
int i;
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
mlx5_esw_for_all_vports_reverse(esw, i, vport)
esw_disable_vport(esw, vport);
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
@@ -2198,6 +2239,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
esw_offloads_cleanup_reps(esw);
mutex_destroy(&esw->state_lock);
mutex_destroy(&esw->offloads.mod_hdr.lock);
mutex_destroy(&esw->offloads.encap_tbl_lock);
kfree(esw->vports);
@@ -2432,12 +2474,11 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
}
/* Star rule to forward all traffic to uplink vport */
memset(spec, 0, sizeof(*spec));
memset(&dest, 0, sizeof(dest));
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = MLX5_VPORT_UPLINK;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -2622,9 +2663,13 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
u64 bytes = 0;
int err = 0;
if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
if (esw->mode != MLX5_ESWITCH_LEGACY)
return 0;
mutex_lock(&esw->state_lock);
if (!vport->enabled)
goto unlock;
if (vport->egress.legacy.drop_counter)
mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
&stats->rx_dropped, &bytes);
@@ -2635,20 +2680,22 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
return 0;
goto unlock;
err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
&rx_discard_vport_down,
&tx_discard_vport_down);
if (err)
return err;
goto unlock;
if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
stats->rx_dropped += rx_discard_vport_down;
if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
stats->tx_dropped += tx_discard_vport_down;
return 0;
unlock:
mutex_unlock(&esw->state_lock);
return err;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
......
@@ -651,6 +651,17 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
......
@@ -1634,187 +1634,66 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type]->unload(rep);
}
static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
if (mlx5_ecpf_vport_exists(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
__esw_offloads_unload_rep(esw, rep, rep_type);
}
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
__esw_offloads_unload_rep(esw, rep, rep_type);
}
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
__esw_offloads_unload_rep(esw, rep, rep_type);
}
static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
u8 rep_type)
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int i;
mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = NUM_REP_TYPES;
while (rep_type-- > 0)
__unload_reps_vf_vport(esw, nvports, rep_type);
}
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
/* Special vports must be the last to unload. */
__unload_reps_special_vport(esw, rep_type);
}
static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
u8 rep_type = NUM_REP_TYPES;
while (rep_type-- > 0)
__unload_reps_all_vport(esw, rep_type);
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
int err = 0;
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
if (err)
atomic_set(&rep->rep_data[rep_type].state,
REP_REGISTERED);
}
return err;
}
static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int err;
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
return err;
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
goto err_pf;
}
if (mlx5_ecpf_vport_exists(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
goto err_ecpf;
__esw_offloads_unload_rep(esw, rep, rep_type);
}
return 0;
err_ecpf:
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
__esw_offloads_unload_rep(esw, rep, rep_type);
}
err_pf:
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
__esw_offloads_unload_rep(esw, rep, rep_type);
return err;
}
static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
u8 rep_type)
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int err, i;
mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
goto err_vf;
}
return 0;
err_vf:
__unload_reps_vf_vport(esw, --i, rep_type);
return err;
}
static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
int rep_type;
int err;
/* Special vports must be loaded first, uplink rep creates mdev resource. */
err = __load_reps_special_vport(esw, rep_type);
if (err)
return err;
err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
if (err)
goto err_vfs;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
err_vfs:
__unload_reps_special_vport(esw, rep_type);
return err;
}
static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = 0;
int err;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
err = __load_reps_vf_vport(esw, nvports, rep_type);
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
if (err)
goto err_reps;
}
return err;
return 0;
err_reps:
while (rep_type-- > 0)
__unload_reps_vf_vport(esw, nvports, rep_type);
atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
for (--rep_type; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
return err;
}
static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
u8 rep_type = 0;
int err;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
err = __load_reps_all_vport(esw, rep_type);
if (err)
goto err_reps;
}
struct mlx5_eswitch_rep *rep;
int rep_type;
return err;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
err_reps:
while (rep_type-- > 0)
__unload_reps_all_vport(esw, rep_type);
return err;
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
@@ -2002,7 +1881,6 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
static const struct mlx5_flow_spec spec = {};
struct mlx5_flow_act flow_act = {};
int err = 0;
u32 key;
@@ -2034,7 +1912,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
vport->ingress.offloads.modify_metadata_rule =
mlx5_add_flow_rules(vport->ingress.acl,
&spec, &flow_act, NULL, 0);
NULL, &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
esw_warn(esw->dev,
@@ -2391,11 +2269,12 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
} else {
int err;
err = esw_offloads_load_vf_reps(esw, new_num_vfs);
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
return;
}
@@ -2453,6 +2332,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
err = esw_offloads_steering_init(esw);
if (err)
@@ -2466,27 +2346,28 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
/* Uplink vport rep must load first. */
err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
if (err)
goto err_vports;
goto err_uplink;
err = esw_offloads_load_all_reps(esw);
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
goto err_reps;
goto err_vports;
esw_offloads_devcom_init(esw);
mutex_init(&esw->offloads.termtbl_mutex);
return 0;
err_reps:
mlx5_eswitch_disable_pf_vf_vports(esw);
err_vports:
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
err_steering_init:
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
@@ -2512,11 +2393,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
@@ -2596,10 +2478,8 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return -EOPNOTSUPP;
@@ -2620,7 +2500,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
u16 cur_mlx5_mode, mlx5_mode = 0;
int err;
err = mlx5_devlink_eswitch_check(devlink);
err = mlx5_eswitch_check(dev);
if (err)
return err;
@@ -2645,7 +2525,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
err = mlx5_devlink_eswitch_check(devlink);
err = mlx5_eswitch_check(dev);
if (err)
return err;
@@ -2660,7 +2540,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
int err, vport, num_vport;
u8 mlx5_mode;
err = mlx5_devlink_eswitch_check(devlink);
err = mlx5_eswitch_check(dev);
if (err)
return err;
@@ -2714,7 +2594,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
err = mlx5_devlink_eswitch_check(devlink);
err = mlx5_eswitch_check(dev);
if (err)
return err;
@@ -2729,7 +2609,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
err = mlx5_devlink_eswitch_check(devlink);
err = mlx5_eswitch_check(dev);
if (err)
return err;
@@ -2778,7 +2658,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
err = mlx5_devlink_eswitch_check(devlink);
err = mlx5_eswitch_check(dev);
if (err)
return err;
@@ -2786,6 +2666,21 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
return 0;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
/* Currently, only ECPF based device has representor for host PF. */
if (vport_num == MLX5_VPORT_PF &&
!mlx5_core_is_ecpf_esw_manager(esw->dev))
return false;
if (vport_num == MLX5_VPORT_ECPF &&
!mlx5_ecpf_vport_exists(esw->dev))
return false;
return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
const struct mlx5_eswitch_rep_ops *ops,
u8 rep_type)
@@ -2796,9 +2691,11 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type] = ops;
mlx5_esw_for_all_reps(esw, i, rep) {
if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
rep_data = &rep->rep_data[rep_type];
atomic_set(&rep_data->state, REP_REGISTERED);
}
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
......
@@ -410,7 +410,6 @@ mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
struct mlx5_flow_table *fdb,
struct mlx5_flow_table *next_fdb)
{
static const struct mlx5_flow_spec spec = {};
struct mlx5_eswitch *esw = fdb_chain->esw;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act act = {};
@@ -425,7 +424,7 @@ mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
return mlx5_add_flow_rules(fdb, NULL, &act, &dest, 1);
}
static int
......
@@ -49,7 +49,6 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
struct mlx5_termtbl_handle *tt,
struct mlx5_flow_act *flow_act)
{
static const struct mlx5_flow_spec spec = {};
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
int err;
@@ -73,7 +72,7 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
return -EOPNOTSUPP;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, &spec, flow_act,
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
......
@@ -1892,12 +1892,16 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int num_dest)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
static const struct mlx5_flow_spec zero_spec = {};
struct mlx5_flow_destination gen_dest = {};
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
struct fs_prio *prio;
if (!spec)
spec = &zero_spec;
fs_get_obj(prio, ft->node.parent);
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
......
@@ -964,6 +964,24 @@ struct mlx5dr_action *mlx5dr_action_create_drop(void)
return dr_action_create_generic(DR_ACTION_TYP_DROP);
}
struct mlx5dr_action *
mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num)
{
struct mlx5dr_action *action;
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
return NULL;
action->dest_tbl.is_fw_tbl = true;
action->dest_tbl.fw_tbl.dmn = dmn;
action->dest_tbl.fw_tbl.id = table_num;
action->dest_tbl.fw_tbl.type = FS_FT_FDB;
refcount_inc(&dmn->refcount);
return action;
}
struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
{
......
@@ -384,6 +384,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 ft_id;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -420,6 +421,17 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
num_term_actions++;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
ft_id = dst->dest_attr.ft_num;
tmp_action = mlx5dr_action_create_dest_table_num(domain,
ft_id);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
default:
err = -EOPNOTSUPP;
goto free_actions;
......
@@ -38,8 +38,6 @@ struct mlx5dr_action_dest {
struct mlx5dr_action *reformat;
};
#ifdef CONFIG_MLX5_SW_STEERING
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
@@ -76,6 +74,9 @@ int mlx5dr_rule_destroy(struct mlx5dr_rule *rule);
int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_action *action);
struct mlx5dr_action *
mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num);
struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
@@ -125,103 +126,4 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
}
#else /* CONFIG_MLX5_SW_STEERING */
static inline struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) { return NULL; }
static inline int
mlx5dr_domain_destroy(struct mlx5dr_domain *domain) { return 0; }
static inline int
mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags) { return 0; }
static inline void
mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn) { }
static inline struct mlx5dr_table *
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; }
static inline int
mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
static inline u32
mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; }
static inline struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *table,
u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask) { return NULL; }
static inline int
mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) { return 0; }
static inline struct mlx5dr_rule *
mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
struct mlx5dr_action *actions[]) { return NULL; }
static inline int
mlx5dr_rule_destroy(struct mlx5dr_rule *rule) { return 0; }
static inline int
mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_action *action) { return 0; }
static inline struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
struct mlx5_flow_table *ft) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
u32 vport, u8 vhca_id_valid,
u16 vhca_id) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_drop(void) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_tag(u32 tag_value) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_flow_counter(u32 counter_id) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
enum mlx5dr_action_reformat_type reformat_type,
size_t data_sz,
void *data) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
u32 flags,
size_t actions_sz,
__be64 actions[]) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_pop_vlan(void) { return NULL; }
static inline struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain,
__be32 vlan_hdr) { return NULL; }
static inline int
mlx5dr_action_destroy(struct mlx5dr_action *action) { return 0; }
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev) { return false; }
#endif /* CONFIG_MLX5_SW_STEERING */
#endif /* _MLX5DR_H_ */