Commit ae430332 authored by Ariel Levkovich, committed by Saeed Mahameed

net/mlx5: Refactor multi chains and prios support

Decouple the chains infrastructure from eswitch and make
it generic to support other steering namespaces.

The change defines an agnostic data structure to keep
all the relevant information for maintaining flow table
chaining in any steering namespace. Each namespace that
requires table chaining is required to allocate
such a data structure.

The chains creation code receives the steering namespace
and flow table parameters from the caller, so it operates
agnostically when creating the resources required to
maintain the table chaining function. Parts of the code
that are specific to eswitch functionality are moved
to eswitch files.
Signed-off-by: Ariel Levkovich <lariel@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 748d1c8a
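
For illustration, a minimal sketch of how a caller outside the eswitch might allocate and use a chains instance through the generic API this patch introduces. The mlx5_chains_* calls and mlx5_chains_attr fields below come from the patch itself; the wrapper functions, the namespace choice and the sizes are assumptions made only for the example.

#include "lib/fs_chains.h"

/* Illustrative sketch only: a non-eswitch user allocating its own
 * chains instance. The caller, not the chains code, picks the
 * steering namespace and the flow table parameters.
 */
static struct mlx5_fs_chains *
example_chains_init(struct mlx5_core_dev *dev,
		    struct mlx5_flow_table *default_miss_ft)
{
	struct mlx5_chains_attr attr = {};

	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;	/* illustrative namespace */
	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	attr.max_ft_sz = 64 * 1024;		/* illustrative table size */
	attr.max_grp_num = 4;			/* illustrative group count */
	attr.default_ft = default_miss_ft;	/* table misses end up here */

	return mlx5_chains_create(dev, &attr);	/* ERR_PTR() on failure */
}

/* Tables are looked up and reference counted per (chain, prio, level),
 * exactly as the FDB code in this patch does.
 */
static int example_use_chain(struct mlx5_fs_chains *chains)
{
	struct mlx5_flow_table *ft;

	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	/* ... add flow rules to ft ... */

	mlx5_chains_put_table(chains, 0, 1, 0);
	return 0;
}

/* Teardown is a single call: mlx5_chains_destroy(chains); */
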
@@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \ mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o en_rep.o en/rep/bond.o en/mod_hdr.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/mapping.o esw/chains.o en/tc_tun.o \ en/mapping.o lib/fs_chains.o en/tc_tun.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
......
@@ -12,7 +12,7 @@
#include "neigh.h" #include "neigh.h"
#include "en_rep.h" #include "en_rep.h"
#include "eswitch.h" #include "eswitch.h"
#include "esw/chains.h" #include "lib/fs_chains.h"
#include "en/tc_ct.h" #include "en/tc_ct.h"
#include "en/mapping.h" #include "en/mapping.h"
#include "en/tc_tun.h" #include "en/tc_tun.h"
@@ -191,7 +191,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER: case TC_SETUP_CLSFLOWER:
memcpy(&tmp, f, sizeof(*f)); memcpy(&tmp, f, sizeof(*f));
if (!mlx5_esw_chains_prios_supported(esw)) if (!mlx5_chains_prios_supported(esw_chains(esw)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the /* Re-use tc offload path by moving the ft flow to the
@@ -203,12 +203,12 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
* *
* We only support chain 0 of FT offload. * We only support chain 0 of FT offload.
*/ */
if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw)) if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (tmp.common.chain_index != 0) if (tmp.common.chain_index != 0)
return -EOPNOTSUPP; return -EOPNOTSUPP;
tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw); tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
tmp.common.prio++; tmp.common.prio++;
err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags); err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
memcpy(&f->stats, &tmp.stats, sizeof(f->stats)); memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
@@ -378,12 +378,12 @@ static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
* *
* We only support chain 0 of FT offload. * We only support chain 0 of FT offload.
*/ */
if (!mlx5_esw_chains_prios_supported(esw) || if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) || tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
tmp.common.chain_index) tmp.common.chain_index)
return -EOPNOTSUPP; return -EOPNOTSUPP;
tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw); tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
tmp.common.prio++; tmp.common.prio++;
err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags); err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
memcpy(&f->stats, &tmp.stats, sizeof(f->stats)); memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
@@ -626,7 +626,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
priv = netdev_priv(skb->dev); priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch; esw = priv->mdev->priv.eswitch;
err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain); err = mlx5_get_chain_for_tag(esw_chains(esw), reg_c0, &chain);
if (err) { if (err) {
netdev_dbg(priv->netdev, netdev_dbg(priv->netdev,
"Couldn't find chain for chain tag: %d, err: %d\n", "Couldn't find chain for chain tag: %d, err: %d\n",
......
@@ -14,7 +14,7 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/xarray.h> #include <linux/xarray.h>
#include "esw/chains.h" #include "lib/fs_chains.h"
#include "en/tc_ct.h" #include "en/tc_ct.h"
#include "en/mod_hdr.h" #include "en/mod_hdr.h"
#include "en/mapping.h" #include "en/mapping.h"
@@ -1485,7 +1485,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
* don't go though all prios of this chain as normal tc rules * don't go though all prios of this chain as normal tc rules
* miss. * miss.
*/ */
err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain, err = mlx5_chains_get_chain_mapping(esw_chains(esw), attr->chain,
&chain_mapping); &chain_mapping);
if (err) { if (err) {
ct_dbg("Failed to get chain register mapping for chain"); ct_dbg("Failed to get chain register mapping for chain");
@@ -1582,7 +1582,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping: err_mapping:
dealloc_mod_hdr_actions(&pre_mod_acts); dealloc_mod_hdr_actions(&pre_mod_acts);
mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping); mlx5_chains_put_chain_mapping(esw_chains(esw), ct_flow->chain_mapping);
err_get_chain: err_get_chain:
idr_remove(&ct_priv->fte_ids, fte_id); idr_remove(&ct_priv->fte_ids, fte_id);
err_idr: err_idr:
@@ -1694,7 +1694,7 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
if (ct_flow->post_ct_rule) { if (ct_flow->post_ct_rule) {
mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule, mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule,
&ct_flow->post_ct_attr); &ct_flow->post_ct_attr);
mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping); mlx5_chains_put_chain_mapping(esw_chains(esw), ct_flow->chain_mapping);
idr_remove(&ct_priv->fte_ids, ct_flow->fte_id); idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft); mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
} }
@@ -1817,14 +1817,14 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
ct_priv->esw = esw; ct_priv->esw = esw;
ct_priv->netdev = rpriv->netdev; ct_priv->netdev = rpriv->netdev;
ct_priv->ct = mlx5_esw_chains_create_global_table(esw); ct_priv->ct = mlx5_chains_create_global_table(esw_chains(esw));
if (IS_ERR(ct_priv->ct)) { if (IS_ERR(ct_priv->ct)) {
err = PTR_ERR(ct_priv->ct); err = PTR_ERR(ct_priv->ct);
mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err); mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err);
goto err_ct_tbl; goto err_ct_tbl;
} }
ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw); ct_priv->ct_nat = mlx5_chains_create_global_table(esw_chains(esw));
if (IS_ERR(ct_priv->ct_nat)) { if (IS_ERR(ct_priv->ct_nat)) {
err = PTR_ERR(ct_priv->ct_nat); err = PTR_ERR(ct_priv->ct_nat);
mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table", mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table",
@@ -1832,7 +1832,7 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_ct_nat_tbl; goto err_ct_nat_tbl;
} }
ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw); ct_priv->post_ct = mlx5_chains_create_global_table(esw_chains(esw));
if (IS_ERR(ct_priv->post_ct)) { if (IS_ERR(ct_priv->post_ct)) {
err = PTR_ERR(ct_priv->post_ct); err = PTR_ERR(ct_priv->post_ct);
mlx5_tc_ct_init_err(rpriv, "failed to create post ct table", mlx5_tc_ct_init_err(rpriv, "failed to create post ct table",
@@ -1852,9 +1852,9 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
return 0; return 0;
err_post_ct_tbl: err_post_ct_tbl:
mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat); mlx5_chains_destroy_global_table(esw_chains(esw), ct_priv->ct_nat);
err_ct_nat_tbl: err_ct_nat_tbl:
mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct); mlx5_chains_destroy_global_table(esw_chains(esw), ct_priv->ct);
err_ct_tbl: err_ct_tbl:
mapping_destroy(ct_priv->labels_mapping); mapping_destroy(ct_priv->labels_mapping);
err_mapping_labels: err_mapping_labels:
@@ -1871,13 +1871,18 @@ void
mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
{ {
struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv; struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
struct mlx5_fs_chains *chains;
struct mlx5_eswitch *esw;
if (!ct_priv) if (!ct_priv)
return; return;
mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct); esw = ct_priv->esw;
mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat); chains = esw_chains(esw);
mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
mlx5_chains_destroy_global_table(chains, ct_priv->post_ct);
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping); mapping_destroy(ct_priv->zone_mapping);
mapping_destroy(ct_priv->labels_mapping); mapping_destroy(ct_priv->labels_mapping);
......
@@ -39,7 +39,6 @@
#include <net/ipv6_stubs.h> #include <net/ipv6_stubs.h>
#include "eswitch.h" #include "eswitch.h"
#include "esw/chains.h"
#include "en.h" #include "en.h"
#include "en_rep.h" #include "en_rep.h"
#include "en/txrx.h" #include "en/txrx.h"
......
@@ -57,7 +57,6 @@
#include "en/rep/neigh.h" #include "en/rep/neigh.h"
#include "en_tc.h" #include "en_tc.h"
#include "eswitch.h" #include "eswitch.h"
#include "esw/chains.h"
#include "fs_core.h" #include "fs_core.h"
#include "en/port.h" #include "en/port.h"
#include "en/tc_tun.h" #include "en/tc_tun.h"
@@ -66,6 +65,7 @@
#include "en/mod_hdr.h" #include "en/mod_hdr.h"
#include "lib/devcom.h" #include "lib/devcom.h"
#include "lib/geneve.h" #include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h" #include "diag/en_tc_tracepoint.h"
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -1180,7 +1180,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int err = 0; int err = 0;
int out_index; int out_index;
if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) { if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack,
"E-switch priorities unsupported, upgrade FW"); "E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP; return -EOPNOTSUPP;
@@ -1191,14 +1191,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* FDB_FT_CHAIN which is outside tc range. * FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb(). * See mlx5e_rep_setup_ft_cb().
*/ */
max_chain = mlx5_esw_chains_get_chain_range(esw); max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) { if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range"); "Requested chain is out of supported range");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
max_prio = mlx5_esw_chains_get_prio_range(esw); max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
if (attr->prio > max_prio) { if (attr->prio > max_prio) {
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range"); "Requested priority is out of supported range");
@@ -3845,7 +3845,7 @@ static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
u32 actions, u32 actions,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
u32 max_chain = mlx5_esw_chains_get_chain_range(esw); u32 max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5_esw_flow_attr *attr = flow->esw_attr;
bool ft_flow = mlx5e_is_ft_flow(flow); bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index; u32 dest_chain = act->chain_index;
@@ -3855,7 +3855,7 @@ static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (!mlx5_esw_chains_backwards_supported(esw) && if (!mlx5_chains_backwards_supported(esw_chains(esw)) &&
dest_chain <= attr->chain) { dest_chain <= attr->chain) {
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack,
"Goto lower numbered chain isn't supported"); "Goto lower numbered chain isn't supported");
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies. */
#ifndef __ML5_ESW_CHAINS_H__
#define __ML5_ESW_CHAINS_H__
#include "eswitch.h"
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
bool
mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
bool
mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
u32
mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
u32
mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
u32
mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
struct mlx5_flow_table *
mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
u32 level);
void
mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
u32 level);
struct mlx5_flow_table *
mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
struct mlx5_flow_table *
mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw);
void
mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
struct mlx5_flow_table *ft);
int
mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
u32 *chain_mapping);
int
mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw,
u32 chain_mapping);
int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
int
mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5_flow_table *
mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
u32 level) { return ERR_PTR(-EOPNOTSUPP); }
static inline void
mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
u32 level) {}
static inline struct mlx5_flow_table *
mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) { return ERR_PTR(-EOPNOTSUPP); }
static inline int mlx5_esw_chains_create(struct mlx5_eswitch *esw) { return 0; }
static inline void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) {}
#endif /* CONFIG_MLX5_CLS_ACT */
#endif /* __ML5_ESW_CHAINS_H__ */
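
For reference, the new lib/fs_chains.h counterparts of the removed declarations above, as they can be inferred from the definitions and call sites elsewhere in this diff; the new header itself is not part of the visible hunks, so this listing is reconstructed rather than quoted:

/* Reconstructed from call sites in this patch; treat as inferred, not quoted. */
struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains);
u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains);
u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains);
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level);
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level);
struct mlx5_flow_table *mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains);
void mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains, struct mlx5_flow_table *ft);
struct mlx5_flow_table *mlx5_chains_create_global_table(struct mlx5_fs_chains *chains);
void mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
				      struct mlx5_flow_table *ft);
int mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
				  u32 *chain_mapping);
int mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping);
int mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, u32 *chain);
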
@@ -42,6 +42,7 @@
#include <linux/mlx5/vport.h> #include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include "lib/mpfs.h" #include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "en/tc_ct.h" #include "en/tc_ct.h"
#ifdef CONFIG_MLX5_ESWITCH #ifdef CONFIG_MLX5_ESWITCH
@@ -62,6 +63,9 @@
#define mlx5_esw_has_fwd_fdb(dev) \ #define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
#define esw_chains(esw) \
((esw)->fdb_table.offloads.esw_chains_priv)
struct vport_ingress { struct vport_ingress {
struct mlx5_flow_table *acl; struct mlx5_flow_table *acl;
struct mlx5_flow_handle *allow_rule; struct mlx5_flow_handle *allow_rule;
@@ -154,12 +158,6 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events; enum mlx5_eswitch_vport_event enabled_events;
}; };
enum offloads_fdb_flags {
ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};
struct mlx5_esw_chains_priv;
struct mlx5_eswitch_fdb { struct mlx5_eswitch_fdb {
union { union {
struct legacy_fdb { struct legacy_fdb {
@@ -183,7 +181,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle *miss_rule_multi; struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount; int vlan_push_pop_refcount;
struct mlx5_esw_chains_priv *esw_chains_priv; struct mlx5_fs_chains *esw_chains_priv;
struct { struct {
DECLARE_HASHTABLE(table, 8); DECLARE_HASHTABLE(table, 8);
/* Protects vports.table */ /* Protects vports.table */
......
@@ -39,12 +39,12 @@
#include "mlx5_core.h" #include "mlx5_core.h"
#include "eswitch.h" #include "eswitch.h"
#include "esw/acl/ofld.h" #include "esw/acl/ofld.h"
#include "esw/chains.h"
#include "rdma.h" #include "rdma.h"
#include "en.h" #include "en.h"
#include "fs_core.h" #include "fs_core.h"
#include "lib/devcom.h" #include "lib/devcom.h"
#include "lib/eq.h" #include "lib/eq.h"
#include "lib/fs_chains.h"
/* There are two match-all miss flows, one for unicast dst mac and /* There are two match-all miss flows, one for unicast dst mac and
* one for multicast. * one for multicast.
@@ -294,6 +294,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
{ {
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = !!(attr->split_count); bool split = !!(attr->split_count);
struct mlx5_flow_handle *rule; struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb; struct mlx5_flow_table *fdb;
@@ -329,11 +330,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw); dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
i++; i++;
} else if (attr->dest_chain) { } else if (attr->dest_chain) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
ft = mlx5_esw_chains_get_table(esw, attr->dest_chain, ft = mlx5_chains_get_table(chains, attr->dest_chain,
1, 0); 1, 0);
if (IS_ERR(ft)) { if (IS_ERR(ft)) {
rule = ERR_CAST(ft); rule = ERR_CAST(ft);
@@ -385,7 +386,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
fdb = esw_vport_tbl_get(esw, attr); fdb = esw_vport_tbl_get(esw, attr);
} else { } else {
if (attr->chain || attr->prio) if (attr->chain || attr->prio)
fdb = mlx5_esw_chains_get_table(esw, attr->chain, fdb = mlx5_chains_get_table(chains, attr->chain,
attr->prio, 0); attr->prio, 0);
else else
fdb = attr->fdb; fdb = attr->fdb;
@@ -416,10 +417,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (split) if (split)
esw_vport_tbl_put(esw, attr); esw_vport_tbl_put(esw, attr);
else if (attr->chain || attr->prio) else if (attr->chain || attr->prio)
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get: err_esw_get:
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain) if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0); mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
err_create_goto_table: err_create_goto_table:
return rule; return rule;
} }
@@ -431,12 +432,13 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
{ {
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_fs_chains *chains = esw_chains(esw);
struct mlx5_flow_table *fast_fdb; struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb; struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule; struct mlx5_flow_handle *rule;
int i; int i;
fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0); fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) { if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb); rule = ERR_CAST(fast_fdb);
goto err_get_fast; goto err_get_fast;
@@ -483,7 +485,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
add_err: add_err:
esw_vport_tbl_put(esw, attr); esw_vport_tbl_put(esw, attr);
err_get_fwd: err_get_fwd:
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast: err_get_fast:
return rule; return rule;
} }
@@ -494,6 +496,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr, struct mlx5_esw_flow_attr *attr,
bool fwd_rule) bool fwd_rule)
{ {
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = (attr->split_count > 0); bool split = (attr->split_count > 0);
int i; int i;
@@ -511,15 +514,14 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
if (fwd_rule) { if (fwd_rule) {
esw_vport_tbl_put(esw, attr); esw_vport_tbl_put(esw, attr);
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
} else { } else {
if (split) if (split)
esw_vport_tbl_put(esw, attr); esw_vport_tbl_put(esw, attr);
else if (attr->chain || attr->prio) else if (attr->chain || attr->prio)
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
0);
if (attr->dest_chain) if (attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0); mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
} }
} }
@@ -1137,6 +1139,126 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
} }
} }
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
#define fdb_modify_header_fwd_to_table_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
struct mlx5_core_dev *dev = esw->dev;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
/* Disabled when ttl workaround is needed, e.g
* when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
*/
esw_warn(dev,
"Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
} else {
*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
esw_info(dev, "Supported tc chains and prios offload\n");
}
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_table *nf_ft, *ft;
struct mlx5_chains_attr attr = {};
struct mlx5_fs_chains *chains;
u32 fdb_max;
int err;
fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
esw_init_chains_offload_flags(esw, &attr.flags);
attr.ns = MLX5_FLOW_NAMESPACE_FDB;
attr.max_ft_sz = fdb_max;
attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb;
attr.max_restore_tag = esw_get_max_restore_tag(esw);
chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(chains)) {
err = PTR_ERR(chains);
esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
return err;
}
esw->fdb_table.offloads.esw_chains_priv = chains;
/* Create tc_end_ft which is the always created ft chain */
nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
1, 0);
if (IS_ERR(nf_ft)) {
err = PTR_ERR(nf_ft);
goto nf_ft_err;
}
/* Always open the root for fast path */
ft = mlx5_chains_get_table(chains, 0, 1, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto level_0_err;
}
/* Open level 1 for split fdb rules now if prios isn't supported */
if (!mlx5_chains_prios_supported(chains)) {
err = mlx5_esw_vport_tbl_get(esw);
if (err)
goto level_1_err;
}
mlx5_chains_set_end_ft(chains, nf_ft);
return 0;
level_1_err:
mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
mlx5_chains_destroy(chains);
esw->fdb_table.offloads.esw_chains_priv = NULL;
return err;
}
static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
if (!mlx5_chains_prios_supported(chains))
mlx5_esw_vport_tbl_put(esw);
mlx5_chains_put_table(chains, 0, 1, 0);
mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
mlx5_chains_destroy(chains);
}
#else /* CONFIG_MLX5_CLS_ACT */
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }
static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}
#endif
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{ {
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1192,9 +1314,9 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
} }
esw->fdb_table.offloads.slow_fdb = fdb; esw->fdb_table.offloads.slow_fdb = fdb;
err = mlx5_esw_chains_create(esw); err = esw_chains_create(esw, fdb);
if (err) { if (err) {
esw_warn(dev, "Failed to create fdb chains err(%d)\n", err); esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
goto fdb_chains_err; goto fdb_chains_err;
} }
@@ -1288,7 +1410,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
peer_miss_err: peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err: send_vport_err:
mlx5_esw_chains_destroy(esw); esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err: fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err: slow_fdb_err:
@@ -1312,7 +1434,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
mlx5_esw_chains_destroy(esw); esw_chains_destroy(esw, esw_chains(esw));
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
/* Holds true only as long as DMFS is the default */ /* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
......
@@ -5,7 +5,7 @@
#include <linux/mlx5/mlx5_ifc.h> #include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include "esw/chains.h" #include "lib/fs_chains.h"
#include "en/mapping.h" #include "en/mapping.h"
#include "mlx5_core.h" #include "mlx5_core.h"
#include "fs_core.h" #include "fs_core.h"
@@ -13,47 +13,51 @@
#include "en.h" #include "en.h"
#include "en_tc.h" #include "en_tc.h"
#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv) #define chains_lock(chains) ((chains)->lock)
#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock) #define chains_ht(chains) ((chains)->chains_ht)
#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht) #define chains_mapping(chains) ((chains)->chains_mapping)
#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping) #define prios_ht(chains) ((chains)->prios_ht)
#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht) #define ft_pool_left(chains) ((chains)->ft_left)
#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left) #define tc_default_ft(chains) ((chains)->tc_default_ft)
#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb) #define tc_end_ft(chains) ((chains)->tc_end_ft)
#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb) #define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
#define fdb_ignore_flow_level_supported(esw) \ FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
#define fdb_modify_header_fwd_to_table_supported(esw) \ /* Firmware currently has 4 pool of 4 sizes that it supports (FT_POOLS),
(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table)) * and a virtual memory region of 16M (MLX5_FT_SIZE), this region is duplicated
/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
* and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
* for each flow table pool. We can allocate up to 16M of each pool, * for each flow table pool. We can allocate up to 16M of each pool,
* and we keep track of how much we used via get_next_avail_sz_from_pool. * and we keep track of how much we used via get_next_avail_sz_from_pool.
* Firmware doesn't report any of this for now. * Firmware doesn't report any of this for now.
* ESW_POOL is expected to be sorted from large to small and match firmware * ESW_POOL is expected to be sorted from large to small and match firmware
* pools. * pools.
*/ */
#define ESW_SIZE (16 * 1024 * 1024) #define FT_SIZE (16 * 1024 * 1024)
static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024, static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024, 1 * 1024 * 1024,
64 * 1024, 64 * 1024,
128 }; 128 };
#define ESW_FT_TBL_SZ (64 * 1024) #define FT_TBL_SZ (64 * 1024)
struct mlx5_fs_chains {
struct mlx5_core_dev *dev;
struct mlx5_esw_chains_priv {
struct rhashtable chains_ht; struct rhashtable chains_ht;
struct rhashtable prios_ht; struct rhashtable prios_ht;
/* Protects above chains_ht and prios_ht */ /* Protects above chains_ht and prios_ht */
struct mutex lock; struct mutex lock;
struct mlx5_flow_table *tc_end_fdb; struct mlx5_flow_table *tc_default_ft;
struct mlx5_flow_table *tc_end_ft;
struct mapping_ctx *chains_mapping; struct mapping_ctx *chains_mapping;
int fdb_left[ARRAY_SIZE(ESW_POOLS)]; enum mlx5_flow_namespace_type ns;
u32 group_num;
u32 flags;
int ft_left[ARRAY_SIZE(FT_POOLS)];
}; };
struct fdb_chain { struct fs_chain {
struct rhash_head node; struct rhash_head node;
u32 chain; u32 chain;
@@ -61,102 +65,117 @@ struct fdb_chain {
int ref; int ref;
int id; int id;
struct mlx5_eswitch *esw; struct mlx5_fs_chains *chains;
struct list_head prios_list; struct list_head prios_list;
struct mlx5_flow_handle *restore_rule; struct mlx5_flow_handle *restore_rule;
struct mlx5_modify_hdr *miss_modify_hdr; struct mlx5_modify_hdr *miss_modify_hdr;
}; };
struct fdb_prio_key { struct prio_key {
u32 chain; u32 chain;
u32 prio; u32 prio;
u32 level; u32 level;
}; };
struct fdb_prio { struct prio {
struct rhash_head node; struct rhash_head node;
struct list_head list; struct list_head list;
struct fdb_prio_key key; struct prio_key key;
int ref; int ref;
struct fdb_chain *fdb_chain; struct fs_chain *chain;
struct mlx5_flow_table *fdb; struct mlx5_flow_table *ft;
struct mlx5_flow_table *next_fdb; struct mlx5_flow_table *next_ft;
struct mlx5_flow_group *miss_group; struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule; struct mlx5_flow_handle *miss_rule;
}; };
static const struct rhashtable_params chain_params = { static const struct rhashtable_params chain_params = {
.head_offset = offsetof(struct fdb_chain, node), .head_offset = offsetof(struct fs_chain, node),
.key_offset = offsetof(struct fdb_chain, chain), .key_offset = offsetof(struct fs_chain, chain),
.key_len = sizeof_field(struct fdb_chain, chain), .key_len = sizeof_field(struct fs_chain, chain),
.automatic_shrinking = true, .automatic_shrinking = true,
}; };
static const struct rhashtable_params prio_params = { static const struct rhashtable_params prio_params = {
.head_offset = offsetof(struct fdb_prio, node), .head_offset = offsetof(struct prio, node),
.key_offset = offsetof(struct fdb_prio, key), .key_offset = offsetof(struct prio, key),
.key_len = sizeof_field(struct fdb_prio, key), .key_len = sizeof_field(struct prio, key),
.automatic_shrinking = true, .automatic_shrinking = true,
}; };
bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw) bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
{
return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
}
static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
{ {
return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
} }
bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw) bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
{ {
return mlx5_esw_chains_prios_supported(esw) && return mlx5_chains_prios_supported(chains) &&
fdb_ignore_flow_level_supported(esw); mlx5_chains_ignore_flow_level_supported(chains);
} }
u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw) u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
{ {
if (!mlx5_esw_chains_prios_supported(esw)) if (!mlx5_chains_prios_supported(chains))
return 1; return 1;
if (fdb_ignore_flow_level_supported(esw)) if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX - 1; return UINT_MAX - 1;
/* We should get here only for eswitch case */
return FDB_TC_MAX_CHAIN; return FDB_TC_MAX_CHAIN;
} }
u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw) u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
{ {
return mlx5_esw_chains_get_chain_range(esw) + 1; return mlx5_chains_get_chain_range(chains) + 1;
} }
u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw) u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{ {
if (!mlx5_esw_chains_prios_supported(esw)) if (!mlx5_chains_prios_supported(chains))
return 1; return 1;
if (fdb_ignore_flow_level_supported(esw)) if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX; return UINT_MAX;
/* We should get here only for eswitch case */
return FDB_TC_MAX_PRIO; return FDB_TC_MAX_PRIO;
} }
static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw) static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
{ {
if (fdb_ignore_flow_level_supported(esw)) if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX; return UINT_MAX;
/* Same value for FDB and NIC RX tables */
return FDB_TC_LEVELS_PER_PRIO; return FDB_TC_LEVELS_PER_PRIO;
} }
void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft)
{
tc_end_ft(chains) = ft;
}
#define POOL_NEXT_SIZE 0 #define POOL_NEXT_SIZE 0
static int static int
mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw, mlx5_chains_get_avail_sz_from_pool(struct mlx5_fs_chains *chains,
int desired_size) int desired_size)
{ {
int i, found_i = -1; int i, found_i = -1;
for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) { for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) { if (ft_pool_left(chains)[i] && FT_POOLS[i] > desired_size) {
found_i = i; found_i = i;
if (desired_size != POOL_NEXT_SIZE) if (desired_size != POOL_NEXT_SIZE)
break; break;
@@ -164,80 +183,79 @@ mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
} }
if (found_i != -1) { if (found_i != -1) {
--fdb_pool_left(esw)[found_i]; --ft_pool_left(chains)[found_i];
return ESW_POOLS[found_i]; return FT_POOLS[found_i];
} }
return 0; return 0;
} }
static void static void
mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz) mlx5_chains_put_sz_to_pool(struct mlx5_fs_chains *chains, int sz)
{ {
int i; int i;
for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) { for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
if (sz == ESW_POOLS[i]) { if (sz == FT_POOLS[i]) {
++fdb_pool_left(esw)[i]; ++ft_pool_left(chains)[i];
return; return;
} }
} }
WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz); WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
} }
static void static void
mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw) mlx5_chains_init_sz_pool(struct mlx5_fs_chains *chains, u32 ft_max)
{ {
u32 fdb_max;
int i; int i;
fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size); for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
ft_pool_left(chains)[i] =
for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
fdb_pool_left(esw)[i] =
ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
} }
static struct mlx5_flow_table * static struct mlx5_flow_table *
mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw, mlx5_chains_create_table(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level) u32 chain, u32 prio, u32 level)
{ {
struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns; struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *fdb; struct mlx5_flow_table *ft;
int sz; int sz;
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ? sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) : mlx5_chains_get_avail_sz_from_pool(chains, FT_TBL_SZ) :
mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE); mlx5_chains_get_avail_sz_from_pool(chains, POOL_NEXT_SIZE);
if (!sz) if (!sz)
return ERR_PTR(-ENOSPC); return ERR_PTR(-ENOSPC);
ft_attr.max_fte = sz; ft_attr.max_fte = sz;
/* We use tc_slow_fdb(esw) as the table's next_ft till /* We use tc_default_ft(chains) as the table's next_ft till
* ignore_flow_level is allowed on FT creation and not just for FTEs. * ignore_flow_level is allowed on FT creation and not just for FTEs.
* Instead caller should add an explicit miss rule if needed. * Instead caller should add an explicit miss rule if needed.
*/ */
ft_attr.next_ft = tc_slow_fdb(esw); ft_attr.next_ft = tc_default_ft(chains);
/* The root table(chain 0, prio 1, level 0) is required to be /* The root table(chain 0, prio 1, level 0) is required to be
* connected to the previous prio (FDB_BYPASS_PATH if exists). * connected to the previous fs_core managed prio.
* We always create it, as a managed table, in order to align with * We always create it, as a managed table, in order to align with
* fs_core logic. * fs_core logic.
*/ */
if (!fdb_ignore_flow_level_supported(esw) || if (!mlx5_chains_ignore_flow_level_supported(chains) ||
(chain == 0 && prio == 1 && level == 0)) { (chain == 0 && prio == 1 && level == 0)) {
ft_attr.level = level; ft_attr.level = level;
ft_attr.prio = prio - 1; ft_attr.prio = prio - 1;
ns = mlx5_get_fdb_sub_ns(esw->dev, chain); ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
mlx5_get_fdb_sub_ns(chains->dev, chain) :
mlx5_get_flow_namespace(chains->dev, chains->ns);
} else { } else {
ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED; ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
ft_attr.prio = FDB_TC_OFFLOAD; ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
/* Firmware doesn't allow us to create another level 0 table, /* Firmware doesn't allow us to create another level 0 table,
* so we create all unmanaged tables as level 1. * so we create all unmanaged tables as level 1.
* *
@@ -246,45 +264,46 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
* these rules (if needed). * these rules (if needed).
*/ */
ft_attr.level = 1; ft_attr.level = 1;
ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB); ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
} }
ft_attr.autogroup.num_reserved_entries = 2; ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = esw->params.large_group_num; ft_attr.autogroup.max_num_groups = chains->group_num;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) { if (IS_ERR(ft)) {
esw_warn(esw->dev, mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
"Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n", (int)PTR_ERR(ft), chain, prio, level, sz);
(int)PTR_ERR(fdb), chain, prio, level, sz); mlx5_chains_put_sz_to_pool(chains, sz);
mlx5_esw_chains_put_sz_to_pool(esw, sz); return ft;
return fdb;
} }
return fdb; return ft;
} }
static void static void
mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw, mlx5_chains_destroy_table(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *fdb) struct mlx5_flow_table *ft)
{ {
mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte); mlx5_chains_put_sz_to_pool(chains, ft->max_fte);
mlx5_destroy_flow_table(fdb); mlx5_destroy_flow_table(ft);
} }
static int static int
create_fdb_chain_restore(struct fdb_chain *fdb_chain) create_chain_restore(struct fs_chain *chain)
{ {
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)]; char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
struct mlx5_eswitch *esw = fdb_chain->esw; struct mlx5_fs_chains *chains = chain->chains;
enum mlx5e_tc_attr_to_reg chain_to_reg;
struct mlx5_modify_hdr *mod_hdr; struct mlx5_modify_hdr *mod_hdr;
u32 index; u32 index;
int err; int err;
if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw) || if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
!mlx5_esw_chains_prios_supported(esw)) !mlx5_chains_prios_supported(chains))
return 0; return 0;
err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index); err = mapping_add(chains_mapping(chains), &chain->chain, &index);
if (err) if (err)
return err; return err;
if (index == MLX5_FS_DEFAULT_FLOW_TAG) { if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
@@ -294,168 +313,178 @@ create_fdb_chain_restore(struct fdb_chain *fdb_chain)
* *
* This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0. * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
*/ */
err = mapping_add(esw_chains_mapping(esw), err = mapping_add(chains_mapping(chains),
&fdb_chain->chain, &index); &chain->chain, &index);
mapping_remove(esw_chains_mapping(esw), mapping_remove(chains_mapping(chains),
MLX5_FS_DEFAULT_FLOW_TAG); MLX5_FS_DEFAULT_FLOW_TAG);
if (err) if (err)
return err; return err;
} }
fdb_chain->id = index; chain->id = index;
if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
chain_to_reg = CHAIN_TO_REG;
chain->restore_rule = esw_add_restore_rule(esw, chain->id);
if (IS_ERR(chain->restore_rule)) {
err = PTR_ERR(chain->restore_rule);
goto err_rule;
}
} else {
err = -EINVAL;
goto err_rule;
}
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, MLX5_SET(set_action_in, modact, field,
mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield); mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
MLX5_SET(set_action_in, modact, offset, MLX5_SET(set_action_in, modact, offset,
mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8); mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset * 8);
MLX5_SET(set_action_in, modact, length, MLX5_SET(set_action_in, modact, length,
mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8); mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen * 8);
MLX5_SET(set_action_in, modact, data, fdb_chain->id); MLX5_SET(set_action_in, modact, data, chain->id);
mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
1, modact); 1, modact);
if (IS_ERR(mod_hdr)) { if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr); err = PTR_ERR(mod_hdr);
goto err_mod_hdr; goto err_mod_hdr;
} }
fdb_chain->miss_modify_hdr = mod_hdr; chain->miss_modify_hdr = mod_hdr;
fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id);
if (IS_ERR(fdb_chain->restore_rule)) {
err = PTR_ERR(fdb_chain->restore_rule);
goto err_rule;
}
return 0; return 0;
err_rule:
mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
err_mod_hdr: err_mod_hdr:
if (!IS_ERR_OR_NULL(chain->restore_rule))
mlx5_del_flow_rules(chain->restore_rule);
err_rule:
/* Datapath can't find this mapping, so we can safely remove it */ /* Datapath can't find this mapping, so we can safely remove it */
mapping_remove(esw_chains_mapping(esw), fdb_chain->id); mapping_remove(chains_mapping(chains), chain->id);
return err; return err;
} }
static void destroy_fdb_chain_restore(struct fdb_chain *fdb_chain) static void destroy_chain_restore(struct fs_chain *chain)
{ {
struct mlx5_eswitch *esw = fdb_chain->esw; struct mlx5_fs_chains *chains = chain->chains;
if (!fdb_chain->miss_modify_hdr) if (!chain->miss_modify_hdr)
return; return;
mlx5_del_flow_rules(fdb_chain->restore_rule); if (chain->restore_rule)
mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr); mlx5_del_flow_rules(chain->restore_rule);
mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
mapping_remove(chains_mapping(chains), chain->id);
} }
static struct fdb_chain * static struct fs_chain *
mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
{ {
struct fdb_chain *fdb_chain = NULL; struct fs_chain *chain_s = NULL;
int err; int err;
fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL); chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
if (!fdb_chain) if (!chain_s)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
fdb_chain->esw = esw; chain_s->chains = chains;
fdb_chain->chain = chain; chain_s->chain = chain;
INIT_LIST_HEAD(&fdb_chain->prios_list); INIT_LIST_HEAD(&chain_s->prios_list);
err = create_fdb_chain_restore(fdb_chain); err = create_chain_restore(chain_s);
if (err) if (err)
goto err_restore; goto err_restore;
err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node, err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
chain_params); chain_params);
if (err) if (err)
goto err_insert; goto err_insert;
return fdb_chain; return chain_s;
err_insert: err_insert:
destroy_fdb_chain_restore(fdb_chain); destroy_chain_restore(chain_s);
err_restore: err_restore:
kvfree(fdb_chain); kvfree(chain_s);
return ERR_PTR(err); return ERR_PTR(err);
} }
static void static void
mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain) mlx5_chains_destroy_chain(struct fs_chain *chain)
{ {
struct mlx5_eswitch *esw = fdb_chain->esw; struct mlx5_fs_chains *chains = chain->chains;
rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node, rhashtable_remove_fast(&chains_ht(chains), &chain->node,
chain_params); chain_params);
destroy_fdb_chain_restore(fdb_chain); destroy_chain_restore(chain);
kvfree(fdb_chain); kvfree(chain);
} }
static struct fdb_chain * static struct fs_chain *
mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain) mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
{ {
struct fdb_chain *fdb_chain; struct fs_chain *chain_s;
fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain, chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
chain_params); chain_params);
if (!fdb_chain) { if (!chain_s) {
fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain); chain_s = mlx5_chains_create_chain(chains, chain);
if (IS_ERR(fdb_chain)) if (IS_ERR(chain_s))
return fdb_chain; return chain_s;
} }
fdb_chain->ref++; chain_s->ref++;
return fdb_chain; return chain_s;
} }
static struct mlx5_flow_handle * static struct mlx5_flow_handle *
mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain, mlx5_chains_add_miss_rule(struct fs_chain *chain,
struct mlx5_flow_table *fdb, struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_fdb) struct mlx5_flow_table *next_ft)
{ {
struct mlx5_eswitch *esw = fdb_chain->esw; struct mlx5_fs_chains *chains = chain->chains;
struct mlx5_flow_destination dest = {}; struct mlx5_flow_destination dest = {};
struct mlx5_flow_act act = {}; struct mlx5_flow_act act = {};
act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND; act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = next_fdb; dest.ft = next_ft;
if (next_fdb == tc_end_fdb(esw) && if (next_ft == tc_end_ft(chains) &&
mlx5_esw_chains_prios_supported(esw)) { chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
act.modify_hdr = fdb_chain->miss_modify_hdr; mlx5_chains_prios_supported(chains)) {
act.modify_hdr = chain->miss_modify_hdr;
act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
} }
return mlx5_add_flow_rules(fdb, NULL, &act, &dest, 1); return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
} }
static int static int
mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio, mlx5_chains_update_prio_prevs(struct prio *prio,
struct mlx5_flow_table *next_fdb) struct mlx5_flow_table *next_ft)
{ {
struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {}; struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
struct fdb_chain *fdb_chain = fdb_prio->fdb_chain; struct fs_chain *chain = prio->chain;
struct fdb_prio *pos; struct prio *pos;
int n = 0, err; int n = 0, err;
if (fdb_prio->key.level) if (prio->key.level)
return 0; return 0;
/* Iterate in reverse order until reaching the level 0 rule of /* Iterate in reverse order until reaching the level 0 rule of
* the previous priority, adding all the miss rules first, so we can * the previous priority, adding all the miss rules first, so we can
* revert them if any of them fails. * revert them if any of them fails.
*/ */
pos = fdb_prio; pos = prio;
list_for_each_entry_continue_reverse(pos, list_for_each_entry_continue_reverse(pos,
&fdb_chain->prios_list, &chain->prios_list,
list) { list) {
miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain, miss_rules[n] = mlx5_chains_add_miss_rule(chain,
pos->fdb, pos->ft,
next_fdb); next_ft);
if (IS_ERR(miss_rules[n])) { if (IS_ERR(miss_rules[n])) {
err = PTR_ERR(miss_rules[n]); err = PTR_ERR(miss_rules[n]);
goto err_prev_rule; goto err_prev_rule;
@@ -468,14 +497,14 @@ mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
/* Success, delete old miss rules, and update the pointers. */ /* Success, delete old miss rules, and update the pointers. */
n = 0; n = 0;
pos = fdb_prio; pos = prio;
list_for_each_entry_continue_reverse(pos, list_for_each_entry_continue_reverse(pos,
&fdb_chain->prios_list, &chain->prios_list,
list) { list) {
mlx5_del_flow_rules(pos->miss_rule); mlx5_del_flow_rules(pos->miss_rule);
pos->miss_rule = miss_rules[n]; pos->miss_rule = miss_rules[n];
pos->next_fdb = next_fdb; pos->next_ft = next_ft;
n++; n++;
if (!pos->key.level) if (!pos->key.level)
@@ -492,34 +521,34 @@ mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
} }
static void static void
mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain) mlx5_chains_put_chain(struct fs_chain *chain)
{ {
if (--fdb_chain->ref == 0) if (--chain->ref == 0)
mlx5_esw_chains_destroy_fdb_chain(fdb_chain); mlx5_chains_destroy_chain(chain);
} }
static struct fdb_prio * static struct prio *
mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw, mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level) u32 chain, u32 prio, u32 level)
{ {
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_handle *miss_rule = NULL; struct mlx5_flow_handle *miss_rule = NULL;
struct mlx5_flow_group *miss_group; struct mlx5_flow_group *miss_group;
struct fdb_prio *fdb_prio = NULL; struct mlx5_flow_table *next_ft;
struct mlx5_flow_table *next_fdb; struct mlx5_flow_table *ft;
struct fdb_chain *fdb_chain; struct prio *prio_s = NULL;
struct mlx5_flow_table *fdb; struct fs_chain *chain_s;
struct list_head *pos; struct list_head *pos;
u32 *flow_group_in; u32 *flow_group_in;
int err; int err;
fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain); chain_s = mlx5_chains_get_chain(chains, chain);
if (IS_ERR(fdb_chain)) if (IS_ERR(chain_s))
return ERR_CAST(fdb_chain); return ERR_CAST(chain_s);
fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL); prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
flow_group_in = kvzalloc(inlen, GFP_KERNEL); flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!fdb_prio || !flow_group_in) { if (!prio_s || !flow_group_in) {
err = -ENOMEM; err = -ENOMEM;
goto err_alloc; goto err_alloc;
} }
...@@ -536,127 +565,127 @@ mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw, ...@@ -536,127 +565,127 @@ mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
*/ */
/* Default miss for each chain: */ /* Default miss for each chain: */
next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ? next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
tc_slow_fdb(esw) : tc_default_ft(chains) :
tc_end_fdb(esw); tc_end_ft(chains);
list_for_each(pos, &fdb_chain->prios_list) { list_for_each(pos, &chain_s->prios_list) {
struct fdb_prio *p = list_entry(pos, struct fdb_prio, list); struct prio *p = list_entry(pos, struct prio, list);
/* exit on first pos that is larger */ /* exit on first pos that is larger */
if (prio < p->key.prio || (prio == p->key.prio && if (prio < p->key.prio || (prio == p->key.prio &&
level < p->key.level)) { level < p->key.level)) {
/* Get next level 0 table */ /* Get next level 0 table */
next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb; next_ft = p->key.level == 0 ? p->ft : p->next_ft;
break; break;
} }
} }
fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level); ft = mlx5_chains_create_table(chains, chain, prio, level);
if (IS_ERR(fdb)) { if (IS_ERR(ft)) {
err = PTR_ERR(fdb); err = PTR_ERR(ft);
goto err_create; goto err_create;
} }
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
fdb->max_fte - 2); ft->max_fte - 2);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
fdb->max_fte - 1); ft->max_fte - 1);
miss_group = mlx5_create_flow_group(fdb, flow_group_in); miss_group = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(miss_group)) { if (IS_ERR(miss_group)) {
err = PTR_ERR(miss_group); err = PTR_ERR(miss_group);
goto err_group; goto err_group;
} }
/* Add miss rule to next_fdb */ /* Add miss rule to next_ft */
miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb); miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
if (IS_ERR(miss_rule)) { if (IS_ERR(miss_rule)) {
err = PTR_ERR(miss_rule); err = PTR_ERR(miss_rule);
goto err_miss_rule; goto err_miss_rule;
} }
fdb_prio->miss_group = miss_group; prio_s->miss_group = miss_group;
fdb_prio->miss_rule = miss_rule; prio_s->miss_rule = miss_rule;
fdb_prio->next_fdb = next_fdb; prio_s->next_ft = next_ft;
fdb_prio->fdb_chain = fdb_chain; prio_s->chain = chain_s;
fdb_prio->key.chain = chain; prio_s->key.chain = chain;
fdb_prio->key.prio = prio; prio_s->key.prio = prio;
fdb_prio->key.level = level; prio_s->key.level = level;
fdb_prio->fdb = fdb; prio_s->ft = ft;
err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node, err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
prio_params); prio_params);
if (err) if (err)
goto err_insert; goto err_insert;
list_add(&fdb_prio->list, pos->prev); list_add(&prio_s->list, pos->prev);
/* Table is ready, connect it */ /* Table is ready, connect it */
err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb); err = mlx5_chains_update_prio_prevs(prio_s, ft);
if (err) if (err)
goto err_update; goto err_update;
kvfree(flow_group_in); kvfree(flow_group_in);
return fdb_prio; return prio_s;
err_update: err_update:
list_del(&fdb_prio->list); list_del(&prio_s->list);
rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node, rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
prio_params); prio_params);
err_insert: err_insert:
mlx5_del_flow_rules(miss_rule); mlx5_del_flow_rules(miss_rule);
err_miss_rule: err_miss_rule:
mlx5_destroy_flow_group(miss_group); mlx5_destroy_flow_group(miss_group);
err_group: err_group:
mlx5_esw_chains_destroy_fdb_table(esw, fdb); mlx5_chains_destroy_table(chains, ft);
err_create: err_create:
err_alloc: err_alloc:
kvfree(fdb_prio); kvfree(prio_s);
kvfree(flow_group_in); kvfree(flow_group_in);
mlx5_esw_chains_put_fdb_chain(fdb_chain); mlx5_chains_put_chain(chain_s);
return ERR_PTR(err); return ERR_PTR(err);
} }
static void static void
mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw, mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
struct fdb_prio *fdb_prio) struct prio *prio)
{ {
struct fdb_chain *fdb_chain = fdb_prio->fdb_chain; struct fs_chain *chain = prio->chain;
WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio, WARN_ON(mlx5_chains_update_prio_prevs(prio,
fdb_prio->next_fdb)); prio->next_ft));
list_del(&fdb_prio->list); list_del(&prio->list);
rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node, rhashtable_remove_fast(&prios_ht(chains), &prio->node,
prio_params); prio_params);
mlx5_del_flow_rules(fdb_prio->miss_rule); mlx5_del_flow_rules(prio->miss_rule);
mlx5_destroy_flow_group(fdb_prio->miss_group); mlx5_destroy_flow_group(prio->miss_group);
mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb); mlx5_chains_destroy_table(chains, prio->ft);
mlx5_esw_chains_put_fdb_chain(fdb_chain); mlx5_chains_put_chain(chain);
kvfree(fdb_prio); kvfree(prio);
} }
struct mlx5_flow_table * struct mlx5_flow_table *
mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level) u32 level)
{ {
struct mlx5_flow_table *prev_fts; struct mlx5_flow_table *prev_fts;
struct fdb_prio *fdb_prio; struct prio *prio_s;
struct fdb_prio_key key; struct prio_key key;
int l = 0; int l = 0;
if ((chain > mlx5_esw_chains_get_chain_range(esw) && if ((chain > mlx5_chains_get_chain_range(chains) &&
chain != mlx5_esw_chains_get_ft_chain(esw)) || chain != mlx5_chains_get_nf_ft_chain(chains)) ||
prio > mlx5_esw_chains_get_prio_range(esw) || prio > mlx5_chains_get_prio_range(chains) ||
level > mlx5_esw_chains_get_level_range(esw)) level > mlx5_chains_get_level_range(chains))
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
/* create earlier levels for correct fs_core lookup when /* create earlier levels for correct fs_core lookup when
* connecting tables. * connecting tables.
*/ */
for (l = 0; l < level; l++) { for (l = 0; l < level; l++) {
prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l); prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
if (IS_ERR(prev_fts)) { if (IS_ERR(prev_fts)) {
fdb_prio = ERR_CAST(prev_fts); prio_s = ERR_CAST(prev_fts);
goto err_get_prevs; goto err_get_prevs;
} }
} }
...@@ -665,278 +694,207 @@ mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, ...@@ -665,278 +694,207 @@ mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
key.prio = prio; key.prio = prio;
key.level = level; key.level = level;
mutex_lock(&esw_chains_lock(esw)); mutex_lock(&chains_lock(chains));
fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key, prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
prio_params); prio_params);
if (!fdb_prio) { if (!prio_s) {
fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain, prio_s = mlx5_chains_create_prio(chains, chain,
prio, level); prio, level);
if (IS_ERR(fdb_prio)) if (IS_ERR(prio_s))
goto err_create_prio; goto err_create_prio;
} }
++fdb_prio->ref; ++prio_s->ref;
mutex_unlock(&esw_chains_lock(esw)); mutex_unlock(&chains_lock(chains));
return fdb_prio->fdb; return prio_s->ft;
err_create_prio: err_create_prio:
mutex_unlock(&esw_chains_lock(esw)); mutex_unlock(&chains_lock(chains));
err_get_prevs: err_get_prevs:
while (--l >= 0) while (--l >= 0)
mlx5_esw_chains_put_table(esw, chain, prio, l); mlx5_chains_put_table(chains, chain, prio, l);
return ERR_CAST(fdb_prio); return ERR_CAST(prio_s);
} }
void void
mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level) u32 level)
{ {
struct fdb_prio *fdb_prio; struct prio *prio_s;
struct fdb_prio_key key; struct prio_key key;
key.chain = chain; key.chain = chain;
key.prio = prio; key.prio = prio;
key.level = level; key.level = level;
mutex_lock(&esw_chains_lock(esw)); mutex_lock(&chains_lock(chains));
fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key, prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
prio_params); prio_params);
if (!fdb_prio) if (!prio_s)
goto err_get_prio; goto err_get_prio;
if (--fdb_prio->ref == 0) if (--prio_s->ref == 0)
mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio); mlx5_chains_destroy_prio(chains, prio_s);
mutex_unlock(&esw_chains_lock(esw)); mutex_unlock(&chains_lock(chains));
while (level-- > 0) while (level-- > 0)
mlx5_esw_chains_put_table(esw, chain, prio, level); mlx5_chains_put_table(chains, chain, prio, level);
return; return;
err_get_prio: err_get_prio:
mutex_unlock(&esw_chains_lock(esw)); mutex_unlock(&chains_lock(chains));
WARN_ONCE(1, WARN_ONCE(1,
"Couldn't find table: (chain: %d prio: %d level: %d)", "Couldn't find table: (chain: %d prio: %d level: %d)",
chain, prio, level); chain, prio, level);
} }
struct mlx5_flow_table * struct mlx5_flow_table *
mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
{ {
return tc_end_fdb(esw); return tc_end_ft(chains);
} }
struct mlx5_flow_table * struct mlx5_flow_table *
mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw) mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
{ {
u32 chain, prio, level; u32 chain, prio, level;
int err; int err;
if (!fdb_ignore_flow_level_supported(esw)) { if (!mlx5_chains_ignore_flow_level_supported(chains)) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
esw_warn(esw->dev, mlx5_core_warn(chains->dev,
"Couldn't create global flow table, ignore_flow_level not supported."); "Couldn't create global flow table, ignore_flow_level not supported.");
goto err_ignore; goto err_ignore;
} }
chain = mlx5_esw_chains_get_chain_range(esw), chain = mlx5_chains_get_chain_range(chains),
prio = mlx5_esw_chains_get_prio_range(esw); prio = mlx5_chains_get_prio_range(chains);
level = mlx5_esw_chains_get_level_range(esw); level = mlx5_chains_get_level_range(chains);
return mlx5_esw_chains_create_fdb_table(esw, chain, prio, level); return mlx5_chains_create_table(chains, chain, prio, level);
err_ignore: err_ignore:
return ERR_PTR(err); return ERR_PTR(err);
} }
void void
mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw, mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft) struct mlx5_flow_table *ft)
{ {
mlx5_esw_chains_destroy_fdb_table(esw, ft); mlx5_chains_destroy_table(chains, ft);
} }
static int static struct mlx5_fs_chains *
mlx5_esw_chains_init(struct mlx5_eswitch *esw) mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{ {
struct mlx5_esw_chains_priv *chains_priv; struct mlx5_fs_chains *chains_priv;
struct mlx5_core_dev *dev = esw->dev;
u32 max_flow_counter, fdb_max;
struct mapping_ctx *mapping; struct mapping_ctx *mapping;
u32 max_flow_counter;
int err; int err;
chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL); chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
if (!chains_priv) if (!chains_priv)
return -ENOMEM; return ERR_PTR(-ENOMEM);
esw_chains_priv(esw) = chains_priv;
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
MLX5_CAP_GEN(dev, max_flow_counter_15_0); MLX5_CAP_GEN(dev, max_flow_counter_15_0);
fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
esw_debug(dev,
"Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
max_flow_counter, esw->params.large_group_num, fdb_max);
mlx5_esw_chains_init_sz_pool(esw);
if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
/* Disabled when ttl workaround is needed, e.g
* when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
*/
esw_warn(dev,
"Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
} else {
esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
mlx5_esw_chains_get_chain_range(esw),
mlx5_esw_chains_get_prio_range(esw));
}
err = rhashtable_init(&esw_chains_ht(esw), &chain_params); mlx5_core_dbg(dev,
"Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
max_flow_counter, attr->max_grp_num, attr->max_ft_sz);
chains_priv->dev = dev;
chains_priv->flags = attr->flags;
chains_priv->ns = attr->ns;
chains_priv->group_num = attr->max_grp_num;
tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
mlx5_chains_get_chain_range(chains_priv),
mlx5_chains_get_prio_range(chains_priv));
mlx5_chains_init_sz_pool(chains_priv, attr->max_ft_sz);
err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
if (err) if (err)
goto init_chains_ht_err; goto init_chains_ht_err;
err = rhashtable_init(&esw_prios_ht(esw), &prio_params); err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
if (err) if (err)
goto init_prios_ht_err; goto init_prios_ht_err;
mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw), mapping = mapping_create(sizeof(u32), attr->max_restore_tag,
true); true);
if (IS_ERR(mapping)) { if (IS_ERR(mapping)) {
err = PTR_ERR(mapping); err = PTR_ERR(mapping);
goto mapping_err; goto mapping_err;
} }
esw_chains_mapping(esw) = mapping; chains_mapping(chains_priv) = mapping;
mutex_init(&esw_chains_lock(esw)); mutex_init(&chains_lock(chains_priv));
return 0; return chains_priv;
mapping_err: mapping_err:
rhashtable_destroy(&esw_prios_ht(esw)); rhashtable_destroy(&prios_ht(chains_priv));
init_prios_ht_err: init_prios_ht_err:
rhashtable_destroy(&esw_chains_ht(esw)); rhashtable_destroy(&chains_ht(chains_priv));
init_chains_ht_err: init_chains_ht_err:
kfree(chains_priv); kfree(chains_priv);
return err; return ERR_PTR(err);
} }
static void static void
mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw) mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
{ {
mutex_destroy(&esw_chains_lock(esw)); mutex_destroy(&chains_lock(chains));
mapping_destroy(esw_chains_mapping(esw)); mapping_destroy(chains_mapping(chains));
rhashtable_destroy(&esw_prios_ht(esw)); rhashtable_destroy(&prios_ht(chains));
rhashtable_destroy(&esw_chains_ht(esw)); rhashtable_destroy(&chains_ht(chains));
kfree(esw_chains_priv(esw)); kfree(chains);
} }
static int struct mlx5_fs_chains *
mlx5_esw_chains_open(struct mlx5_eswitch *esw) mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{ {
struct mlx5_flow_table *ft; struct mlx5_fs_chains *chains;
int err;
/* Create tc_end_fdb(esw) which is the always created ft chain */ chains = mlx5_chains_init(dev, attr);
ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tc_end_fdb(esw) = ft; return chains;
/* Always open the root for fast path */
ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto level_0_err;
}
/* Open level 1 for split rules now if prios isn't supported */
if (!mlx5_esw_chains_prios_supported(esw)) {
err = mlx5_esw_vport_tbl_get(esw);
if (err)
goto level_1_err;
}
return 0;
level_1_err:
mlx5_esw_chains_put_table(esw, 0, 1, 0);
level_0_err:
mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
return err;
}
static void
mlx5_esw_chains_close(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
mlx5_esw_vport_tbl_put(esw);
mlx5_esw_chains_put_table(esw, 0, 1, 0);
mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
}
int
mlx5_esw_chains_create(struct mlx5_eswitch *esw)
{
int err;
err = mlx5_esw_chains_init(esw);
if (err)
return err;
err = mlx5_esw_chains_open(esw);
if (err)
goto err_open;
return 0;
err_open:
mlx5_esw_chains_cleanup(esw);
return err;
} }
void void
mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) mlx5_chains_destroy(struct mlx5_fs_chains *chains)
{ {
mlx5_esw_chains_close(esw); mlx5_chains_cleanup(chains);
mlx5_esw_chains_cleanup(esw);
} }
int int
mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain, mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
u32 *chain_mapping) u32 *chain_mapping)
{ {
return mapping_add(esw_chains_mapping(esw), &chain, chain_mapping); return mapping_add(chains_mapping(chains), &chain, chain_mapping);
} }
int int
mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, u32 chain_mapping) mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
{ {
return mapping_remove(esw_chains_mapping(esw), chain_mapping); return mapping_remove(chains_mapping(chains), chain_mapping);
} }
int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, int mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag,
u32 *chain) u32 *chain)
{ {
int err; int err;
err = mapping_find(esw_chains_mapping(esw), tag, chain); err = mapping_find(chains_mapping(chains), tag, chain);
if (err) { if (err) {
esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag); mlx5_core_warn(chains->dev, "Can't find chain for tag: %d\n", tag);
return -ENOENT; return -ENOENT;
} }
......
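For orientation, here is a minimal sketch (not part of the patch) of the chain-mapping round trip implemented by the functions just above; the helper name and the WARN_ON check are illustrative assumptions, not code from this commit.

/* Illustrative only: round-tripping a chain id through the chains mapping,
 * roughly what a miss-path handler does when it restores the chain from a
 * packet's restore tag. Assumes "lib/fs_chains.h" is included.
 */
static int example_chain_mapping_roundtrip(struct mlx5_fs_chains *chains,
					   u32 chain)
{
	u32 tag, restored_chain;
	int err;

	/* Allocate a tag that uniquely identifies this chain. */
	err = mlx5_chains_get_chain_mapping(chains, chain, &tag);
	if (err)
		return err;

	/* Later, e.g. on a table miss, recover the chain from the tag. */
	err = mlx5_get_chain_for_tag(chains, tag, &restored_chain);
	if (!err)
		WARN_ON(restored_chain != chain);

	/* Release the mapping once the last user of the chain is gone. */
	mlx5_chains_put_chain_mapping(chains, tag);
	return err;
}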
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies. */
#ifndef __MLX5_FS_CHAINS_H__
#define __MLX5_FS_CHAINS_H__
#include <linux/mlx5/fs.h>
struct mlx5_fs_chains;
enum mlx5_chains_flags {
MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED = BIT(1),
MLX5_CHAINS_FT_TUNNEL_SUPPORTED = BIT(2),
};
struct mlx5_chains_attr {
enum mlx5_flow_namespace_type ns;
u32 flags;
u32 max_ft_sz;
u32 max_grp_num;
struct mlx5_flow_table *default_ft;
u32 max_restore_tag;
};
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
bool
mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
bool
mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
u32
mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains);
u32
mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains);
u32
mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains);
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level);
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level);
struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains);
struct mlx5_flow_table *
mlx5_chains_create_global_table(struct mlx5_fs_chains *chains);
void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft);
int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
u32 *chain_mapping);
int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains,
u32 chain_mapping);
struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
int
mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, u32 *chain);
void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft);
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level) { return ERR_PTR(-EOPNOTSUPP); }
static inline void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level) {};
static inline struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains) { return ERR_PTR(-EOPNOTSUPP); }
static inline struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{ return NULL; }
static inline void
mlx5_chains_destroy(struct mlx5_fs_chains *chains) {};
#endif /* CONFIG_MLX5_CLS_ACT */
#endif /* __MLX5_FS_CHAINS_H__ */
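For reference, a minimal caller sketch (not part of the patch) of how a steering namespace owner might consume the generic API declared in this header; the namespace, table size, group count and restore-tag range below are illustrative assumptions rather than values taken from this commit.

/* Hypothetical caller: create a chains instance for some steering namespace
 * and take/release one reference-counted table. Attribute values are
 * made-up placeholders.
 */
#include <linux/mlx5/fs.h>
#include "lib/fs_chains.h"

static struct mlx5_fs_chains *
example_chains_setup(struct mlx5_core_dev *dev, struct mlx5_flow_table *miss_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;

	attr.ns = MLX5_FLOW_NAMESPACE_FDB;	/* any namespace the caller owns */
	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	attr.max_ft_sz = 1 << 16;		/* placeholder table size */
	attr.max_grp_num = 4;			/* placeholder group count */
	attr.default_ft = miss_ft;		/* where table misses end up */
	attr.max_restore_tag = (1 << 16) - 1;	/* placeholder mapping range */

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Tables are reference counted: the first get creates chain 0,
	 * prio 1, level 0 and its miss rule towards the next table.
	 */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		mlx5_chains_destroy(chains);
		return ERR_CAST(ft);
	}

	/* The last put of a table tears it down again. */
	mlx5_chains_put_table(chains, 0, 1, 0);

	return chains;
}

The same calls work for any namespace because every piece of per-namespace state now lives in the mlx5_fs_chains object passed in by the caller rather than in the eswitch itself.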