Commit 16e82920 authored by David S. Miller

Merge branch 'mlxsw-Add-VXLAN-support-for-Spectrum-2'

Ido Schimmel says:

====================
mlxsw: Add VXLAN support for Spectrum-2

This patchset adds support for VXLAN tunneling on the Spectrum-2 ASIC.
Spectrum-1 and Spectrum-2 are largely backward compatible in this area,
so not too many changes are required.

Patches #1-#2 expose a function and perform small refactoring towards
the actual Spectrum-2 implementation in patches #3-#4.

Patch #3 adds the required initialization steps on Spectrum-2.

Patch #4 finally enables VXLAN on Spectrum-2.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 482dcf7d 02d21b59
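
Note: the split the cover letter describes hinges on the per-ASIC ops tables visible in the diff below (mlxsw_sp1_nve_vxlan_ops and mlxsw_sp2_nve_vxlan_ops): VXLAN callbacks that behave identically on both ASICs are shared, while init/fini stay per generation. The following is a minimal standalone sketch of that dispatch pattern; all names and types here are illustrative stand-ins, not the real mlxsw definitions.

/* Build: cc -o ops ops.c */
#include <stdbool.h>
#include <stdio.h>

struct nve_ops {
        const char *name;
        bool (*can_offload)(void);      /* shared between generations */
        int (*init)(void);              /* per-ASIC */
        void (*fini)(void);             /* per-ASIC */
};

static bool common_can_offload(void) { return true; }
static int sp1_init(void)  { puts("SP1 init"); return 0; }
static void sp1_fini(void) { puts("SP1 fini"); }
static int sp2_init(void)  { puts("SP2 init"); return 0; }
static void sp2_fini(void) { puts("SP2 fini"); }

static const struct nve_ops sp1_ops = { "Spectrum-1", common_can_offload, sp1_init, sp1_fini };
static const struct nve_ops sp2_ops = { "Spectrum-2", common_can_offload, sp2_init, sp2_fini };

int main(void)
{
        const struct nve_ops *ops = &sp2_ops;   /* chosen per ASIC at probe time */

        printf("probing %s NVE\n", ops->name);
        if (ops->can_offload() && ops->init() == 0)
                ops->fini();
        return 0;
}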
@@ -507,6 +507,9 @@ void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
 			      const union mlxsw_sp_l3addr *ul_sip);
 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
 				u16 *vr_id);
+int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+			       u16 *ul_rif_index);
+void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index);
 
 /* spectrum_kvdl.c */
 enum mlxsw_sp_kvdl_entry_type {
...
@@ -28,6 +28,7 @@ struct mlxsw_sp_nve {
 	unsigned int num_nve_tunnels;	/* Protected by RTNL */
 	unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
 	u32 tunnel_index;
+	u16 ul_rif_index;	/* Reserved for Spectrum */
 };
 
 struct mlxsw_sp_nve_ops {
...
@@ -7,6 +7,7 @@
 #include <net/vxlan.h>
 
 #include "reg.h"
+#include "spectrum.h"
 #include "spectrum_nve.h"
 
 /* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B)
@@ -20,9 +21,9 @@
 #define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS	(VXLAN_F_UDP_ZERO_CSUM_TX | \
 						 VXLAN_F_LEARN)
 
-static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
-					    const struct net_device *dev,
-					    struct netlink_ext_ack *extack)
+static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+					   const struct net_device *dev,
+					   struct netlink_ext_ack *extack)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_config *cfg = &vxlan->cfg;
@@ -112,13 +113,30 @@ static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
 }
 
+static void
+mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
+				  const struct mlxsw_sp_nve_config *config)
+{
+	u8 udp_sport;
+
+	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
+			     config->ttl);
+	/* VxLAN driver's default UDP source port range is 32768 (0x8000)
+	 * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
+	 * to a random number between 0x80 and 0xee
+	 */
+	get_random_bytes(&udp_sport, sizeof(udp_sport));
+	udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
+	mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+	mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
+}
+
 static int
 mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
 			       const struct mlxsw_sp_nve_config *config)
 {
 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
 	u16 ul_vr_id;
-	u8 udp_sport;
 	int err;
 
 	err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
@@ -126,18 +144,9 @@ mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		return err;
 
-	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
-			     config->ttl);
-	/* VxLAN driver's default UDP source port range is 32768 (0x8000)
-	 * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
-	 * to a random number between 0x80 and 0xee
-	 */
-	get_random_bytes(&udp_sport, sizeof(udp_sport));
-	udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
-	mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
 	mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
 	mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
-	mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
 
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
 }
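
For reference, the UDP source-port prefix computation that moved into mlxsw_sp_nve_vxlan_config_prepare() above folds one random byte into the range 0x80..0xee and programs it as the upper 8 bits of the UDP source port. The following is a small userspace sketch of the same arithmetic (rand() stands in for get_random_bytes(); not driver code).

/* Build: cc -o sport sport.c */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
        unsigned char byte;
        unsigned int prefix, lowest_port, highest_port;

        srand((unsigned int)time(NULL));
        byte = (unsigned char)rand();           /* stand-in for get_random_bytes() */

        /* Fold the byte into [0x80, 0xee]: 0xee - 0x80 + 1 = 111 possible values. */
        prefix = (byte % (0xee - 0x80 + 1)) + 0x80;

        /* The prefix becomes the upper 8 bits of the 16-bit UDP source port. */
        lowest_port  = prefix << 8;             /* prefix followed by 0x00 */
        highest_port = (prefix << 8) | 0xff;    /* prefix followed by 0xff */

        printf("prefix=0x%02x -> source ports 0x%04x..0x%04x (%u..%u)\n",
               prefix, lowest_port, highest_port, lowest_port, highest_port);
        return 0;
}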
@@ -231,7 +240,7 @@ mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
 
 const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
 	.type = MLXSW_SP_NVE_TYPE_VXLAN,
-	.can_offload = mlxsw_sp1_nve_vxlan_can_offload,
+	.can_offload = mlxsw_sp_nve_vxlan_can_offload,
 	.nve_config = mlxsw_sp_nve_vxlan_config,
 	.init = mlxsw_sp1_nve_vxlan_init,
 	.fini = mlxsw_sp1_nve_vxlan_fini,
@@ -239,26 +248,126 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
 	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
 };
 
-static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
-					    const struct net_device *dev,
-					    struct netlink_ext_ack *extack)
+static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+					     bool learning_en)
 {
-	return false;
+	char tnpc_pl[MLXSW_REG_TNPC_LEN];
+
+	mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
+			    learning_en);
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
+}
+
+static int
+mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
+			       const struct mlxsw_sp_nve_config *config)
+{
+	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+	u16 ul_rif_index;
+	int err;
+
+	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
+					 &ul_rif_index);
+	if (err)
+		return err;
+	mlxsw_sp->nve->ul_rif_index = ul_rif_index;
+
+	err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
+	if (err)
+		goto err_vxlan_learning_set;
+
+	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
+	mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);
+
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+	if (err)
+		goto err_tngcr_write;
+
+	return 0;
+
+err_tngcr_write:
+	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
+err_vxlan_learning_set:
+	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
+	return err;
+}
+
+static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
+{
+	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+
+	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
+	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
+}
+
+static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
+					unsigned int tunnel_index,
+					u16 ul_rif_index)
+{
+	char rtdp_pl[MLXSW_REG_RTDP_LEN];
+
+	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
+	mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
 }
 
 static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
 				    const struct mlxsw_sp_nve_config *config)
 {
-	return -EOPNOTSUPP;
+	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+	int err;
+
+	err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
+				       MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
+				       config->udp_dport);
+	if (err)
+		return err;
+
+	err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
+	if (err)
+		goto err_config_set;
+
+	err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
+					   nve->ul_rif_index);
+	if (err)
+		goto err_rtdp_set;
+
+	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
+						config->ul_proto,
+						&config->ul_sip,
+						nve->tunnel_index);
+	if (err)
+		goto err_promote_decap;
+
+	return 0;
+
+err_promote_decap:
+err_rtdp_set:
+	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
+err_config_set:
+	mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+				 config->udp_dport);
+	return err;
 }
 
 static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
 {
+	struct mlxsw_sp_nve_config *config = &nve->config;
+	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+
+	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
+					 config->ul_proto, &config->ul_sip);
+	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
+	mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+				 config->udp_dport);
 }
 
 const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
 	.type = MLXSW_SP_NVE_TYPE_VXLAN,
-	.can_offload = mlxsw_sp2_nve_vxlan_can_offload,
+	.can_offload = mlxsw_sp_nve_vxlan_can_offload,
 	.nve_config = mlxsw_sp_nve_vxlan_config,
 	.init = mlxsw_sp2_nve_vxlan_init,
 	.fini = mlxsw_sp2_nve_vxlan_fini,
...
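
The Spectrum-2 init path above follows the usual goto-unwind error handling: parsing depth, tunnel configuration, RTDP and decap promotion are set up in order, and a failure at any step undoes only what was already completed, in reverse. Below is a self-contained sketch of that pattern with stand-in step functions; the real steps are mlxsw_sp_nve_parsing_set(), mlxsw_sp2_nve_vxlan_config_set(), mlxsw_sp2_nve_vxlan_rtdp_set() and mlxsw_sp_router_nve_promote_decap().

/* Build: cc -o unwind unwind.c */
#include <stdio.h>

static int step_parsing(void)        { puts("parsing set");     return 0; }
static void undo_parsing(void)       { puts("parsing reset");   }
static int step_config(void)         { puts("config set");      return 0; }
static void undo_config(void)        { puts("config clear");    }
static int step_rtdp(void)           { puts("rtdp set");        return 0; }
static int step_promote_decap(void)  { puts("decap promoted");  return -1; /* force failure for the demo */ }

static int tunnel_init(void)
{
        int err;

        err = step_parsing();
        if (err)
                return err;

        err = step_config();
        if (err)
                goto err_config;

        err = step_rtdp();
        if (err)
                goto err_rtdp;

        err = step_promote_decap();
        if (err)
                goto err_promote_decap;

        return 0;

err_promote_decap:
err_rtdp:
        undo_config();  /* no separate undo for the RTDP step, as in the patch */
err_config:
        undo_parsing();
        return err;
}

int main(void)
{
        return tunnel_init() ? 1 : 0;
}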
@@ -7595,6 +7595,34 @@ static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
 	mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
+int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+			       u16 *ul_rif_index)
+{
+	struct mlxsw_sp_rif *ul_rif;
+
+	ASSERT_RTNL();
+
+	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+	if (IS_ERR(ul_rif))
+		return PTR_ERR(ul_rif);
+	*ul_rif_index = ul_rif->rif_index;
+
+	return 0;
+}
+
+void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
+{
+	struct mlxsw_sp_rif *ul_rif;
+
+	ASSERT_RTNL();
+
+	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
+	if (WARN_ON(!ul_rif))
+		return;
+
+	mlxsw_sp_ul_rif_put(ul_rif);
+}
+
 static int
 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
 {
...
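
The two helpers above hand out the underlay RIF by index rather than by pointer, so the NVE code only stores a u16 (ul_rif_index) and returns it on teardown, while the router keeps the backing mlxsw_sp_rif in its own rifs[] table. The following is a simplified standalone sketch of that get/put-by-index idea; the table, refcount and trivial lookup are illustrative stand-ins, not the real mlxsw_sp_ul_rif_get() logic.

/* Build: cc -o rif rif.c */
#include <assert.h>
#include <stdio.h>

#define MAX_RIFS 16

struct rif {
        unsigned int refcount;
        unsigned short index;
};

static struct rif *rifs[MAX_RIFS];
static struct rif rif_pool[MAX_RIFS];

/* Create or reuse the RIF for a table id and report only its index. */
static int ul_rif_get(unsigned int tb_id, unsigned short *index)
{
        unsigned short i = tb_id % MAX_RIFS;    /* trivial stand-in lookup */

        if (!rifs[i]) {
                rifs[i] = &rif_pool[i];
                rifs[i]->index = i;
        }
        rifs[i]->refcount++;
        *index = i;
        return 0;
}

/* Drop one reference; release the slot when the last user is gone. */
static void ul_rif_put(unsigned short index)
{
        struct rif *rif = rifs[index];

        assert(rif);                            /* mirrors the WARN_ON() above */
        if (--rif->refcount == 0)
                rifs[index] = NULL;
}

int main(void)
{
        unsigned short index;

        ul_rif_get(257, &index);                /* e.g. underlay table id 257 */
        printf("using ul_rif_index %u\n", index);
        ul_rif_put(index);
        return 0;
}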