Commit a3c785d7 authored by Saeed Mahameed

net/mlx5e: Vxlan, rename from mlx5e to mlx5

Rename the vxlan functions from mlx5e_vxlan_* to mlx5_vxlan_*.
Rename mlx5e_vxlan_db to mlx5_vxlan and move it from en.h to vxlan.c,
since it is no longer tied to mlx5e.

Allocate the mlx5_vxlan structure dynamically to make it easier to move
it to the core driver later and to keep it private to vxlan.c.

This is in preparation for moving the vxlan API to mlx5 core.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
parent 5006eb22
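
For context, a minimal sketch of the lifecycle this patch introduces (the helper function and its name are illustrative, not part of the patch): mlx5_vxlan_create() encodes "not supported" into the returned pointer as an ERR_PTR, so callers store the handle as-is and gate every use on mlx5_vxlan_allowed().

static void example_vxlan_lifecycle(struct mlx5e_priv *priv,
				    struct mlx5_core_dev *mdev)
{
	/* may return ERR_PTR(-ENOTSUPP) or ERR_PTR(-ENOMEM) */
	priv->vxlan = mlx5_vxlan_create(mdev);

	/* consumers check the handle itself, not the device caps */
	if (mlx5_vxlan_allowed(priv->vxlan))
		mlx5_vxlan_add_port(priv->vxlan, 4789);	/* refcounted */

	mlx5_vxlan_destroy(priv->vxlan);	/* no-op on ERR_PTR handles */
}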
drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -52,6 +52,7 @@
 #include "wq.h"
 #include "mlx5_core.h"
 #include "en_stats.h"
+#include "vxlan.h"
 
 struct page_pool;
@@ -654,13 +655,6 @@ enum {
 	MLX5E_STATE_DESTROYING,
 };
 
-struct mlx5e_vxlan_db {
-	spinlock_t lock; /* protect vxlan table */
-	/* max_num_ports is usuallly 4, 16 buckets is more than enough */
-	DECLARE_HASHTABLE(htable, 4);
-	int num_ports;
-};
-
 struct mlx5e_l2_rule {
 	u8 addr[ETH_ALEN + 2];
 	struct mlx5_flow_handle *rule;
@@ -818,7 +812,7 @@ struct mlx5e_priv {
 	u32 tx_rates[MLX5E_MAX_NUM_SQS];
 
 	struct mlx5e_flow_steering fs;
-	struct mlx5e_vxlan_db vxlan;
+	struct mlx5_vxlan *vxlan;
 
 	struct workqueue_struct *wq;
 	struct work_struct update_carrier_work;
...
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2974,7 +2974,7 @@ int mlx5e_open(struct net_device *netdev)
 		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
 	mutex_unlock(&priv->state_lock);
 
-	if (mlx5e_vxlan_allowed(priv->mdev))
+	if (mlx5_vxlan_allowed(priv->vxlan))
 		udp_tunnel_get_rx_info(netdev);
 
 	return err;
@@ -3983,7 +3983,7 @@ static void mlx5e_vxlan_add_work(struct work_struct *work)
 	u16 port = vxlan_work->port;
 
 	mutex_lock(&priv->state_lock);
-	mlx5e_vxlan_add_port(priv, port);
+	mlx5_vxlan_add_port(priv->vxlan, port);
 	mutex_unlock(&priv->state_lock);
 
 	kfree(vxlan_work);
@@ -3997,7 +3997,7 @@ static void mlx5e_vxlan_del_work(struct work_struct *work)
 	u16 port = vxlan_work->port;
 
 	mutex_lock(&priv->state_lock);
-	mlx5e_vxlan_del_port(priv, port);
+	mlx5_vxlan_del_port(priv->vxlan, port);
 	mutex_unlock(&priv->state_lock);
 	kfree(vxlan_work);
 }
@@ -4028,7 +4028,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
 		return;
 
-	if (!mlx5e_vxlan_allowed(priv->mdev))
+	if (!mlx5_vxlan_allowed(priv->vxlan))
 		return;
 
 	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
@@ -4042,7 +4042,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
 	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
 		return;
 
-	if (!mlx5e_vxlan_allowed(priv->mdev))
+	if (!mlx5_vxlan_allowed(priv->vxlan))
 		return;
 
 	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
@@ -4076,7 +4076,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 		port = be16_to_cpu(udph->dest);
 
 		/* Verify if UDP port is being offloaded by HW */
-		if (mlx5e_vxlan_lookup_port(priv, port))
+		if (mlx5_vxlan_lookup_port(priv->vxlan, port))
 			return features;
 	}
@@ -4648,7 +4648,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
 
-	if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+	if (mlx5_vxlan_allowed(priv->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
 		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
 		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4656,7 +4656,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
 	}
 
-	if (mlx5e_vxlan_allowed(mdev)) {
+	if (mlx5_vxlan_allowed(priv->vxlan)) {
 		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
 				       NETIF_F_GSO_UDP_TUNNEL_CSUM;
 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -4758,6 +4758,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int err;
 
+	priv->vxlan = mlx5_vxlan_create(mdev);
+
 	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
 	err = mlx5e_ipsec_init(priv);
 	if (err)
@@ -4767,14 +4769,13 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
 		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
 	mlx5e_build_nic_netdev(netdev);
 	mlx5e_build_tc2txq_maps(priv);
-	mlx5e_vxlan_init(priv);
 }
 
 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
+	mlx5_vxlan_destroy(priv->vxlan);
 	mlx5e_tls_cleanup(priv);
 	mlx5e_ipsec_cleanup(priv);
-	mlx5e_vxlan_cleanup(priv);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
...
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1133,7 +1133,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
 			goto vxlan_match_offload_err;
 
-		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
+		if (mlx5_vxlan_lookup_port(up_priv->vxlan, be16_to_cpu(key->dst)) &&
 		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
 			parse_vxlan_attr(spec, f);
 		else {
@@ -2557,7 +2557,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 		return -EOPNOTSUPP;
 	}
 
-	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
+	if (mlx5_vxlan_lookup_port(up_priv->vxlan, be16_to_cpu(key->tp_dst)) &&
 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
 	} else {
...
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -36,32 +36,26 @@
 #include "mlx5_core.h"
 #include "vxlan.h"
 
+struct mlx5_vxlan {
+	struct mlx5_core_dev *mdev;
+	spinlock_t lock; /* protect vxlan table */
+	int num_ports;
+	/* max_num_ports is usually 4, 16 buckets is more than enough */
+	DECLARE_HASHTABLE(htable, 4);
+};
+
 struct mlx5_vxlan_port {
 	struct hlist_node hlist;
 	atomic_t refcount;
 	u16 udp_port;
 };
-void mlx5e_vxlan_init(struct mlx5e_priv *priv)
-{
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-
-	spin_lock_init(&vxlan_db->lock);
-	hash_init(vxlan_db->htable);
-
-	if (mlx5e_vxlan_allowed(priv->mdev))
-		/* Hardware adds 4789 by default.
-		 * Lockless since we are the only hash table consumers, wq and TX are disabled.
-		 */
-		mlx5e_vxlan_add_port(priv, 4789);
-}
-
-static inline u8 mlx5e_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
+static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
 {
 	return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
 }
 
-static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
 {
 	u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
 	u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
@@ -72,7 +66,7 @@ static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
 }
 
-static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
 {
 	u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
 	u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
@@ -84,12 +78,11 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
 }
 
 static struct mlx5_vxlan_port*
-mlx5e_vxlan_lookup_port_locked(struct mlx5e_priv *priv, u16 port)
+mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
 {
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
 	struct mlx5_vxlan_port *vxlanp;
 
-	hash_for_each_possible(vxlan_db->htable, vxlanp, hlist, port) {
+	hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
 		if (vxlanp->udp_port == port)
 			return vxlanp;
 	}
@@ -97,37 +90,38 @@ mlx5e_vxlan_lookup_port_locked(struct mlx5e_priv *priv, u16 port)
 	return NULL;
 }
 
-struct mlx5_vxlan_port *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
 {
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
 	struct mlx5_vxlan_port *vxlanp;
 
-	spin_lock_bh(&vxlan_db->lock);
-	vxlanp = mlx5e_vxlan_lookup_port_locked(priv, port);
-	spin_unlock_bh(&vxlan_db->lock);
+	if (!mlx5_vxlan_allowed(vxlan))
+		return NULL;
+
+	spin_lock_bh(&vxlan->lock);
+	vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+	spin_unlock_bh(&vxlan->lock);
 
 	return vxlanp;
 }
 
-void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+void mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
 {
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
 	struct mlx5_vxlan_port *vxlanp;
 
-	vxlanp = mlx5e_vxlan_lookup_port(priv, port);
+	vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
 	if (vxlanp) {
 		atomic_inc(&vxlanp->refcount);
 		return;
 	}
 
-	if (vxlan_db->num_ports >= mlx5e_vxlan_max_udp_ports(priv->mdev)) {
-		netdev_info(priv->netdev,
-			    "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
-			    port, mlx5e_vxlan_max_udp_ports(priv->mdev));
+	if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
+		mlx5_core_info(vxlan->mdev,
+			       "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
+			       port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
 		return;
 	}
 
-	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+	if (mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port))
 		return;
 
 	vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
@@ -137,25 +131,24 @@ void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
 	vxlanp->udp_port = port;
 	atomic_set(&vxlanp->refcount, 1);
 
-	spin_lock_bh(&vxlan_db->lock);
-	hash_add(vxlan_db->htable, &vxlanp->hlist, port);
-	spin_unlock_bh(&vxlan_db->lock);
+	spin_lock_bh(&vxlan->lock);
+	hash_add(vxlan->htable, &vxlanp->hlist, port);
+	spin_unlock_bh(&vxlan->lock);
 
-	vxlan_db->num_ports++;
+	vxlan->num_ports++;
 	return;
 
 err_delete_port:
-	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+	mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
 }
 
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+void mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
 {
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
 	struct mlx5_vxlan_port *vxlanp;
 	bool remove = false;
 
-	spin_lock_bh(&vxlan_db->lock);
-	vxlanp = mlx5e_vxlan_lookup_port_locked(priv, port);
+	spin_lock_bh(&vxlan->lock);
+	vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
 	if (!vxlanp)
 		goto out_unlock;
 
@@ -165,26 +158,51 @@ void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
 	}
 
 out_unlock:
-	spin_unlock_bh(&vxlan_db->lock);
+	spin_unlock_bh(&vxlan->lock);
 
 	if (remove) {
-		mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
 		kfree(vxlanp);
-		vxlan_db->num_ports--;
+		vxlan->num_ports--;
 	}
 }
 
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_vxlan *vxlan;
+
+	if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
+		return ERR_PTR(-ENOTSUPP);
+
+	vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
+	if (!vxlan)
+		return ERR_PTR(-ENOMEM);
+
+	vxlan->mdev = mdev;
+	spin_lock_init(&vxlan->lock);
+	hash_init(vxlan->htable);
+
+	/* Hardware adds 4789 by default */
+	mlx5_vxlan_add_port(vxlan, 4789);
+
+	return vxlan;
+}
+
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
 {
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
 	struct mlx5_vxlan_port *vxlanp;
 	struct hlist_node *tmp;
 	int bkt;
 
-	/* Lockless since we are the only hash table consumers, wq and TX are disabled */
-	hash_for_each_safe(vxlan_db->htable, bkt, tmp, vxlanp, hlist) {
+	if (!mlx5_vxlan_allowed(vxlan))
+		return;
+
+	/* Lockless since we are the only hash table consumers */
+	hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
 		hash_del(&vxlanp->hlist);
-		mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlanp->udp_port);
+		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
 		kfree(vxlanp);
 	}
+
+	kfree(vxlan);
 }
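
The delete path above is worth calling out: the lookup and refcount drop happen under the spinlock, while the firmware command and kfree() are deferred until the lock is released. A condensed sketch of that pattern follows; it is abbreviated from the hunk above, and since the middle of that hunk is elided in this view, the atomic_dec_and_test() refcount check is an assumption, not quoted code.

void sketch_del_port(struct mlx5_vxlan *vxlan, u16 port)
{
	struct mlx5_vxlan_port *vxlanp;
	bool remove = false;

	spin_lock_bh(&vxlan->lock);
	vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
	if (vxlanp && atomic_dec_and_test(&vxlanp->refcount)) {
		/* last reference: unhash under the lock ... */
		hash_del(&vxlanp->hlist);
		remove = true;
	}
	spin_unlock_bh(&vxlan->lock);

	/* ... but issue the firmware command and free outside of it */
	if (remove) {
		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
		kfree(vxlanp);
		vxlan->num_ports--;
	}
}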
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -33,20 +33,32 @@
 #define __MLX5_VXLAN_H__
 
 #include <linux/mlx5/driver.h>
-#include "en.h"
 
+struct mlx5_vxlan;
 struct mlx5_vxlan_port;
 
-static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
+#ifdef CONFIG_MLX5_CORE_EN
+
+static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
 {
-	return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
-		mlx5_core_is_pf(mdev));
+	/* not allowed reason is encoded in vxlan pointer as error,
+	 * on mlx5_vxlan_create
+	 */
+	return !IS_ERR_OR_NULL(vxlan);
 }
 
-void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
-void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
-struct mlx5_vxlan_port *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
+void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
+void mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
+void mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
+struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+
+#else
+
+static inline struct mlx5_vxlan*
+mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-ENOTSUPP); }
+static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
+
+#endif
+
 #endif /* __MLX5_VXLAN_H__ */
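
A note on the ERR_PTR encoding used above: a single pointer carries both the valid handle and the reason offload is unavailable, which is what lets the CONFIG_MLX5_CORE_EN=n stubs return ERR_PTR(-ENOTSUPP) without a separate status field. An illustrative sketch of how a caller could recover that reason; vxlan_handle_status() is a hypothetical helper, not part of the patch, and assumes CONFIG_MLX5_CORE_EN=y so mlx5_vxlan_allowed() is defined.

static int vxlan_handle_status(struct mlx5_vxlan *vxlan)
{
	if (mlx5_vxlan_allowed(vxlan))
		return 0;		/* valid handle, offload usable */
	if (IS_ERR(vxlan))
		return PTR_ERR(vxlan);	/* e.g. -ENOTSUPP or -ENOMEM */
	return -EINVAL;			/* NULL: handle never created */
}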