Commit 20cce886 authored by John Hurley, committed by David S. Miller

nfp: flower: enable MAC address sharing for offloadable devs

A MAC address is not necessarily a unique identifier for a netdev. Drivers
such as Linux bonds, for example, can apply the same MAC address to the
upper layer device and all lower layer devices.

NFP MAC offload for tunnel decap includes port verification for reprs but
also supports the offload of non-repr MAC addresses by assigning 'global'
indexes to these. This means that the FW will not verify the incoming port
of a packet matching this destination MAC.

Modify the MAC offload logic to assign global indexes based on MAC address
instead of net device (as it currently does). Use this to allow multiple
devices to share the same MAC. In other words, if a repr shares its MAC
address with another device then give the offloaded MAC a global index
rather than associate it with an ingress port. Track this so that changes
can be reverted as MACs stop being shared.

Implement this by removing the current list based assignment of global
indexes and replacing it with an rhashtable that maps an offloaded MAC
address to the number of devices sharing it, distributing global indexes
based on this.
Signed-off-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 13cf7103
...@@ -286,6 +286,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, ...@@ -286,6 +286,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
nfp_repr = netdev_priv(repr); nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv; nfp_repr->app_priv = repr_priv;
repr_priv->nfp_repr = nfp_repr;
/* For now we only support 1 PF */ /* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
...@@ -400,6 +401,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) ...@@ -400,6 +401,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
nfp_repr = netdev_priv(repr); nfp_repr = netdev_priv(repr);
nfp_repr->app_priv = repr_priv; nfp_repr->app_priv = repr_priv;
repr_priv->nfp_repr = nfp_repr;
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) { if (IS_ERR(port)) {
......
...@@ -58,20 +58,18 @@ struct nfp_fl_stats_id { ...@@ -58,20 +58,18 @@ struct nfp_fl_stats_id {
/** /**
* struct nfp_fl_tunnel_offloads - priv data for tunnel offloads * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
* @mac_index_list: List of unique 8-bit indexes for non NFP netdevs * @offloaded_macs: Hashtable of the offloaded MAC addresses
* @ipv4_off_list: List of IPv4 addresses to offload * @ipv4_off_list: List of IPv4 addresses to offload
* @neigh_off_list: List of neighbour offloads * @neigh_off_list: List of neighbour offloads
* @mac_index_lock: Lock for the MAC index list
* @ipv4_off_lock: Lock for the IPv4 address list * @ipv4_off_lock: Lock for the IPv4 address list
* @neigh_off_lock: Lock for the neighbour address list * @neigh_off_lock: Lock for the neighbour address list
* @mac_off_ids: IDA to manage id assignment for offloaded MACs * @mac_off_ids: IDA to manage id assignment for offloaded MACs
* @neigh_nb: Notifier to monitor neighbour state * @neigh_nb: Notifier to monitor neighbour state
*/ */
struct nfp_fl_tunnel_offloads { struct nfp_fl_tunnel_offloads {
struct list_head mac_index_list; struct rhashtable offloaded_macs;
struct list_head ipv4_off_list; struct list_head ipv4_off_list;
struct list_head neigh_off_list; struct list_head neigh_off_list;
struct mutex mac_index_lock;
struct mutex ipv4_off_lock; struct mutex ipv4_off_lock;
spinlock_t neigh_off_lock; spinlock_t neigh_off_lock;
struct ida mac_off_ids; struct ida mac_off_ids;
...@@ -178,14 +176,18 @@ struct nfp_flower_priv { ...@@ -178,14 +176,18 @@ struct nfp_flower_priv {
/** /**
* struct nfp_flower_repr_priv - Flower APP per-repr priv data * struct nfp_flower_repr_priv - Flower APP per-repr priv data
* @nfp_repr: Back pointer to nfp_repr
* @lag_port_flags: Extended port flags to record lag state of repr * @lag_port_flags: Extended port flags to record lag state of repr
* @mac_offloaded: Flag indicating a MAC address is offloaded for repr * @mac_offloaded: Flag indicating a MAC address is offloaded for repr
* @offloaded_mac_addr: MAC address that has been offloaded for repr * @offloaded_mac_addr: MAC address that has been offloaded for repr
* @mac_list: List entry of reprs that share the same offloaded MAC
*/ */
struct nfp_flower_repr_priv { struct nfp_flower_repr_priv {
struct nfp_repr *nfp_repr;
unsigned long lag_port_flags; unsigned long lag_port_flags;
bool mac_offloaded; bool mac_offloaded;
u8 offloaded_mac_addr[ETH_ALEN]; u8 offloaded_mac_addr[ETH_ALEN];
struct list_head mac_list;
}; };
/** /**
......
...@@ -123,15 +123,26 @@ enum nfp_flower_mac_offload_cmd { ...@@ -123,15 +123,26 @@ enum nfp_flower_mac_offload_cmd {
#define NFP_MAX_MAC_INDEX 0xff #define NFP_MAX_MAC_INDEX 0xff
/** /**
* struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
* @ifindex: netdev ifindex of the device * @ht_node: Hashtable entry
* @index: index of netdevs mac on NFP * @addr: Offloaded MAC address
* @list: list pointer * @index: Offloaded index for given MAC address
* @ref_count: Number of devs using this MAC address
* @repr_list: List of reprs sharing this MAC address
*/ */
struct nfp_tun_mac_non_nfp_idx { struct nfp_tun_offloaded_mac {
int ifindex; struct rhash_head ht_node;
u8 index; u8 addr[ETH_ALEN];
struct list_head list; u16 index;
int ref_count;
struct list_head repr_list;
};
static const struct rhashtable_params offloaded_macs_params = {
.key_offset = offsetof(struct nfp_tun_offloaded_mac, addr),
.head_offset = offsetof(struct nfp_tun_offloaded_mac, ht_node),
.key_len = ETH_ALEN,
.automatic_shrinking = true,
}; };
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
...@@ -466,103 +477,217 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4) ...@@ -466,103 +477,217 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
nfp_tun_write_ipv4_list(app); nfp_tun_write_ipv4_list(app);
} }
static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex) static int
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
{
struct nfp_tun_mac_addr_offload payload;
memset(&payload, 0, sizeof(payload));
if (del)
payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
/* FW supports multiple MACs per cmsg but restrict to single. */
payload.count = cpu_to_be16(1);
payload.index = cpu_to_be16(idx);
ether_addr_copy(payload.addr, mac);
return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
sizeof(struct nfp_tun_mac_addr_offload),
&payload, GFP_KERNEL);
}
static bool nfp_tunnel_port_is_phy_repr(int port)
{
if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
return true;
return false;
}
static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
{
return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}
static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
{
return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}
static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
{
return nfp_mac_idx >> 8;
}
static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
{
return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}
static struct nfp_tun_offloaded_mac *
nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
{ {
struct nfp_flower_priv *priv = app->priv; struct nfp_flower_priv *priv = app->priv;
struct nfp_tun_mac_non_nfp_idx *entry;
struct list_head *ptr, *storage;
int idx;
mutex_lock(&priv->tun.mac_index_lock); return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
list_for_each_safe(ptr, storage, &priv->tun.mac_index_list) { offloaded_macs_params);
entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list); }
if (entry->ifindex == ifindex) {
idx = entry->index; static void
mutex_unlock(&priv->tun.mac_index_lock); nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
return idx; struct net_device *netdev, bool mod)
{
if (nfp_netdev_is_nfp_repr(netdev)) {
struct nfp_flower_repr_priv *repr_priv;
struct nfp_repr *repr;
repr = netdev_priv(netdev);
repr_priv = repr->app_priv;
/* If modifing MAC, remove repr from old list first. */
if (mod)
list_del(&repr_priv->mac_list);
list_add_tail(&repr_priv->mac_list, &entry->repr_list);
} }
entry->ref_count++;
}
static int
nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
int port, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
int ida_idx = NFP_MAX_MAC_INDEX, err;
struct nfp_tun_offloaded_mac *entry;
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
return 0;
} }
idx = ida_simple_get(&priv->tun.mac_off_ids, 0, /* Assign a global index if non-repr or MAC address is now shared. */
if (entry || !port) {
ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
NFP_MAX_MAC_INDEX, GFP_KERNEL); NFP_MAX_MAC_INDEX, GFP_KERNEL);
if (idx < 0) { if (ida_idx < 0)
mutex_unlock(&priv->tun.mac_index_lock); return ida_idx;
return idx;
nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
} else {
nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
} }
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) { if (!entry) {
mutex_unlock(&priv->tun.mac_index_lock); entry = kzalloc(sizeof(*entry), GFP_KERNEL);
return -ENOMEM; if (!entry) {
err = -ENOMEM;
goto err_free_ida;
} }
entry->ifindex = ifindex;
entry->index = idx;
list_add_tail(&entry->list, &priv->tun.mac_index_list);
mutex_unlock(&priv->tun.mac_index_lock);
return idx; ether_addr_copy(entry->addr, netdev->dev_addr);
} INIT_LIST_HEAD(&entry->repr_list);
static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex) if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
{ &entry->ht_node,
struct nfp_flower_priv *priv = app->priv; offloaded_macs_params)) {
struct nfp_tun_mac_non_nfp_idx *entry; err = -ENOMEM;
struct list_head *ptr, *storage; goto err_free_entry;
mutex_lock(&priv->tun.mac_index_lock);
list_for_each_safe(ptr, storage, &priv->tun.mac_index_list) {
entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
if (entry->ifindex == ifindex) {
ida_simple_remove(&priv->tun.mac_off_ids,
entry->index);
list_del(&entry->list);
kfree(entry);
break;
} }
} }
mutex_unlock(&priv->tun.mac_index_lock);
}
static int err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del) nfp_mac_idx, false);
{ if (err) {
struct nfp_tun_mac_addr_offload payload; /* If not shared then free. */
if (!entry->ref_count)
goto err_remove_hash;
goto err_free_ida;
}
memset(&payload, 0, sizeof(payload)); entry->index = nfp_mac_idx;
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
if (del) return 0;
payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
/* FW supports multiple MACs per cmsg but restrict to single. */ err_remove_hash:
payload.count = cpu_to_be16(1); rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
payload.index = cpu_to_be16(idx); offloaded_macs_params);
ether_addr_copy(payload.addr, mac); err_free_entry:
kfree(entry);
err_free_ida:
if (ida_idx != NFP_MAX_MAC_INDEX)
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC, return err;
sizeof(struct nfp_tun_mac_addr_offload),
&payload, GFP_KERNEL);
} }
static int static int
nfp_tunnel_get_mac_idx_from_port(struct nfp_app *app, struct net_device *netdev, nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
int port, u16 *nfp_mac_idx) u8 *mac, bool mod)
{ {
if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) == struct nfp_flower_priv *priv = app->priv;
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) { struct nfp_flower_repr_priv *repr_priv;
*nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT; struct nfp_tun_offloaded_mac *entry;
} else if (!port) { struct nfp_repr *repr;
/* Must assign our own unique 8-bit index. */ int ida_idx;
int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
if (idx < 0) entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
return idx; if (!entry)
return 0;
*nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT; entry->ref_count--;
} else { /* If del is part of a mod then mac_list is still in use elsewheree. */
return -EOPNOTSUPP; if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
repr = netdev_priv(netdev);
repr_priv = repr->app_priv;
list_del(&repr_priv->mac_list);
}
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
u16 nfp_mac_idx;
int port, err;
repr_priv = list_first_entry(&entry->repr_list,
struct nfp_flower_repr_priv,
mac_list);
repr = repr_priv->nfp_repr;
port = nfp_repr_get_port_id(repr->netdev);
nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
if (err) {
nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
netdev_name(netdev));
return 0;
}
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
entry->index = nfp_mac_idx;
return 0;
} }
if (entry->ref_count)
return 0; return 0;
WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
&entry->ht_node,
offloaded_macs_params));
/* If MAC has global ID then extract and free the ida entry. */
if (nfp_tunnel_is_mac_idx_global(entry->index)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
}
kfree(entry);
return __nfp_tunnel_offload_mac(app, mac, 0, true);
} }
static int static int
...@@ -573,7 +698,6 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, ...@@ -573,7 +698,6 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
bool non_repr = false, *mac_offloaded; bool non_repr = false, *mac_offloaded;
u8 *off_mac = NULL; u8 *off_mac = NULL;
int err, port = 0; int err, port = 0;
u16 nfp_mac_idx;
if (nfp_netdev_is_nfp_repr(netdev)) { if (nfp_netdev_is_nfp_repr(netdev)) {
struct nfp_flower_repr_priv *repr_priv; struct nfp_flower_repr_priv *repr_priv;
...@@ -587,6 +711,8 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, ...@@ -587,6 +711,8 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
mac_offloaded = &repr_priv->mac_offloaded; mac_offloaded = &repr_priv->mac_offloaded;
off_mac = &repr_priv->offloaded_mac_addr[0]; off_mac = &repr_priv->offloaded_mac_addr[0];
port = nfp_repr_get_port_id(netdev); port = nfp_repr_get_port_id(netdev);
if (!nfp_tunnel_port_is_phy_repr(port))
return 0;
} else if (nfp_fl_is_netdev_to_offload(netdev)) { } else if (nfp_fl_is_netdev_to_offload(netdev)) {
nr_priv = nfp_flower_non_repr_priv_get(app, netdev); nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
if (!nr_priv) if (!nr_priv)
...@@ -609,16 +735,10 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, ...@@ -609,16 +735,10 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
switch (cmd) { switch (cmd) {
case NFP_TUNNEL_MAC_OFFLOAD_ADD: case NFP_TUNNEL_MAC_OFFLOAD_ADD:
err = nfp_tunnel_get_mac_idx_from_port(app, netdev, port, err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
&nfp_mac_idx);
if (err) if (err)
goto err_put_non_repr_priv; goto err_put_non_repr_priv;
err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
nfp_mac_idx, false);
if (err)
goto err_free_mac_idx;
if (non_repr) if (non_repr)
__nfp_flower_non_repr_priv_get(nr_priv); __nfp_flower_non_repr_priv_get(nr_priv);
...@@ -630,14 +750,13 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, ...@@ -630,14 +750,13 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
if (!*mac_offloaded) if (!*mac_offloaded)
break; break;
if (non_repr) { if (non_repr)
nfp_tun_del_mac_idx(app, netdev->ifindex);
__nfp_flower_non_repr_priv_put(nr_priv); __nfp_flower_non_repr_priv_put(nr_priv);
}
*mac_offloaded = false; *mac_offloaded = false;
err = __nfp_tunnel_offload_mac(app, netdev->dev_addr, 0, true); err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
false);
if (err) if (err)
goto err_put_non_repr_priv; goto err_put_non_repr_priv;
...@@ -647,19 +766,12 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, ...@@ -647,19 +766,12 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
if (ether_addr_equal(netdev->dev_addr, off_mac)) if (ether_addr_equal(netdev->dev_addr, off_mac))
break; break;
err = nfp_tunnel_get_mac_idx_from_port(app, netdev, port, err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
&nfp_mac_idx);
if (err)
goto err_put_non_repr_priv;
err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
nfp_mac_idx, false);
if (err) if (err)
goto err_put_non_repr_priv; goto err_put_non_repr_priv;
/* Delete the previous MAC address. */ /* Delete the previous MAC address. */
err = __nfp_tunnel_offload_mac(app, off_mac, nfp_mac_idx, err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
true);
if (err) if (err)
nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n", nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
netdev_name(netdev)); netdev_name(netdev));
...@@ -676,9 +788,6 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, ...@@ -676,9 +788,6 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
return 0; return 0;
err_free_mac_idx:
if (non_repr)
nfp_tun_del_mac_idx(app, netdev->ifindex);
err_put_non_repr_priv: err_put_non_repr_priv:
if (non_repr) if (non_repr)
__nfp_flower_non_repr_priv_put(nr_priv); __nfp_flower_non_repr_priv_put(nr_priv);
...@@ -721,10 +830,14 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app, ...@@ -721,10 +830,14 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
int nfp_tunnel_config_start(struct nfp_app *app) int nfp_tunnel_config_start(struct nfp_app *app)
{ {
struct nfp_flower_priv *priv = app->priv; struct nfp_flower_priv *priv = app->priv;
int err;
/* Initialise rhash for MAC offload tracking. */
err = rhashtable_init(&priv->tun.offloaded_macs,
&offloaded_macs_params);
if (err)
return err;
/* Initialise priv data for MAC offloading. */
mutex_init(&priv->tun.mac_index_lock);
INIT_LIST_HEAD(&priv->tun.mac_index_list);
ida_init(&priv->tun.mac_off_ids); ida_init(&priv->tun.mac_off_ids);
/* Initialise priv data for IPv4 offloading. */ /* Initialise priv data for IPv4 offloading. */
...@@ -736,27 +849,25 @@ int nfp_tunnel_config_start(struct nfp_app *app) ...@@ -736,27 +849,25 @@ int nfp_tunnel_config_start(struct nfp_app *app)
INIT_LIST_HEAD(&priv->tun.neigh_off_list); INIT_LIST_HEAD(&priv->tun.neigh_off_list);
priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler; priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
return register_netevent_notifier(&priv->tun.neigh_nb); err = register_netevent_notifier(&priv->tun.neigh_nb);
if (err) {
rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
nfp_check_rhashtable_empty, NULL);
return err;
}
return 0;
} }
void nfp_tunnel_config_stop(struct nfp_app *app) void nfp_tunnel_config_stop(struct nfp_app *app)
{ {
struct nfp_flower_priv *priv = app->priv; struct nfp_flower_priv *priv = app->priv;
struct nfp_ipv4_route_entry *route_entry; struct nfp_ipv4_route_entry *route_entry;
struct nfp_tun_mac_non_nfp_idx *mac_idx;
struct nfp_ipv4_addr_entry *ip_entry; struct nfp_ipv4_addr_entry *ip_entry;
struct list_head *ptr, *storage; struct list_head *ptr, *storage;
unregister_netevent_notifier(&priv->tun.neigh_nb); unregister_netevent_notifier(&priv->tun.neigh_nb);
/* Free any memory that may be occupied by MAC index list. */
list_for_each_safe(ptr, storage, &priv->tun.mac_index_list) {
mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
list);
list_del(&mac_idx->list);
kfree(mac_idx);
}
ida_destroy(&priv->tun.mac_off_ids); ida_destroy(&priv->tun.mac_off_ids);
/* Free any memory that may be occupied by ipv4 list. */ /* Free any memory that may be occupied by ipv4 list. */
...@@ -773,4 +884,8 @@ void nfp_tunnel_config_stop(struct nfp_app *app) ...@@ -773,4 +884,8 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
list_del(&route_entry->list); list_del(&route_entry->list);
kfree(route_entry); kfree(route_entry);
} }
/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
nfp_check_rhashtable_empty, NULL);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment