Commit cfc15c1d authored by Ido Schimmel, committed by David S. Miller

nexthop: Allow reporting activity of nexthop buckets

The kernel periodically checks the idle time of nexthop buckets to
determine if they are idle and can be re-populated with a new nexthop.

When the resilient nexthop group is offloaded to hardware, the kernel
will not see activity on nexthop buckets unless it is reported from
hardware.

Add a function that can be periodically called by device drivers to
report activity on nexthop buckets after querying it from the underlying
device.
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Reviewed-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 56ad5ba3
...@@ -222,6 +222,8 @@ int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap);
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity);

/* caller is holding rcu or rtnl; no reference taken to nexthop */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
......
...@@ -3106,6 +3106,41 @@ void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
/* nexthop_res_grp_activity_update - fold hardware-reported bucket activity
 * into a resilient nexthop group.
 * @net: namespace the group lives in.
 * @id: nexthop group ID.
 * @num_buckets: number of bits in @activity; must equal the group's
 *		 bucket count or the report is rejected.
 * @activity: bitmap with one bit set per bucket that saw traffic.
 *
 * Intended to be called periodically by drivers that offload resilient
 * nexthop groups, since the kernel cannot observe hardware traffic itself.
 * Silently ignores unknown IDs, non-groups and non-resilient groups.
 */
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	u16 bucket;

	rcu_read_lock();

	/* Lookup and all dereferences below are protected by RCU. */
	nh = nexthop_find_by_id(net, id);
	if (!nh || !nh->is_group)
		goto unlock;

	nhg = rcu_dereference(nh->nh_grp);
	if (!nhg->resilient)
		goto unlock;

	/* Require the caller's bitmap to cover exactly the group's buckets
	 * instead of silently ignoring some of them.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto unlock;

	for (bucket = 0; bucket < num_buckets; bucket++)
		if (test_bit(bucket, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[bucket]);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
static void __net_exit nexthop_net_exit(struct net *net)
{
	rtnl_lock();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment