Commit 510c321b authored by Xin Long's avatar Xin Long Committed by Steffen Klassert

xfrm: reuse uncached_list to track xdsts

Previously, when freeing an xdst, it would first be inserted into
dst_garbage.list. Then, if its refcnt was still held somewhere,
it would later be moved to dst_busy_list in dst_gc_task().

When a dev was being unregistered, the dev of each dst on
dst_busy_list would be replaced with loopback_dev and the original
dev's refcount put. That way the dev's removal wouldn't get
blocked, avoiding the kmsg warning:

  kernel:unregister_netdevice: waiting for veth0 to become \
  free. Usage count = 2

However after Commit 52df157f ("xfrm: take refcnt of dst
when creating struct xfrm_dst bundle"), the xdst will not be
freed with dst gc, and this warning happens.

To fix it, we need to find the xdsts that are still held by
others when the dev is being removed, put the xdst's dev refcount
and point it at loopback_dev instead.

But unfortunately after flow_cache for xfrm was deleted, no
list tracks them anymore. So we need to save these xdsts
somewhere to release the xdst's dev later.

To make this easier, this patch is to reuse uncached_list to
track xdsts, so that the dev refcnt can be released in the
event NETDEV_UNREGISTER process of fib_netdev_notifier.

Thanks to Florian, we could move forward this fix quickly.

Fixes: 52df157f ("xfrm: take refcnt of dst when creating struct xfrm_dst bundle")
Reported-by: default avatarJianlin Shi <jishi@redhat.com>
Reported-by: default avatarHangbin Liu <liuhangbin@gmail.com>
Tested-by: default avatarEyal Birger <eyal.birger@gmail.com>
Signed-off-by: default avatarXin Long <lucien.xin@gmail.com>
Signed-off-by: default avatarSteffen Klassert <steffen.klassert@secunet.com>
parent d97ca5d7
...@@ -179,6 +179,9 @@ void rt6_disable_ip(struct net_device *dev, unsigned long event); ...@@ -179,6 +179,9 @@ void rt6_disable_ip(struct net_device *dev, unsigned long event);
void rt6_sync_down_dev(struct net_device *dev, unsigned long event); void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
void rt6_multipath_rebalance(struct rt6_info *rt); void rt6_multipath_rebalance(struct rt6_info *rt);
void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb) static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{ {
const struct dst_entry *dst = skb_dst(skb); const struct dst_entry *dst = skb_dst(skb);
......
...@@ -227,6 +227,9 @@ struct in_ifaddr; ...@@ -227,6 +227,9 @@ struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *); void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *); void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
void rt_add_uncached_list(struct rtable *rt);
void rt_del_uncached_list(struct rtable *rt);
static inline void ip_rt_put(struct rtable *rt) static inline void ip_rt_put(struct rtable *rt)
{ {
/* dst_release() accepts a NULL parameter. /* dst_release() accepts a NULL parameter.
......
...@@ -1383,7 +1383,7 @@ struct uncached_list { ...@@ -1383,7 +1383,7 @@ struct uncached_list {
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
static void rt_add_uncached_list(struct rtable *rt) void rt_add_uncached_list(struct rtable *rt)
{ {
struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list); struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
...@@ -1394,14 +1394,8 @@ static void rt_add_uncached_list(struct rtable *rt) ...@@ -1394,14 +1394,8 @@ static void rt_add_uncached_list(struct rtable *rt)
spin_unlock_bh(&ul->lock); spin_unlock_bh(&ul->lock);
} }
static void ipv4_dst_destroy(struct dst_entry *dst) void rt_del_uncached_list(struct rtable *rt)
{ {
struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
struct rtable *rt = (struct rtable *) dst;
if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
kfree(p);
if (!list_empty(&rt->rt_uncached)) { if (!list_empty(&rt->rt_uncached)) {
struct uncached_list *ul = rt->rt_uncached_list; struct uncached_list *ul = rt->rt_uncached_list;
...@@ -1411,6 +1405,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst) ...@@ -1411,6 +1405,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
} }
} }
/* dst_ops->destroy callback for IPv4 routes.
 *
 * Releases the per-route resources held by @dst: the (possibly shared)
 * metrics block, and the route's linkage on the per-cpu uncached list.
 */
static void ipv4_dst_destroy(struct dst_entry *dst)
{
/* DST_METRICS_PTR yields the metrics pointer with flag bits masked off. */
struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
struct rtable *rt = (struct rtable *)dst;

/* Free the metrics block only if this was the last reference and it is
 * not the shared read-only default (dst_default_metrics is never freed). */
if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
kfree(p);

/* Unlink from the per-cpu uncached list; a no-op if rt_uncached is empty
 * (rt_del_uncached_list checks list_empty internally, per the hunk above). */
rt_del_uncached_list(rt);
}
void rt_flush_dev(struct net_device *dev) void rt_flush_dev(struct net_device *dev)
{ {
struct net *net = dev_net(dev); struct net *net = dev_net(dev);
......
...@@ -102,6 +102,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, ...@@ -102,6 +102,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt.rt_pmtu = rt->rt_pmtu; xdst->u.rt.rt_pmtu = rt->rt_pmtu;
xdst->u.rt.rt_table_id = rt->rt_table_id; xdst->u.rt.rt_table_id = rt->rt_table_id;
INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
rt_add_uncached_list(&xdst->u.rt);
return 0; return 0;
} }
...@@ -241,7 +242,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst) ...@@ -241,7 +242,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
dst_destroy_metrics_generic(dst); dst_destroy_metrics_generic(dst);
if (xdst->u.rt.rt_uncached_list)
rt_del_uncached_list(&xdst->u.rt);
xfrm_dst_destroy(xdst); xfrm_dst_destroy(xdst);
} }
......
...@@ -128,7 +128,7 @@ struct uncached_list { ...@@ -128,7 +128,7 @@ struct uncached_list {
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
static void rt6_uncached_list_add(struct rt6_info *rt) void rt6_uncached_list_add(struct rt6_info *rt)
{ {
struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
...@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt) ...@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
spin_unlock_bh(&ul->lock); spin_unlock_bh(&ul->lock);
} }
static void rt6_uncached_list_del(struct rt6_info *rt) void rt6_uncached_list_del(struct rt6_info *rt)
{ {
if (!list_empty(&rt->rt6i_uncached)) { if (!list_empty(&rt->rt6i_uncached)) {
struct uncached_list *ul = rt->rt6i_uncached_list; struct uncached_list *ul = rt->rt6i_uncached_list;
......
...@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, ...@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
xdst->u.rt6.rt6i_dst = rt->rt6i_dst; xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
xdst->u.rt6.rt6i_src = rt->rt6i_src; xdst->u.rt6.rt6i_src = rt->rt6i_src;
INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
rt6_uncached_list_add(&xdst->u.rt6);
atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
return 0; return 0;
} }
...@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst) ...@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev)) if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev); in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst); dst_destroy_metrics_generic(dst);
if (xdst->u.rt6.rt6i_uncached_list)
rt6_uncached_list_del(&xdst->u.rt6);
xfrm_dst_destroy(xdst); xfrm_dst_destroy(xdst);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment