Commit 6882f933 authored by David S. Miller

ipv4: Kill RT_CACHE_DEBUG

It's way past its usefulness.  And this gets rid of a bunch
of stray ->rt_{dst,src} references.

Even the comment documenting the macro was inaccurate (stated
default was 1 when it's 0).

If reintroduced, it should be done properly, with dynamic debug
facilities.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 12f4d0a8
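
If this debugging were ever brought back with dynamic debug, the compile-time
gate would simply disappear.  A minimal sketch (not part of this commit) of how
the removed "expire>>" printk in rt_garbage_collect() could be redone with
pr_debug():

	/*
	 * Sketch only: pr_debug() compiles to a no-op unless DEBUG or
	 * CONFIG_DYNAMIC_DEBUG is enabled, so no RT_CACHE_DEBUG macro
	 * or #if guard is needed around the call site.
	 */
	pr_debug("expire>> %u %d %d %d\n", expire,
		 dst_entries_get_fast(&ipv4_dst_ops), goal, i);

With CONFIG_DYNAMIC_DEBUG, such a call site can then be toggled per file at
runtime, e.g.:

	echo 'file route.c +p' > /sys/kernel/debug/dynamic_debug/control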
@@ -16,13 +16,6 @@
 #include <net/neighbour.h>
 #include <asm/processor.h>
 
-/*
- * 0 - no debugging messages
- * 1 - rare events and bugs (default)
- * 2 - trace mode.
- */
-#define RT_CACHE_DEBUG		0
-
 #define DST_GC_MIN	(HZ/10)
 #define DST_GC_INC	(HZ/2)
 #define DST_GC_MAX	(120*HZ)
...
@@ -33,9 +33,6 @@
  * 3) This list is guarded by a mutex,
  *    so that the gc_task and dst_dev_event() can be synchronized.
  */
-#if RT_CACHE_DEBUG >= 2
-static atomic_t			 dst_total = ATOMIC_INIT(0);
-#endif
 
 /*
  * We want to keep lock & list close together
@@ -69,10 +66,6 @@ static void dst_gc_task(struct work_struct *work)
 	unsigned long expires = ~0L;
 	struct dst_entry *dst, *next, head;
 	struct dst_entry *last = &head;
-#if RT_CACHE_DEBUG >= 2
-	ktime_t time_start = ktime_get();
-	struct timespec elapsed;
-#endif
 
 	mutex_lock(&dst_gc_mutex);
 	next = dst_busy_list;
@@ -146,15 +139,6 @@ static void dst_gc_task(struct work_struct *work)
 
 	spin_unlock_bh(&dst_garbage.lock);
 	mutex_unlock(&dst_gc_mutex);
-#if RT_CACHE_DEBUG >= 2
-	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
-	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
-	       " expires: %lu elapsed: %lu us\n",
-	       atomic_read(&dst_total), delayed, work_performed,
-	       expires,
-	       elapsed.tv_sec * USEC_PER_SEC +
-	       elapsed.tv_nsec / NSEC_PER_USEC);
-#endif
 }
 
 int dst_discard(struct sk_buff *skb)
@@ -205,9 +189,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst->lastuse = jiffies;
 	dst->flags = flags;
 	dst->next = NULL;
-#if RT_CACHE_DEBUG >= 2
-	atomic_inc(&dst_total);
-#endif
 	dst_entries_add(ops, 1);
 	return dst;
 }
@@ -267,9 +248,6 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
 			dst->ops->destroy(dst);
 	if (dst->dev)
 		dev_put(dst->dev);
-#if RT_CACHE_DEBUG >= 2
-	atomic_dec(&dst_total);
-#endif
 	kmem_cache_free(dst->ops->kmem_cachep, dst);
 
 	dst = child;
...
@@ -968,10 +968,6 @@ static int rt_garbage_collect(struct dst_ops *ops)
 				break;
 
 		expire >>= 1;
-#if RT_CACHE_DEBUG >= 2
-		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
-				dst_entries_get_fast(&ipv4_dst_ops), goal, i);
-#endif
 
 		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
 			goto out;
@@ -992,10 +988,6 @@ static int rt_garbage_collect(struct dst_ops *ops)
 	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
 	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
 		expire = ip_rt_gc_timeout;
-#if RT_CACHE_DEBUG >= 2
-	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
-			dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
-#endif
 out:	return 0;
 }
@@ -1179,16 +1171,6 @@ static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
 	rt->dst.rt_next = rt_hash_table[hash].chain;
 
-#if RT_CACHE_DEBUG >= 2
-	if (rt->dst.rt_next) {
-		struct rtable *trt;
-		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
-		       hash, &rt->rt_dst);
-		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
-			printk(" . %pI4", &trt->rt_dst);
-		printk("\n");
-	}
-#endif
 	/*
 	 * Since lookup is lockfree, we must make sure
 	 * previous writes to rt are committed to memory
@@ -1347,10 +1329,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 			unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
 						rt->rt_oif,
 						rt_genid(dev_net(dst->dev)));
-#if RT_CACHE_DEBUG >= 1
-			printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
-				&rt->rt_dst, rt->rt_key_tos);
-#endif
 			rt_del(hash, rt);
 			ret = NULL;
 		} else if (rt->peer &&
...