Commit 093c2ca4 authored by Eric Dumazet, committed by David S. Miller

[IPV4]: Convert ipv4 route to use the new dst_entry 'next' pointer

This patch removes the rt_next pointer from the 'struct rtable.u' union,
and renames u.rt_next to u.dst.rt_next.

It also moves 'struct flowi' right after 'struct dst_entry' to prepare a
cache-locality gain on lookups; the resulting layout is sketched below the
commit metadata.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 75ce7cea
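For orientation: the hunks below touch the 'struct rtable' definition (include/net/route.h) and the IPv4 routing-cache code (net/ipv4/route.c). After the patch, the head of 'struct rtable' reads roughly as follows. This is a sketch assembled from the first two hunks, with the remaining members elided; it assumes, as the commit title implies, that an earlier patch in this series added the 'rt_next' chaining alias inside 'struct dst_entry'.

struct rtable
{
        union {
                struct dst_entry  dst;  /* chain pointer is now reached as u.dst.rt_next */
        } u;

        /* Cache lookup keys */
        struct flowi      fl;           /* moved up, directly after u.dst */

        struct in_device  *idev;

        unsigned          rt_flags;

        /* ... rt_gateway, rt_spec_dst, peer and the other members are unchanged ... */
};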
@@ -53,9 +53,11 @@ struct rtable
 	union
 	{
 		struct dst_entry	dst;
-		struct rtable		*rt_next;
 	} u;
 
+	/* Cache lookup keys */
+	struct flowi		fl;
+
 	struct in_device	*idev;
 
 	unsigned		rt_flags;
@@ -69,9 +71,6 @@ struct rtable
 	/* Info on neighbour */
 	__be32			rt_gateway;
 
-	/* Cache lookup keys */
-	struct flowi		fl;
-
 	/* Miscellaneous cached information */
 	__be32			rt_spec_dst; /* RFC1122 specific destination */
 	struct inet_peer	*peer; /* long-living peer info */

@@ -289,7 +289,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
 {
 	struct rt_cache_iter_state *st = rcu_dereference(seq->private);
 
-	r = r->u.rt_next;
+	r = r->u.dst.rt_next;
 	while (!r) {
 		rcu_read_unlock_bh();
 		if (--st->bucket < 0)
@@ -512,7 +512,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
 	/* Kill broadcast/multicast entries very aggresively, if they
 	   collide in hash table with more useful entries */
 	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->u.rt_next;
+		rth->fl.iif && rth->u.dst.rt_next;
 }
 
 static __inline__ int rt_valuable(struct rtable *rth)
@@ -595,10 +595,10 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
 		if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
 		    compare_keys(&(*rthp)->fl, &expentry->fl)) {
 			if (*rthp == expentry) {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				continue;
 			} else {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
 				if (removed_count)
 					++(*removed_count);
@@ -606,9 +606,9 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
 		} else {
 			if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
 			    passedexpired && !nextstep)
-				nextstep = &rth->u.rt_next;
+				nextstep = &rth->u.dst.rt_next;
 
-			rthp = &rth->u.rt_next;
+			rthp = &rth->u.dst.rt_next;
 		}
 	}
 
@@ -649,12 +649,12 @@ static void rt_check_expire(unsigned long dummy)
 				/* Entry is expired even if it is in use */
 				if (time_before_eq(now, rth->u.dst.expires)) {
 					tmo >>= 1;
-					rthp = &rth->u.rt_next;
+					rthp = &rth->u.dst.rt_next;
 					continue;
 				}
 			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
 				tmo >>= 1;
-				rthp = &rth->u.rt_next;
+				rthp = &rth->u.dst.rt_next;
 				continue;
 			}
 
@@ -668,11 +668,11 @@ static void rt_check_expire(unsigned long dummy)
 				if (!rthp)
 					break;
 			} else {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
 			}
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-			*rthp = rth->u.rt_next;
+			*rthp = rth->u.dst.rt_next;
 			rt_free(rth);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
 		}
@@ -706,7 +706,7 @@ static void rt_run_flush(unsigned long dummy)
 		spin_unlock_bh(rt_hash_lock_addr(i));
 
 		for (; rth; rth = next) {
-			next = rth->u.rt_next;
+			next = rth->u.dst.rt_next;
 			rt_free(rth);
 		}
 	}
@@ -840,7 +840,7 @@ static int rt_garbage_collect(void)
 			while ((rth = *rthp) != NULL) {
 				if (!rt_may_expire(rth, tmo, expire)) {
 					tmo >>= 1;
-					rthp = &rth->u.rt_next;
+					rthp = &rth->u.dst.rt_next;
 					continue;
 				}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
@@ -858,12 +858,12 @@ static int rt_garbage_collect(void)
 				if (!rthp)
 					break;
 			} else {
-				*rthp = rth->u.rt_next;
+				*rthp = rth->u.dst.rt_next;
 				rt_free(rth);
 				goal--;
 			}
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-			*rthp = rth->u.rt_next;
+			*rthp = rth->u.dst.rt_next;
 			rt_free(rth);
 			goal--;
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
@@ -947,13 +947,13 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 		if (compare_keys(&rth->fl, &rt->fl)) {
 #endif
 			/* Put it first */
-			*rthp = rth->u.rt_next;
+			*rthp = rth->u.dst.rt_next;
 			/*
 			 * Since lookup is lockfree, the deletion
 			 * must be visible to another weakly ordered CPU before
 			 * the insertion at the start of the hash chain.
 			 */
-			rcu_assign_pointer(rth->u.rt_next,
+			rcu_assign_pointer(rth->u.dst.rt_next,
					   rt_hash_table[hash].chain);
 			/*
 			 * Since lookup is lockfree, the update writes
@@ -983,7 +983,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 
 		chain_length++;
-		rthp = &rth->u.rt_next;
+		rthp = &rth->u.dst.rt_next;
 	}
 
 	if (cand) {
@@ -994,7 +994,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 		 * only 2 entries per bucket. We will see.
 		 */
 		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->u.rt_next;
+			*candp = cand->u.dst.rt_next;
 			rt_free(cand);
 		}
 	}
@@ -1034,13 +1034,13 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 		}
 	}
 
-	rt->u.rt_next = rt_hash_table[hash].chain;
+	rt->u.dst.rt_next = rt_hash_table[hash].chain;
 #if RT_CACHE_DEBUG >= 2
-	if (rt->u.rt_next) {
+	if (rt->u.dst.rt_next) {
 		struct rtable *trt;
 		printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
		       NIPQUAD(rt->rt_dst));
-		for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
+		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
 			printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
 		printk("\n");
 	}
@@ -1117,9 +1117,9 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	ip_rt_put(rt);
 	for (rthp = &rt_hash_table[hash].chain; *rthp;
-	     rthp = &(*rthp)->u.rt_next)
+	     rthp = &(*rthp)->u.dst.rt_next)
 		if (*rthp == rt) {
-			*rthp = rt->u.rt_next;
+			*rthp = rt->u.dst.rt_next;
 			rt_free(rt);
 			break;
 		}
@@ -1167,7 +1167,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 			    rth->fl.fl4_src != skeys[i] ||
 			    rth->fl.oif != ikeys[k] ||
 			    rth->fl.iif != 0) {
-				rthp = &rth->u.rt_next;
+				rthp = &rth->u.dst.rt_next;
 				continue;
 			}
 
@@ -1416,7 +1416,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
 
 		rcu_read_lock();
 		for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-		     rth = rcu_dereference(rth->u.rt_next)) {
+		     rth = rcu_dereference(rth->u.dst.rt_next)) {
 			if (rth->fl.fl4_dst == daddr &&
 			    rth->fl.fl4_src == skeys[i] &&
 			    rth->rt_dst == daddr &&
@@ -2099,7 +2099,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.rt_next)) {
+	     rth = rcu_dereference(rth->u.dst.rt_next)) {
 		if (rth->fl.fl4_dst == daddr &&
 		    rth->fl.fl4_src == saddr &&
 		    rth->fl.iif == iif &&
@@ -2563,7 +2563,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
 
 	rcu_read_lock_bh();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.rt_next)) {
+	     rth = rcu_dereference(rth->u.dst.rt_next)) {
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
 		    rth->fl.fl4_src == flp->fl4_src &&
 		    rth->fl.iif == 0 &&
@@ -2825,7 +2825,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			s_idx = 0;
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-		     rt = rcu_dereference(rt->u.rt_next), idx++) {
+		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
 			if (idx < s_idx)
 				continue;
 			skb->dst = dst_clone(&rt->u.dst);
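To see where the new layout should pay off, here is a condensed sketch of the cache lookup pattern after this patch, adapted from the __ip_route_output_key() hunk above. Only the key comparisons visible in that hunk are shown; the remaining checks and the hit handling are elided:

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		/* Each step reads the next pointer embedded in u.dst and then
		 * the flow keys; with 'fl' placed right after 'u.dst', both
		 * accesses stay close together in memory. */
		if (rth->fl.fl4_dst == flp->fl4_dst &&
		    rth->fl.fl4_src == flp->fl4_src &&
		    rth->fl.iif == 0 /* ... remaining key comparisons elided ... */) {
			/* cache hit */
		}
	}
	rcu_read_unlock_bh();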