Commit 6f912042 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

[PATCH] for_each_possible_cpu: network codes

for_each_cpu() actually iterates across all possible CPUs.  We've had mistakes
in the past where people used for_each_cpu() when they should have been
iterating across only the online or present CPUs.  This is inefficient and
possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.

This patch replaces for_each_cpu with for_each_possible_cpu under /net
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dd7ba3b8
...@@ -829,7 +829,7 @@ static int translate_table(struct ebt_replace *repl, ...@@ -829,7 +829,7 @@ static int translate_table(struct ebt_replace *repl,
* sizeof(struct ebt_chainstack)); * sizeof(struct ebt_chainstack));
if (!newinfo->chainstack) if (!newinfo->chainstack)
return -ENOMEM; return -ENOMEM;
for_each_cpu(i) { for_each_possible_cpu(i) {
newinfo->chainstack[i] = newinfo->chainstack[i] =
vmalloc(udc_cnt * sizeof(struct ebt_chainstack)); vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
if (!newinfo->chainstack[i]) { if (!newinfo->chainstack[i]) {
...@@ -901,7 +901,7 @@ static void get_counters(struct ebt_counter *oldcounters, ...@@ -901,7 +901,7 @@ static void get_counters(struct ebt_counter *oldcounters,
sizeof(struct ebt_counter) * nentries); sizeof(struct ebt_counter) * nentries);
/* add other counters to those of cpu 0 */ /* add other counters to those of cpu 0 */
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
if (cpu == 0) if (cpu == 0)
continue; continue;
counter_base = COUNTER_BASE(oldcounters, nentries, cpu); counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
...@@ -1036,7 +1036,7 @@ static int do_replace(void __user *user, unsigned int len) ...@@ -1036,7 +1036,7 @@ static int do_replace(void __user *user, unsigned int len)
vfree(table->entries); vfree(table->entries);
if (table->chainstack) { if (table->chainstack) {
for_each_cpu(i) for_each_possible_cpu(i)
vfree(table->chainstack[i]); vfree(table->chainstack[i]);
vfree(table->chainstack); vfree(table->chainstack);
} }
...@@ -1054,7 +1054,7 @@ static int do_replace(void __user *user, unsigned int len) ...@@ -1054,7 +1054,7 @@ static int do_replace(void __user *user, unsigned int len)
vfree(counterstmp); vfree(counterstmp);
/* can be initialized in translate_table() */ /* can be initialized in translate_table() */
if (newinfo->chainstack) { if (newinfo->chainstack) {
for_each_cpu(i) for_each_possible_cpu(i)
vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack[i]);
vfree(newinfo->chainstack); vfree(newinfo->chainstack);
} }
...@@ -1201,7 +1201,7 @@ int ebt_register_table(struct ebt_table *table) ...@@ -1201,7 +1201,7 @@ int ebt_register_table(struct ebt_table *table)
mutex_unlock(&ebt_mutex); mutex_unlock(&ebt_mutex);
free_chainstack: free_chainstack:
if (newinfo->chainstack) { if (newinfo->chainstack) {
for_each_cpu(i) for_each_possible_cpu(i)
vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack[i]);
vfree(newinfo->chainstack); vfree(newinfo->chainstack);
} }
...@@ -1224,7 +1224,7 @@ void ebt_unregister_table(struct ebt_table *table) ...@@ -1224,7 +1224,7 @@ void ebt_unregister_table(struct ebt_table *table)
mutex_unlock(&ebt_mutex); mutex_unlock(&ebt_mutex);
vfree(table->private->entries); vfree(table->private->entries);
if (table->private->chainstack) { if (table->private->chainstack) {
for_each_cpu(i) for_each_possible_cpu(i)
vfree(table->private->chainstack[i]); vfree(table->private->chainstack[i]);
vfree(table->private->chainstack); vfree(table->private->chainstack);
} }
......
...@@ -3346,7 +3346,7 @@ static int __init net_dev_init(void) ...@@ -3346,7 +3346,7 @@ static int __init net_dev_init(void)
* Initialise the packet receive queues. * Initialise the packet receive queues.
*/ */
for_each_cpu(i) { for_each_possible_cpu(i) {
struct softnet_data *queue; struct softnet_data *queue;
queue = &per_cpu(softnet_data, i); queue = &per_cpu(softnet_data, i);
......
...@@ -79,7 +79,7 @@ static void flow_cache_new_hashrnd(unsigned long arg) ...@@ -79,7 +79,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
{ {
int i; int i;
for_each_cpu(i) for_each_possible_cpu(i)
flow_hash_rnd_recalc(i) = 1; flow_hash_rnd_recalc(i) = 1;
flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
...@@ -361,7 +361,7 @@ static int __init flow_cache_init(void) ...@@ -361,7 +361,7 @@ static int __init flow_cache_init(void)
flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
add_timer(&flow_hash_rnd_timer); add_timer(&flow_hash_rnd_timer);
for_each_cpu(i) for_each_possible_cpu(i)
flow_cache_cpu_prepare(i); flow_cache_cpu_prepare(i);
hotcpu_notifier(flow_cache_cpu, 0); hotcpu_notifier(flow_cache_cpu, 0);
......
...@@ -1627,7 +1627,7 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb, ...@@ -1627,7 +1627,7 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
memset(&ndst, 0, sizeof(ndst)); memset(&ndst, 0, sizeof(ndst));
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
struct neigh_statistics *st; struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu); st = per_cpu_ptr(tbl->stats, cpu);
......
...@@ -121,7 +121,7 @@ void __init net_random_init(void) ...@@ -121,7 +121,7 @@ void __init net_random_init(void)
{ {
int i; int i;
for_each_cpu(i) { for_each_possible_cpu(i) {
struct nrnd_state *state = &per_cpu(net_rand_state,i); struct nrnd_state *state = &per_cpu(net_rand_state,i);
__net_srandom(state, i+jiffies); __net_srandom(state, i+jiffies);
} }
...@@ -133,7 +133,7 @@ static int net_random_reseed(void) ...@@ -133,7 +133,7 @@ static int net_random_reseed(void)
unsigned long seed[NR_CPUS]; unsigned long seed[NR_CPUS];
get_random_bytes(seed, sizeof(seed)); get_random_bytes(seed, sizeof(seed));
for_each_cpu(i) { for_each_possible_cpu(i) {
struct nrnd_state *state = &per_cpu(net_rand_state,i); struct nrnd_state *state = &per_cpu(net_rand_state,i);
__net_srandom(state, seed[i]); __net_srandom(state, seed[i]);
} }
......
...@@ -1107,7 +1107,7 @@ void __init icmp_init(struct net_proto_family *ops) ...@@ -1107,7 +1107,7 @@ void __init icmp_init(struct net_proto_family *ops)
struct inet_sock *inet; struct inet_sock *inet;
int i; int i;
for_each_cpu(i) { for_each_possible_cpu(i) {
int err; int err;
err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
......
...@@ -290,7 +290,7 @@ static void ipcomp_free_scratches(void) ...@@ -290,7 +290,7 @@ static void ipcomp_free_scratches(void)
if (!scratches) if (!scratches)
return; return;
for_each_cpu(i) { for_each_possible_cpu(i) {
void *scratch = *per_cpu_ptr(scratches, i); void *scratch = *per_cpu_ptr(scratches, i);
if (scratch) if (scratch)
vfree(scratch); vfree(scratch);
...@@ -313,7 +313,7 @@ static void **ipcomp_alloc_scratches(void) ...@@ -313,7 +313,7 @@ static void **ipcomp_alloc_scratches(void)
ipcomp_scratches = scratches; ipcomp_scratches = scratches;
for_each_cpu(i) { for_each_possible_cpu(i) {
void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
if (!scratch) if (!scratch)
return NULL; return NULL;
...@@ -344,7 +344,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms) ...@@ -344,7 +344,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
if (!tfms) if (!tfms)
return; return;
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
crypto_free_tfm(tfm); crypto_free_tfm(tfm);
} }
...@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name) ...@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
if (!tfms) if (!tfms)
goto error; goto error;
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
if (!tfm) if (!tfm)
goto error; goto error;
......
...@@ -646,7 +646,7 @@ static int translate_table(const char *name, ...@@ -646,7 +646,7 @@ static int translate_table(const char *name,
} }
/* And one copy for every other CPU */ /* And one copy for every other CPU */
for_each_cpu(i) { for_each_possible_cpu(i) {
if (newinfo->entries[i] && newinfo->entries[i] != entry0) if (newinfo->entries[i] && newinfo->entries[i] != entry0)
memcpy(newinfo->entries[i], entry0, newinfo->size); memcpy(newinfo->entries[i], entry0, newinfo->size);
} }
...@@ -696,7 +696,7 @@ static void get_counters(const struct xt_table_info *t, ...@@ -696,7 +696,7 @@ static void get_counters(const struct xt_table_info *t,
counters, counters,
&i); &i);
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
if (cpu == curcpu) if (cpu == curcpu)
continue; continue;
i = 0; i = 0;
......
...@@ -133,7 +133,7 @@ static void ip_ct_event_cache_flush(void) ...@@ -133,7 +133,7 @@ static void ip_ct_event_cache_flush(void)
struct ip_conntrack_ecache *ecache; struct ip_conntrack_ecache *ecache;
int cpu; int cpu;
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
ecache = &per_cpu(ip_conntrack_ecache, cpu); ecache = &per_cpu(ip_conntrack_ecache, cpu);
if (ecache->ct) if (ecache->ct)
ip_conntrack_put(ecache->ct); ip_conntrack_put(ecache->ct);
......
...@@ -735,7 +735,7 @@ translate_table(const char *name, ...@@ -735,7 +735,7 @@ translate_table(const char *name,
} }
/* And one copy for every other CPU */ /* And one copy for every other CPU */
for_each_cpu(i) { for_each_possible_cpu(i) {
if (newinfo->entries[i] && newinfo->entries[i] != entry0) if (newinfo->entries[i] && newinfo->entries[i] != entry0)
memcpy(newinfo->entries[i], entry0, newinfo->size); memcpy(newinfo->entries[i], entry0, newinfo->size);
} }
...@@ -788,7 +788,7 @@ get_counters(const struct xt_table_info *t, ...@@ -788,7 +788,7 @@ get_counters(const struct xt_table_info *t,
counters, counters,
&i); &i);
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
if (cpu == curcpu) if (cpu == curcpu)
continue; continue;
i = 0; i = 0;
......
...@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto) ...@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
int res = 0; int res = 0;
int cpu; int cpu;
for_each_cpu(cpu) for_each_possible_cpu(cpu)
res += proto->stats[cpu].inuse; res += proto->stats[cpu].inuse;
return res; return res;
...@@ -91,7 +91,7 @@ fold_field(void *mib[], int offt) ...@@ -91,7 +91,7 @@ fold_field(void *mib[], int offt)
unsigned long res = 0; unsigned long res = 0;
int i; int i;
for_each_cpu(i) { for_each_possible_cpu(i) {
res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
} }
......
...@@ -3083,7 +3083,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset, ...@@ -3083,7 +3083,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
memcpy(dst, src, length); memcpy(dst, src, length);
/* Add the other cpus in, one int at a time */ /* Add the other cpus in, one int at a time */
for_each_cpu(i) { for_each_possible_cpu(i) {
unsigned int j; unsigned int j;
src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset; src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
......
...@@ -717,7 +717,7 @@ int __init icmpv6_init(struct net_proto_family *ops) ...@@ -717,7 +717,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
struct sock *sk; struct sock *sk;
int err, i, j; int err, i, j;
for_each_cpu(i) { for_each_possible_cpu(i) {
err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
&per_cpu(__icmpv6_socket, i)); &per_cpu(__icmpv6_socket, i));
if (err < 0) { if (err < 0) {
...@@ -763,7 +763,7 @@ void icmpv6_cleanup(void) ...@@ -763,7 +763,7 @@ void icmpv6_cleanup(void)
{ {
int i; int i;
for_each_cpu(i) { for_each_possible_cpu(i) {
sock_release(per_cpu(__icmpv6_socket, i)); sock_release(per_cpu(__icmpv6_socket, i));
} }
inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
......
...@@ -290,7 +290,7 @@ static void ipcomp6_free_scratches(void) ...@@ -290,7 +290,7 @@ static void ipcomp6_free_scratches(void)
if (!scratches) if (!scratches)
return; return;
for_each_cpu(i) { for_each_possible_cpu(i) {
void *scratch = *per_cpu_ptr(scratches, i); void *scratch = *per_cpu_ptr(scratches, i);
vfree(scratch); vfree(scratch);
...@@ -313,7 +313,7 @@ static void **ipcomp6_alloc_scratches(void) ...@@ -313,7 +313,7 @@ static void **ipcomp6_alloc_scratches(void)
ipcomp6_scratches = scratches; ipcomp6_scratches = scratches;
for_each_cpu(i) { for_each_possible_cpu(i) {
void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
if (!scratch) if (!scratch)
return NULL; return NULL;
...@@ -344,7 +344,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms) ...@@ -344,7 +344,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
if (!tfms) if (!tfms)
return; return;
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
crypto_free_tfm(tfm); crypto_free_tfm(tfm);
} }
...@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name) ...@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
if (!tfms) if (!tfms)
goto error; goto error;
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0); struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
if (!tfm) if (!tfm)
goto error; goto error;
......
...@@ -788,7 +788,7 @@ translate_table(const char *name, ...@@ -788,7 +788,7 @@ translate_table(const char *name,
} }
/* And one copy for every other CPU */ /* And one copy for every other CPU */
for_each_cpu(i) { for_each_possible_cpu(i) {
if (newinfo->entries[i] && newinfo->entries[i] != entry0) if (newinfo->entries[i] && newinfo->entries[i] != entry0)
memcpy(newinfo->entries[i], entry0, newinfo->size); memcpy(newinfo->entries[i], entry0, newinfo->size);
} }
...@@ -841,7 +841,7 @@ get_counters(const struct xt_table_info *t, ...@@ -841,7 +841,7 @@ get_counters(const struct xt_table_info *t,
counters, counters,
&i); &i);
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
if (cpu == curcpu) if (cpu == curcpu)
continue; continue;
i = 0; i = 0;
......
...@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto) ...@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
int res = 0; int res = 0;
int cpu; int cpu;
for_each_cpu(cpu) for_each_possible_cpu(cpu)
res += proto->stats[cpu].inuse; res += proto->stats[cpu].inuse;
return res; return res;
...@@ -140,7 +140,7 @@ fold_field(void *mib[], int offt) ...@@ -140,7 +140,7 @@ fold_field(void *mib[], int offt)
unsigned long res = 0; unsigned long res = 0;
int i; int i;
for_each_cpu(i) { for_each_possible_cpu(i) {
res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
} }
......
...@@ -146,7 +146,7 @@ static void nf_ct_event_cache_flush(void) ...@@ -146,7 +146,7 @@ static void nf_ct_event_cache_flush(void)
struct nf_conntrack_ecache *ecache; struct nf_conntrack_ecache *ecache;
int cpu; int cpu;
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
ecache = &per_cpu(nf_conntrack_ecache, cpu); ecache = &per_cpu(nf_conntrack_ecache, cpu);
if (ecache->ct) if (ecache->ct)
nf_ct_put(ecache->ct); nf_ct_put(ecache->ct);
......
...@@ -413,7 +413,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) ...@@ -413,7 +413,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
newinfo->size = size; newinfo->size = size;
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
if (size <= PAGE_SIZE) if (size <= PAGE_SIZE)
newinfo->entries[cpu] = kmalloc_node(size, newinfo->entries[cpu] = kmalloc_node(size,
GFP_KERNEL, GFP_KERNEL,
...@@ -436,7 +436,7 @@ void xt_free_table_info(struct xt_table_info *info) ...@@ -436,7 +436,7 @@ void xt_free_table_info(struct xt_table_info *info)
{ {
int cpu; int cpu;
for_each_cpu(cpu) { for_each_possible_cpu(cpu) {
if (info->size <= PAGE_SIZE) if (info->size <= PAGE_SIZE)
kfree(info->entries[cpu]); kfree(info->entries[cpu]);
else else
......
...@@ -69,7 +69,7 @@ fold_field(void *mib[], int nr) ...@@ -69,7 +69,7 @@ fold_field(void *mib[], int nr)
unsigned long res = 0; unsigned long res = 0;
int i; int i;
for_each_cpu(i) { for_each_possible_cpu(i) {
res += res +=
*((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
sizeof (unsigned long) * nr)); sizeof (unsigned long) * nr));
......
...@@ -2136,7 +2136,7 @@ void socket_seq_show(struct seq_file *seq) ...@@ -2136,7 +2136,7 @@ void socket_seq_show(struct seq_file *seq)
int cpu; int cpu;
int counter = 0; int counter = 0;
for_each_cpu(cpu) for_each_possible_cpu(cpu)
counter += per_cpu(sockets_in_use, cpu); counter += per_cpu(sockets_in_use, cpu);
/* It can be negative, by the way. 8) */ /* It can be negative, by the way. 8) */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment