Commit a283a525 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

[PATCH] for_each_possible_cpu: sparc64

for_each_cpu() actually iterates across all possible CPUs.  We've had mistakes
in the past where people were using for_each_cpu() where they should have been
iterating across only online or present CPUs.  This is inefficient and
possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.

This patch replaces for_each_cpu with for_each_possible_cpu for sparc64.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fff8efe7
@@ -1092,7 +1092,7 @@ void sun4v_pci_init(int node, char *model_name)
 		}
 	}
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		unsigned long page = get_zeroed_page(GFP_ATOMIC);
 		if (!page)
...
@@ -535,7 +535,7 @@ static int __init topology_init(void)
 	while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
 		ncpus_probed++;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
 		if (p) {
 			register_cpu(p, i, NULL);
...
@@ -1280,7 +1280,7 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1308,12 +1308,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		}
 	}
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (tlb_type == hypervisor) {
 			int j;

 			/* XXX get this mapping from machine description */
-			for_each_cpu(j) {
+			for_each_possible_cpu(j) {
 				if ((j >> 2) == (i >> 2))
 					cpu_set(j, cpu_sibling_map[i]);
 			}
...
@@ -26,7 +26,7 @@ register unsigned long __local_per_cpu_offset asm("g5");
 #define percpu_modcopy(pcpudst, src, size)		\
 do {							\
 	unsigned int __i;				\
-	for_each_cpu(__i)				\
+	for_each_possible_cpu(__i)			\
 		memcpy((pcpudst)+__per_cpu_offset(__i),	\
 		       (src), (size));			\
 } while (0)
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment