Commit 90c7f719 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Make for_each_cpu() Iterator More Friendly

From: Rusty Russell <rusty@rustcorp.com.au>

Anton: breaks PPC64, as it needs cpu_possible_map, but the fix is already
in the Ameslab tree.

The for_each_cpu() and for_each_online_cpu() iterators take a mask, and
no one uses them that way (except for arch/i386/mach-voyager, which uses
for_each_cpu(cpu_online_map)).  Make them more usable iterators by
dropping the "mask" arg.

This requires that archs provide a cpu_possible_map: most do, but PPC64
doesn't, so it is broken by this patch.  Archs that lack one provide it
with a #define in asm/smp.h.

Most places that loop over cpus and test cpu_online() should use
for_each_cpu(): the two are synonymous at the moment, but with the CPU
hotplug patch the difference becomes important.

Followup patches will convert users.
parent 8e197efa
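
For illustration only (this snippet is not part of the patch): a minimal sketch of how a call site changes under the new interface. The init_counters() helper and loop bodies are hypothetical; the iterators behave as the changelog describes, with for_each_cpu() walking cpu_possible_map and for_each_online_cpu() walking cpu_online_map.

	int cpu;

	/* Before this patch: the caller had to pass the mask explicitly. */
	for_each_cpu(cpu, cpu_online_map) {
		init_counters(cpu);		/* hypothetical per-cpu setup */
	}

	/* After this patch: the mask argument is gone; the iterator name
	 * selects the map.  for_each_online_cpu() walks cpu_online_map,
	 * for_each_cpu() walks cpu_possible_map. */
	for_each_online_cpu(cpu)
		init_counters(cpu);

	for_each_cpu(cpu)
		init_counters(cpu);
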
@@ -130,7 +130,7 @@ send_QIC_CPI(__u32 cpuset, __u8 cpi)
 {
 	int cpu;
 
-	for_each_cpu(cpu, cpu_online_map) {
+	for_each_online_cpu(cpu) {
 		if(cpuset & (1<<cpu)) {
 #ifdef VOYAGER_DEBUG
 			if(!cpu_isset(cpu, cpu_online_map))
@@ -1465,7 +1465,7 @@ send_CPI(__u32 cpuset, __u8 cpi)
 	cpuset &= 0xff;		/* only first 8 CPUs vaild for VIC CPI */
 	if(cpuset == 0)
 		return;
-	for_each_cpu(cpu, cpu_online_map) {
+	for_each_online_cpu(cpu) {
 		if(cpuset & (1<<cpu))
 			set_bit(cpi, &vic_cpi_mailbox[cpu]);
 	}
@@ -1579,7 +1579,7 @@ enable_vic_irq(unsigned int irq)
 	VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
 		irq, cpu, cpu_irq_affinity[cpu]));
 	spin_lock_irqsave(&vic_irq_lock, flags);
-	for_each_cpu(real_cpu, cpu_online_map) {
+	for_each_online_cpu(real_cpu) {
 		if(!(voyager_extended_vic_processors & (1<<real_cpu)))
 			continue;
 		if(!(cpu_irq_affinity[real_cpu] & mask)) {
@@ -1720,7 +1720,7 @@ after_handle_vic_irq(unsigned int irq)
 		int i;
 		__u8 cpu = smp_processor_id();
 		__u8 real_cpu;
-		int mask;
+		int mask; /* Um... initialize me??? --RR */
 
 		printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
 		       cpu, irq);
@@ -1809,7 +1809,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 		 * bus) */
 		return;
 
-	for_each_cpu(cpu, cpu_online_map) {
+	for_each_online_cpu(cpu) {
 		unsigned long cpu_mask = 1 << cpu;
 
 		if(cpu_mask & real_mask) {
@@ -1875,7 +1875,7 @@ voyager_smp_dump()
 	int old_cpu = smp_processor_id(), cpu;
 
 	/* dump the interrupt masks of each processor */
-	for_each_cpu(cpu, cpu_online_map) {
+	for_each_online_cpu(cpu) {
 		__u16 imr, isr, irr;
 		unsigned long flags;
...
@@ -48,8 +48,8 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 extern cpumask_t cpu_present_mask;
 extern cpumask_t cpu_online_map;
 extern int smp_num_cpus;
-#define cpu_possible(cpu) cpu_isset(cpu, cpu_present_mask)
+#define cpu_possible_map cpu_present_mask
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 extern int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, unsigned long cpu);
...
@@ -53,8 +53,7 @@ extern void zap_low_mappings (void);
 #define smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
-#define cpu_possible(cpu) cpu_isset(cpu, cpu_callout_map)
+#define cpu_possible_map cpu_callout_map
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
...
@@ -48,7 +48,7 @@ extern volatile int ia64_cpu_to_sapicid[];
 extern unsigned long ap_wakeup_vector;
 
-#define cpu_possible(cpu) cpu_isset(cpu, phys_cpu_present_map)
+#define cpu_possible_map phys_cpu_present_map
 
 /*
  * Function to map hard smp processor id to logical id. Slow, so don't use this in
...
@@ -48,8 +48,8 @@ extern struct call_data_struct *call_data;
 extern cpumask_t phys_cpu_present_map;
 extern cpumask_t cpu_online_map;
 
-#define cpu_possible(cpu) cpu_isset(cpu, phys_cpu_present_map)
+#define cpu_possible_map phys_cpu_present_map
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 static inline unsigned int num_online_cpus(void)
...
@@ -54,7 +54,7 @@ extern unsigned long cpu_present_mask;
 #define smp_processor_id() (current_thread_info()->cpu)
 
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
-#define cpu_possible(cpu) cpu_isset(cpu, cpu_present_mask)
+#define cpu_possible_map cpu_present_map
 
 #endif /* CONFIG_SMP */
...
@@ -48,7 +48,6 @@ extern void smp_local_timer_interrupt(struct pt_regs *);
 #define smp_processor_id() (current_thread_info()->cpu)
 
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
-#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map)
 
 extern int __cpu_up(unsigned int cpu);
...
@@ -49,7 +49,6 @@ extern cpumask_t cpu_possible_map;
 #define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
 
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
-#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map)
 
 extern __inline__ __u16 hard_smp_processor_id(void)
 {
...
@@ -24,7 +24,6 @@ extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_possible_map;
 
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
-#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map)
 
 #define smp_processor_id() (current_thread_info()->cpu)
...
@@ -33,7 +33,7 @@
 extern unsigned char boot_cpu_id;
 
 extern cpumask_t phys_cpu_present_map;
-#define cpu_possible(cpu) cpu_isset(cpu, phys_cpu_present_map)
+#define cpu_possible_map phys_cpu_present_map
 
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
...
@@ -20,7 +20,6 @@ extern int hard_smp_processor_id(void);
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 extern int ncpus;
-#define cpu_possible(cpu) (cpu < ncpus)
 
 extern inline void smp_cpus_done(unsigned int maxcpus)
 {
...
@@ -57,8 +57,7 @@ void smp_stop_cpu(void);
 */
 extern cpumask_t cpu_callout_map;
-#define cpu_possible(cpu) cpu_isset(cpu, cpu_callout_map)
+#define cpu_possible_map cpu_callout_map
 
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 static inline int num_booting_cpus(void)
...
@@ -8,32 +8,28 @@
 #ifdef CONFIG_SMP
 extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_possible_map;
 
 #define num_online_cpus() cpus_weight(cpu_online_map)
 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
+#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map)
+
+#define for_each_cpu_mask(cpu, mask) \
+	for (cpu = first_cpu_const(mk_cpumask_const(mask)); \
+		cpu < NR_CPUS; \
+		cpu = next_cpu_const(cpu, mk_cpumask_const(mask)))
+
+#define for_each_cpu(cpu) for_each_cpu_mask(cpu, cpu_possible_map)
+#define for_each_online_cpu(cpu) for_each_cpu_mask(cpu, cpu_online_map)
 #else
 #define cpu_online_map cpumask_of_cpu(0)
 #define num_online_cpus() 1
 #define cpu_online(cpu) ({ BUG_ON((cpu) != 0); 1; })
-#endif
+#define cpu_possible(cpu) ({ BUG_ON((cpu) != 0); 1; })
 
-static inline int next_online_cpu(int cpu, cpumask_t map)
-{
-	do
-		cpu = next_cpu_const(cpu, mk_cpumask_const(map));
-	while (cpu < NR_CPUS && !cpu_online(cpu));
-	return cpu;
-}
-
-#define for_each_cpu(cpu, map) \
-	for (cpu = first_cpu_const(mk_cpumask_const(map)); \
-		cpu < NR_CPUS; \
-		cpu = next_cpu_const(cpu,mk_cpumask_const(map)))
-
-#define for_each_online_cpu(cpu, map) \
-	for (cpu = first_cpu_const(mk_cpumask_const(map)); \
-		cpu < NR_CPUS; \
-		cpu = next_online_cpu(cpu,map))
+#define for_each_cpu(cpu) for (cpu = 0; cpu < 1; cpu++)
+#define for_each_online_cpu(cpu) for (cpu = 0; cpu < 1; cpu++)
+#endif
 
 extern int __mask_snprintf_len(char *buf, unsigned int buflen,
 		const unsigned long *maskp, unsigned int maskbytes);
...
@@ -103,7 +103,6 @@ void smp_prepare_boot_cpu(void);
 #define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus() 1
-#define cpu_possible(cpu) ({ BUG_ON((cpu) != 0); 1; })
 #define smp_prepare_boot_cpu() do {} while (0)
 
 #endif /* !SMP */
...