Commit 9628937d authored by Mike Travis, committed by Ingo Molnar

x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids

Impact: Reduce future system panics due to cpumask operations using NR_CPUS

Ensure that code does not look at bits >= nr_cpu_ids: when cpumasks are
allocated based on nr_cpu_ids, those extra bits are simply not defined. (A
short sketch of the hazard follows the commit metadata, before the diff.)

Also some other minor updates:

   * change to use the cpu accessor function set_cpu_present() instead of
     directly accessing cpu_present_map with cpu_clear() [arch/x86/kernel/acpi/boot.c]

   * use cpumask_of() instead of &cpumask_of_cpu() [arch/x86/kernel/reboot.c]

   * optimize some cpu_mask_to_apicid_and functions.
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6ca09dfc
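
To make the hazard concrete before the diff: with CONFIG_CPUMASK_OFFSTACK=y,
alloc_cpumask_var() sizes a mask for nr_cpu_ids bits rather than NR_CPUS, so
any scan bounded by NR_CPUS can read past the end of the allocation. Below is
a minimal sketch of the broken-versus-safe pattern; example_count_online() is
a hypothetical helper invented for illustration, while the cpumask calls
themselves are the kernel API used throughout this patch.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/cpumask.h>
#include <linux/gfp.h>

static int example_count_online(const struct cpumask *in)
{
        cpumask_var_t tmp;
        int cpu, count = 0;

        /* With CONFIG_CPUMASK_OFFSTACK=y this allocates nr_cpu_ids bits,
         * which may be far fewer than NR_CPUS. */
        if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
                return 0;

        cpumask_and(tmp, in, cpu_online_mask);

        /*
         * Broken: "for (cpu = 0; cpu < NR_CPUS; cpu++)" would test bits
         * that were never allocated.  Bounding the scan by nr_cpu_ids
         * (or using for_each_cpu(), which honours nr_cpu_ids) stays
         * within the allocation.
         */
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                if (cpumask_test_cpu(cpu, tmp))
                        count++;

        free_cpumask_var(tmp);
        return count;
}

This is exactly the simplification applied to the cpu_mask_to_apicid_and()
variants below, which now defer to cpu_mask_to_apicid() on the
already-bounded mask.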
@@ -157,7 +157,7 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -190,7 +190,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set == nr_cpu_ids)
 		return cpu_to_logical_apicid(0);
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -218,9 +218,6 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
 	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;
@@ -229,31 +226,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster. If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return cpu_to_logical_apicid(0);
-			}
-			apicid = new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
+	apicid = cpu_mask_to_apicid(cpumask);
 	free_cpumask_var(cpumask);
 	return apicid;
 }
...
@@ -15,7 +15,7 @@
 #define SHARED_SWITCHER_PAGES \
 	DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
 /* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
+#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
 /* We map at -4M for ease of mapping into the guest (one PTE page). */
 #define SWITCHER_ADDR 0xFFC00000
...
@@ -63,8 +63,8 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 }
...
@@ -52,7 +52,7 @@ static inline void init_apic_ldr(void)
 	int i;

 	/* Create logical APIC IDs by counting CPUs already in cluster. */
-	for (count = 0, i = NR_CPUS; --i >= 0; ) {
+	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
 		lid = cpu_2_logical_apicid[i];
 		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
 			++count;
@@ -97,8 +97,8 @@ static inline int apicid_to_node(int logical_apicid)
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
@@ -107,7 +107,7 @@ static inline int cpu_to_logical_apicid(int cpu)

 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -146,7 +146,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
-	if (num_bits_set == NR_CPUS)
+	if (num_bits_set >= nr_cpu_ids)
 		return (int) 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
@@ -173,42 +173,16 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
-	int num_bits_set;
-	int cpus_found = 0;
-	int cpu;
-	int apicid = 0xFF;
+	int apicid = cpu_to_logical_apicid(0);
 	cpumask_var_t cpumask;

 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return (int) 0xFF;
+		return apicid;

 	cpumask_and(cpumask, inmask, andmask);
 	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	num_bits_set = cpumask_weight(cpumask);
-	/* Return id to all */
-	if (num_bits_set == nr_cpu_ids)
-		goto exit;
-	/*
-	 * The cpus in the mask must all be on the apic cluster. If are not
-	 * on the same apicid cluster return default value of TARGET_CPUS.
-	 */
-	cpu = cpumask_first(cpumask);
-	apicid = cpu_to_logical_apicid(cpu);
-	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)) {
-			int new_apicid = cpu_to_logical_apicid(cpu);
-			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)){
-				printk ("%s: Not a valid mask!\n", __func__);
-				return 0xFF;
-			}
-			apicid = apicid | new_apicid;
-			cpus_found++;
-		}
-		cpu++;
-	}
-exit:
+	apicid = cpu_mask_to_apicid(cpumask);
 	free_cpumask_var(cpumask);
 	return apicid;
 }
...
@@ -598,7 +598,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 	num_processors--;

 	return (0);
...
@@ -140,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const cpumask_t *mask);
+static void lapic_timer_broadcast(const struct cpumask *mask);
 static void apic_pm_activate(void);

 /*
@@ -453,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const cpumask_t *mask)
+static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
...
@@ -355,7 +355,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
 	} else if (smp_num_siblings > 1) {
-		if (smp_num_siblings > NR_CPUS) {
+		if (smp_num_siblings > nr_cpu_ids) {
 			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
 			       smp_num_siblings);
 			smp_num_siblings = 1;
...
@@ -121,7 +121,7 @@ static int cpuid_open(struct inode *inode, struct file *file)
 	lock_kernel();

 	cpu = iminor(file->f_path.dentry->d_inode);
-	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+	if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 		ret = -ENXIO;	/* No such CPU */
 		goto out;
 	}
...
@@ -136,7 +136,7 @@ static int msr_open(struct inode *inode, struct file *file)
 	lock_kernel();
 	cpu = iminor(file->f_path.dentry->d_inode);

-	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+	if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 		ret = -ENXIO;	/* No such CPU */
 		goto out;
 	}
...
@@ -449,7 +449,7 @@ void native_machine_shutdown(void)
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
-	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
+	if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
 		cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
 #endif
@@ -459,7 +459,7 @@ void native_machine_shutdown(void)
 		reboot_cpu_id = smp_processor_id();

 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
+	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));

 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
...
@@ -1154,7 +1154,7 @@ static void __init smp_cpu_index_default(void)
 	for_each_possible_cpu(i) {
 		c = &cpu_data(i);
 		/* mark all to hotplug */
-		c->cpu_index = NR_CPUS;
+		c->cpu_index = nr_cpu_ids;
 	}
 }
...
@@ -357,9 +357,8 @@ void __init find_smp_config(void)
 	printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

 	/* initialize the CPU structures (moved from smp_boot_cpus) */
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++)
 		cpu_irq_affinity[i] = ~0;
-	}
 	cpu_online_map = cpumask_of_cpu(boot_cpu_id);

 	/* The boot CPU must be extended */
@@ -1227,7 +1226,7 @@ int setup_profiling_timer(unsigned int multiplier)
 	 * new values until the next timer interrupt in which they do process
 	 * accounting.
 	 */
-	for (i = 0; i < NR_CPUS; ++i)
+	for (i = 0; i < nr_cpu_ids; ++i)
 		per_cpu(prof_multiplier, i) = multiplier;

 	return 0;
@@ -1257,7 +1256,7 @@ void __init voyager_smp_intr_init(void)
 	int i;

 	/* initialize the per cpu irq mask to all disabled */
-	for (i = 0; i < NR_CPUS; i++)
+	for (i = 0; i < nr_cpu_ids; i++)
 		vic_irq_mask[i] = 0xFFFF;

 	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
...