Commit 39c715b7 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] smp_processor_id() cleanup

This patch implements a number of smp_processor_id() cleanup ideas that
Arjan van de Ven and I came up with.

The previous __smp_processor_id()/_smp_processor_id()/smp_processor_id() API
spaghetti was hard to follow, both on the implementation side and at the
usage sites.

Some of the complexity arose from poorly chosen names, and some from the
fact that not all architectures defined __smp_processor_id().

In the new code, there are two externally visible symbols:

 - smp_processor_id(): debug variant.

 - raw_smp_processor_id(): nondebug variant. Replaces all existing
   uses of _smp_processor_id() and __smp_processor_id(). Defined
   by every SMP architecture in include/asm-*/smp.h.

There is one new internal symbol, dependent on DEBUG_PREEMPT:

 - debug_smp_processor_id(): internal debug variant, mapped to
                             smp_processor_id().
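
As an illustration of the intended usage (a sketch only, not part of this
patch; the two helpers called below are hypothetical):

    /* Preemption-safe use: get_cpu() disables preemption, so the CPU
     * number stays valid until put_cpu() and the debug check stays quiet:
     */
    int cpu = get_cpu();
    do_local_work(cpu);                         /* hypothetical helper */
    put_cpu();

    /* Migration-tolerant use: for statistics it is harmless if the task
     * migrates right after the read, so the raw variant is used to avoid
     * a false positive warning:
     */
    bump_local_counter(raw_smp_processor_id());  /* hypothetical helper */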

Also, I moved debug_smp_processor_id() from lib/kernel_lock.c into a new
lib/smp_processor_id.c file.  All related comments got updated and/or
clarified.

I have build/boot tested the following 8 .config combinations on x86:

 {SMP,UP} x {PREEMPT,!PREEMPT} x {DEBUG_PREEMPT,!DEBUG_PREEMPT}

I have also build/boot tested x64 on UP/PREEMPT/DEBUG_PREEMPT.  (Other
architectures are untested, but should work just fine.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 84929801
@@ -306,7 +306,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 	};
 	static int die_counter;
-	if (die.lock_owner != _smp_processor_id()) {
+	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
 		spin_lock_irq(&die.lock);
 		die.lock_owner = smp_processor_id();
...
@@ -34,7 +34,7 @@ inline void __const_udelay(unsigned long xloops)
 	xloops *= 4;
 	__asm__("mull %0"
 		:"=d" (xloops), "=&a" (d0)
-		:"1" (xloops),"0" (cpu_data[_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+		:"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
 	__delay(++xloops);
 }
...
@@ -130,7 +130,7 @@ void _raw_read_lock(rwlock_t *rw)
 	while (!read_can_lock(rw)) {
 		if (--stuck == 0) {
 			printk("_read_lock(%p) CPU#%d lock %d\n",
-				rw, _smp_processor_id(), rw->lock);
+				rw, raw_smp_processor_id(), rw->lock);
 			stuck = INIT_STUCK;
 		}
 	}
@@ -158,7 +158,7 @@ void _raw_write_lock(rwlock_t *rw)
 	while (!write_can_lock(rw)) {
 		if (--stuck == 0) {
 			printk("write_lock(%p) CPU#%d lock %d)\n",
-				rw, _smp_processor_id(), rw->lock);
+				rw, raw_smp_processor_id(), rw->lock);
 			stuck = INIT_STUCK;
 		}
 	}
...
@@ -292,7 +292,7 @@ static int native_idle(void)
 		if (need_resched())
 			schedule();
-		if (cpu_is_offline(_smp_processor_id()) &&
+		if (cpu_is_offline(raw_smp_processor_id()) &&
 		    system_state == SYSTEM_RUNNING)
 			cpu_die();
 	}
...
@@ -24,7 +24,7 @@ inline void __const_udelay(unsigned long xloops)
 	__asm__("dmulu.l %0, %2\n\t"
 		"sts	mach, %0"
 		: "=r" (xloops)
-		: "0" (xloops), "r" (cpu_data[_smp_processor_id()].loops_per_jiffy)
+		: "0" (xloops), "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy)
 		: "macl", "mach");
 	__delay(xloops * HZ);
 }
...
@@ -31,7 +31,7 @@ void __const_udelay(unsigned long n)
 {
 	n *= 4;
-	n *= (cpu_data(_smp_processor_id()).udelay_val * (HZ/4));
+	n *= (cpu_data(raw_smp_processor_id()).udelay_val * (HZ/4));
 	n >>= 32;
 	__delay(n + 1);
...
@@ -34,7 +34,7 @@ void __delay(unsigned long loops)
 inline void __const_udelay(unsigned long xloops)
 {
-	__delay(((xloops * cpu_data[_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
+	__delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
 }
 void __udelay(unsigned long usecs)
...
@@ -171,7 +171,7 @@ static void acpi_processor_idle (void)
 	int sleep_ticks = 0;
 	u32 t1, t2 = 0;
-	pr = processors[_smp_processor_id()];
+	pr = processors[raw_smp_processor_id()];
 	if (!pr)
 		return;
...
@@ -134,7 +134,7 @@ static int gameport_measure_speed(struct gameport *gameport)
 	}
 	gameport_close(gameport);
-	return (cpu_data[_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
+	return (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
 #else
...
@@ -62,7 +62,7 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, void *
 	/* To avoid latency problems, we only process the current CPU,
 	 * hoping that most samples for the task are on this CPU
 	 */
-	sync_buffer(_smp_processor_id());
+	sync_buffer(raw_smp_processor_id());
 	return 0;
 }
@@ -86,7 +86,7 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void *
 	/* To avoid latency problems, we only process the current CPU,
 	 * hoping that most samples for the task are on this CPU
 	 */
-	sync_buffer(_smp_processor_id());
+	sync_buffer(raw_smp_processor_id());
 	return 0;
 }
...
@@ -145,10 +145,10 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
 #define xfs_inherit_nosymlinks	xfs_params.inherit_nosym.val
 #define xfs_rotorstep		xfs_params.rotorstep.val
-#ifndef __smp_processor_id
-#define __smp_processor_id()	smp_processor_id()
+#ifndef raw_smp_processor_id
+#define raw_smp_processor_id()	smp_processor_id()
 #endif
-#define current_cpu()		__smp_processor_id()
+#define current_cpu()		raw_smp_processor_id()
 #define current_pid()		(current->pid)
 #define current_fsuid(cred)	(current->fsuid)
 #define current_fsgid(cred)	(current->fsgid)
...
@@ -43,7 +43,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 #define PROC_CHANGE_PENALTY	20
 #define hard_smp_processor_id()	__hard_smp_processor_id()
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 extern cpumask_t cpu_present_mask;
 extern cpumask_t cpu_online_map;
...
@@ -21,7 +21,7 @@
 # error "<asm-arm/smp.h> included in non-SMP build"
 #endif
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 extern cpumask_t cpu_present_mask;
 #define cpu_possible_map cpu_present_mask
...
@@ -51,7 +51,7 @@ extern u8 x86_cpu_to_apicid[];
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define __smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
...
@@ -46,7 +46,7 @@ ia64_get_lid (void)
 #define SMP_IRQ_REDIRECTION	(1 << 0)
 #define SMP_IPI_REDIRECTION	(1 << 1)
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 extern struct smp_boot_data {
 	int cpu_count;
...
@@ -66,7 +66,7 @@ extern volatile int cpu_2_physid[NR_CPUS];
 #define physid_to_cpu(physid)	physid_2_cpu[physid]
 #define cpu_to_physid(cpu_id)	cpu_2_physid[cpu_id]
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 extern cpumask_t cpu_callout_map;
 #define cpu_possible_map cpu_callout_map
...
@@ -21,7 +21,7 @@
 #include <linux/cpumask.h>
 #include <asm/atomic.h>
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 /* Map from cpu id to sequential logical cpu number. This will only
    not be idempotent when cpus failed to come on-line. */
...
@@ -51,7 +51,7 @@ extern void smp_send_reschedule(int cpu);
 extern unsigned long cpu_present_mask;
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 #endif /* CONFIG_SMP */
...
@@ -44,7 +44,7 @@ extern void smp_message_recv(int, struct pt_regs *);
 #define NO_PROC_ID		0xFF	/* No processor magic marker */
 #define PROC_CHANGE_PENALTY	20
-#define smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 extern int __cpu_up(unsigned int cpu);
...
@@ -45,7 +45,7 @@ void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
 #endif
-#define __smp_processor_id() (get_paca()->paca_index)
+#define raw_smp_processor_id() (get_paca()->paca_index)
 #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
 extern cpumask_t cpu_sibling_map[NR_CPUS];
...
@@ -47,7 +47,7 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
 #define PROC_CHANGE_PENALTY	20	/* Schedule penalty */
-#define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
+#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
 extern int smp_get_cpu(cpumask_t cpu_map);
 extern void smp_put_cpu(int cpu);
...
@@ -25,7 +25,7 @@ extern cpumask_t cpu_possible_map;
 #define cpu_online(cpu)	cpu_isset(cpu, cpu_online_map)
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 /* I've no idea what the real meaning of this is */
 #define PROC_CHANGE_PENALTY	20
...
@@ -148,7 +148,7 @@ extern __inline__ int hard_smp_processor_id(void)
 }
 #endif
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 #define prof_multiplier(__cpu)	cpu_data(__cpu).multiplier
 #define prof_counter(__cpu)	cpu_data(__cpu).counter
...
@@ -64,7 +64,7 @@ static __inline__ int hard_smp_processor_id(void)
 	}
 }
-#define smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 #endif /* !(__ASSEMBLY__) */
...
@@ -8,7 +8,8 @@
 #include "asm/current.h"
 #include "linux/cpumask.h"
-#define smp_processor_id() (current_thread->cpu)
+#define raw_smp_processor_id() (current_thread->cpu)
 #define cpu_logical_map(n) (n)
 #define cpu_number_map(n) (n)
 #define PROC_CHANGE_PENALTY	15 /* Pick a number, any number */
...
@@ -68,7 +68,7 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
-#define __smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() read_pda(cpunumber)
 extern __inline int hard_smp_processor_id(void)
 {
...
@@ -381,7 +381,7 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
-#define numa_node_id()	(cpu_to_node(_smp_processor_id()))
+#define numa_node_id()	(cpu_to_node(raw_smp_processor_id()))
 #ifndef CONFIG_DISCONTIGMEM
...
@@ -92,10 +92,7 @@ void smp_prepare_boot_cpu(void);
 /*
  * These macros fold the SMP functionality into a single CPU system
  */
-#if !defined(__smp_processor_id) || !defined(CONFIG_PREEMPT)
-# define smp_processor_id() 0
-#endif
+#define raw_smp_processor_id()	0
 #define hard_smp_processor_id()	0
 #define smp_call_function(func,info,retry,wait)	({ 0; })
 #define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
@@ -106,30 +103,25 @@ static inline void smp_send_reschedule(int cpu) { }
 #endif /* !SMP */
 /*
- * DEBUG_PREEMPT support: check whether smp_processor_id() is being
- * used in a preemption-safe way.
+ * smp_processor_id(): get the current CPU ID.
  *
- * An architecture has to enable this debugging code explicitly.
- * It can do so by renaming the smp_processor_id() macro to
- * __smp_processor_id(). This should only be done after some minimal
- * testing, because usually there are a number of false positives
- * that an architecture will trigger.
+ * if DEBUG_PREEMPT is enabled the we check whether it is
+ * used in a preemption-safe way. (smp_processor_id() is safe
+ * if it's used in a preemption-off critical section, or in
+ * a thread that is bound to the current CPU.)
  *
- * To fix a false positive (i.e. smp_processor_id() use that the
- * debugging code reports but which use for some reason is legal),
- * change the smp_processor_id() reference to _smp_processor_id(),
- * which is the nondebug variant. NOTE: don't use this to hack around
- * real bugs.
+ * NOTE: raw_smp_processor_id() is for internal use only
+ * (smp_processor_id() is the preferred variant), but in rare
+ * instances it might also be used to turn off false positives
+ * (i.e. smp_processor_id() use that the debugging code reports but
+ * which use for some reason is legal). Don't use this to hack around
+ * the warning message, as your code might not work under PREEMPT.
 */
-#ifdef __smp_processor_id
-# if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
-extern unsigned int smp_processor_id(void);
-# else
-# define smp_processor_id() __smp_processor_id()
-# endif
-# define _smp_processor_id() __smp_processor_id()
+#ifdef CONFIG_DEBUG_PREEMPT
+extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
 #else
-# define _smp_processor_id() smp_processor_id()
+# define smp_processor_id() raw_smp_processor_id()
 #endif
 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
...
@@ -107,7 +107,7 @@ struct rt_cache_stat
 extern struct rt_cache_stat *rt_cache_stat;
 #define RT_CACHE_STAT_INC(field) \
-	(per_cpu_ptr(rt_cache_stat, _smp_processor_id())->field++)
+	(per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
 extern struct ip_rt_acct *ip_rt_acct;
...
@@ -128,18 +128,18 @@ struct linux_mib {
 #define SNMP_STAT_USRPTR(name)	(name[1])
 #define SNMP_INC_STATS_BH(mib, field) \
-	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \
-	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field + (offset)]++)
+	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field + (offset)]++)
 #define SNMP_INC_STATS_USER(mib, field) \
-	(per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_INC_STATS(mib, field) \
-	(per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_DEC_STATS(mib, field) \
-	(per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]--)
+	(per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]--)
 #define SNMP_ADD_STATS_BH(mib, field, addend) \
-	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field] += addend)
+	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
 #define SNMP_ADD_STATS_USER(mib, field, addend) \
-	(per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field] += addend)
+	(per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field] += addend)
 #endif
@@ -379,7 +379,7 @@ static void module_unload_init(struct module *mod)
 	for (i = 0; i < NR_CPUS; i++)
 		local_set(&mod->ref[i].count, 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[_smp_processor_id()].count, 1);
+	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
...
@@ -48,11 +48,11 @@ void disable_nonboot_cpus(void)
 {
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(0));
-	printk("Freezing CPUs (at %d)", _smp_processor_id());
+	printk("Freezing CPUs (at %d)", raw_smp_processor_id());
 	current->state = TASK_INTERRUPTIBLE;
 	schedule_timeout(HZ);
 	printk("...");
-	BUG_ON(_smp_processor_id() != 0);
+	BUG_ON(raw_smp_processor_id() != 0);
 	/* FIXME: for this to work, all the CPUs must be running
 	 * "idle" thread (or we deadlock). Is that guaranteed? */
...
@@ -3814,7 +3814,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -3825,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule);
 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 	long ret;
 	atomic_inc(&rq->nr_iowait);
...
@@ -100,7 +100,7 @@ static int stop_machine(void)
 	stopmachine_state = STOPMACHINE_WAIT;
 	for_each_online_cpu(i) {
-		if (i == _smp_processor_id())
+		if (i == raw_smp_processor_id())
 			continue;
 		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
 		if (ret < 0)
@@ -182,7 +182,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 	/* If they don't care which CPU fn runs on, bind to any online one. */
 	if (cpu == NR_CPUS)
-		cpu = _smp_processor_id();
+		cpu = raw_smp_processor_id();
 	p = kthread_create(do_stop, &smdata, "kstopmachine");
 	if (!IS_ERR(p)) {
...
@@ -20,6 +20,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
   lib-y += dec_and_lock.o
...
@@ -9,61 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
-#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
-				defined(CONFIG_DEBUG_PREEMPT)
-/*
- * Debugging check.
- */
-unsigned int smp_processor_id(void)
-{
-	unsigned long preempt_count = preempt_count();
-	int this_cpu = __smp_processor_id();
-	cpumask_t this_mask;
-	if (likely(preempt_count))
-		goto out;
-	if (irqs_disabled())
-		goto out;
-	/*
-	 * Kernel threads bound to a single CPU can safely use
-	 * smp_processor_id():
-	 */
-	this_mask = cpumask_of_cpu(this_cpu);
-	if (cpus_equal(current->cpus_allowed, this_mask))
-		goto out;
-	/*
-	 * It is valid to assume CPU-locality during early bootup:
-	 */
-	if (system_state != SYSTEM_RUNNING)
-		goto out;
-	/*
-	 * Avoid recursion:
-	 */
-	preempt_disable();
-	if (!printk_ratelimit())
-		goto out_enable;
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
-	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
-	dump_stack();
-out_enable:
-	preempt_enable_no_resched();
-out:
-	return this_cpu;
-}
-EXPORT_SYMBOL(smp_processor_id);
-#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
 #ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
...
/*
 * lib/smp_processor_id.c
 *
 * DEBUG_PREEMPT variant of smp_processor_id().
 */
#include <linux/module.h>
#include <linux/kallsyms.h>

unsigned int debug_smp_processor_id(void)
{
	unsigned long preempt_count = preempt_count();
	int this_cpu = raw_smp_processor_id();
	cpumask_t this_mask;

	if (likely(preempt_count))
		goto out;

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	this_mask = cpumask_of_cpu(this_cpu);

	if (cpus_equal(current->cpus_allowed, this_mask))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable();

	if (!printk_ratelimit())
		goto out_enable;

	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched();
out:
	return this_cpu;
}

EXPORT_SYMBOL(debug_smp_processor_id);
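
For reference, a minimal sketch of silencing a legitimate false positive, per
the NOTE in the new <linux/smp.h> comment above (the function and array below
are hypothetical, not part of this patch):

    static atomic_t per_cpu_hint[NR_CPUS];  /* hypothetical per-CPU stats */

    static void touch_local_hint(void)      /* hypothetical call site */
    {
            /* A stale CPU number after preemption is harmless here, so
             * raw_smp_processor_id() documents that the race is understood
             * and avoids the DEBUG_PREEMPT warning:
             */
            atomic_inc(&per_cpu_hint[raw_smp_processor_id()]);
    }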