Commit 83cccd0e authored by Christoph Hellwig

Cleanup BKL handling and move kernel_flag definition to common code

parent d17e9bb6
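The per-arch definitions removed below all back the same API: the BKL is dropped and retaken around context switches through `release_kernel_lock()` and `reacquire_kernel_lock()`, which is why both helpers test `task->lock_depth` instead of unconditionally touching the lock. A minimal sketch of that interaction, assuming a heavily simplified `schedule()` (the real one does far more):

```c
#include <linux/sched.h>
#include <linux/smp_lock.h>

/*
 * Simplified sketch, not the actual schedule() body: it only shows
 * where the BKL is transparently dropped and reacquired around a
 * context switch.
 */
asmlinkage void schedule(void)
{
	struct task_struct *prev = current;

	release_kernel_lock(prev);	/* drop the BKL if prev holds it */

	/* ... pick the next runnable task and context-switch ... */

	reacquire_kernel_lock(current);	/* retake it when we run again */
}
```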
@@ -40,7 +40,6 @@
 extern struct hwrpb_struct *hwrpb;
 extern void dump_thread(struct pt_regs *, struct user *);
 extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-extern spinlock_t kernel_flag;
 extern spinlock_t rtc_lock;
 /* these are C runtime functions with special calling conventions: */
@@ -207,7 +206,6 @@ EXPORT_SYMBOL(up);
  */
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(synchronize_irq);
 EXPORT_SYMBOL(flush_tlb_all);
 EXPORT_SYMBOL(flush_tlb_mm);
...
@@ -67,8 +67,6 @@ enum ipi_message_type {
 	IPI_CPU_STOP,
 };
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 /* Set to a secondary's cpuid when it comes online. */
 static int smp_secondary_alive __initdata = 0;
...
@@ -273,7 +273,3 @@ EXPORT_SYMBOL_NOVERS(__down_trylock_failed);
 EXPORT_SYMBOL_NOVERS(__up_wakeup);
 EXPORT_SYMBOL(get_wchan);
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(kernel_flag);
-#endif
@@ -36,10 +36,6 @@
 #define MEM_SIZE (16*1024*1024)
 #endif
-#ifdef CONFIG_PREEMPT
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-#endif
 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
 char fpe_type[8];
...
@@ -126,7 +126,6 @@ EXPORT_SYMBOL(mmx_copy_page);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL_NOVERS(__write_lock_failed);
 EXPORT_SYMBOL_NOVERS(__read_lock_failed);
...
@@ -18,6 +18,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
+#include <linux/interrupt.h>
 #include <asm/mtrr.h>
 #include <asm/pgalloc.h>
@@ -103,9 +104,6 @@
  * about nothing of note with C stepping upwards.
  */
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
 /*
...
@@ -84,10 +84,6 @@ EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_call_function_single);
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(ia64_cpu_to_sapicid);
-#include <asm/smplock.h>
-EXPORT_SYMBOL(kernel_flag);
 #else /* !CONFIG_SMP */
 EXPORT_SYMBOL(__flush_tlb_all);
...
@@ -52,14 +52,6 @@
 #include <asm/unistd.h>
 #include <asm/mca.h>
-/*
- * The Big Kernel Lock. It's not supposed to be used for performance critical stuff
- * anymore. But we still need to align it because certain workloads are still affected by
- * it. For example, llseek() and various other filesystem related routines still use the
- * BKL.
- */
-spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
 /*
  * Structure and data for smp_call_function(). This is designed to minimise static memory
  * requirements. It also looks cleaner.
...
@@ -53,7 +53,6 @@
 /* Ze Big Kernel Lock! */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 int smp_threads_ready;	/* Not used */
 int smp_num_cpus;
 int global_irq_holder = NO_PROC_ID;
...
@@ -53,7 +53,6 @@ static void sendintr(int destid, unsigned char status)
 #endif /* CONFIG_SGI_IP27 */
 /* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 int smp_threads_ready;	/* Not used */
 atomic_t smp_commenced = ATOMIC_INIT(0);
 struct cpuinfo_mips cpu_data[NR_CPUS];
...
@@ -35,9 +35,6 @@ EXPORT_SYMBOL(boot_cpu_data);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(synchronize_irq);
-#include <asm/smplock.h>
-EXPORT_SYMBOL(kernel_flag);
 #include <asm/system.h>
 EXPORT_SYMBOL(__global_sti);
 EXPORT_SYMBOL(__global_cli);
...
@@ -93,9 +93,6 @@ EXPORT_SYMBOL(enable_irq);
 EXPORT_SYMBOL(disable_irq);
 EXPORT_SYMBOL(disable_irq_nosync);
 EXPORT_SYMBOL(probe_irq_mask);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(kernel_flag);
-#endif /* CONFIG_SMP */
 EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
 EXPORT_SYMBOL_NOVERS(DMA_MODE_READ);
...
@@ -47,7 +47,6 @@ struct cpuinfo_PPC cpu_data[NR_CPUS];
 struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
 atomic_t ipi_recv;
 atomic_t ipi_sent;
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 unsigned int prof_multiplier[NR_CPUS] = { [1 ... NR_CPUS-1] = 1 };
 unsigned int prof_counter[NR_CPUS] = { [1 ... NR_CPUS-1] = 1 };
 unsigned long cache_decay_ticks = HZ/100;
...
@@ -74,7 +74,6 @@ EXPORT_SYMBOL(disable_irq);
 EXPORT_SYMBOL(disable_irq_nosync);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(kernel_flag);
 #endif /* CONFIG_SMP */
 EXPORT_SYMBOL(register_ioctl32_conversion);
...
@@ -51,7 +51,6 @@
 #include <asm/machdep.h>
 int smp_threads_ready = 0;
-spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
 unsigned long cache_decay_ticks;
 /* initialised so it doesnt end up in bss */
...
@@ -54,8 +54,6 @@ cycles_t cacheflush_time=0;
 int smp_threads_ready=0;	/* Set when the idlers are all forked. */
 static atomic_t smp_commenced = ATOMIC_INIT(0);
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 volatile unsigned long phys_cpu_present_map;
 volatile unsigned long cpu_online_map;
 unsigned long cache_decay_ticks = 0;
@@ -634,7 +632,6 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(smp_ctl_set_bit);
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 EXPORT_SYMBOL(smp_num_cpus);
...
@@ -53,8 +53,6 @@ cycles_t cacheflush_time=0;
 int smp_threads_ready=0;	/* Set when the idlers are all forked. */
 static atomic_t smp_commenced = ATOMIC_INIT(0);
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 volatile unsigned long phys_cpu_present_map;
 volatile unsigned long cpu_online_map;
 unsigned long cache_decay_ticks = 0;
@@ -613,7 +611,6 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 EXPORT_SYMBOL(lowcore_ptr);
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(smp_ctl_set_bit);
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 EXPORT_SYMBOL(smp_num_cpus);
...
@@ -66,9 +66,6 @@ cycles_t cacheflush_time = 0; /* XXX */
  * instruction which is much better...
  */
-/* Kernel spinlock */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 /* Used to make bitops atomic */
 unsigned char bitops_spinlock = 0;
...
@@ -77,10 +77,6 @@ extern int __divdi3(int, int);
 extern void dump_thread(struct pt_regs *, struct user *);
-#ifdef CONFIG_SMP
-extern spinlock_t kernel_flag;
-#endif
 /* One thing to note is that the way the symbols of the mul/div
  * support routines are named is a mess, they all start with
  * a '.' which makes it a bitch to export, here is the trick:
@@ -130,9 +126,6 @@ EXPORT_SYMBOL_PRIVATE(_clear_bit);
 EXPORT_SYMBOL_PRIVATE(_change_bit);
 #ifdef CONFIG_SMP
-/* Kernel wide locking */
-EXPORT_SYMBOL(kernel_flag);
 /* IRQ implementation. */
 EXPORT_SYMBOL(global_irq_holder);
 EXPORT_SYMBOL(synchronize_irq);
...
@@ -46,9 +46,6 @@ cpuinfo_sparc cpu_data[NR_CPUS];
 /* Please don't make this stuff initdata!!!  --DaveM */
 static unsigned char boot_cpu_id;
-/* Kernel spinlock */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 atomic_t sparc64_num_cpus_online = ATOMIC_INIT(0);
 unsigned long cpu_online_map = 0;
 atomic_t sparc64_num_cpus_possible = ATOMIC_INIT(0);
...
@@ -101,9 +101,7 @@ extern int __ashrdi3(int, int);
 extern void dump_thread(struct pt_regs *, struct user *);
 extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
-#ifdef CONFIG_SMP
-extern spinlock_t kernel_flag;
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
 extern void _do_spin_lock (spinlock_t *lock, char *str);
 extern void _do_spin_unlock (spinlock_t *lock);
 extern int _spin_trylock (spinlock_t *lock);
@@ -112,7 +110,6 @@ extern void _do_read_unlock(rwlock_t *rw, char *str);
 extern void _do_write_lock(rwlock_t *rw, char *str);
 extern void _do_write_unlock(rwlock_t *rw);
 #endif
-#endif
 extern unsigned long phys_base;
 extern unsigned long pfn_base;
@@ -127,9 +124,6 @@ EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 #endif
-/* Kernel wide locking */
-EXPORT_SYMBOL(kernel_flag);
 /* Hard IRQ locking */
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(synchronize_irq);
...
@@ -22,9 +22,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
-/* The 'big kernel lock' */
-spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 /*
  * the following functions deal with sending IPIs between CPUs.
  *
...
@@ -109,7 +109,6 @@ EXPORT_SYMBOL(mmx_copy_page);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(kernel_flag);
 EXPORT_SYMBOL(smp_num_cpus);
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL_NOVERS(__write_lock_failed);
...
@@ -2,21 +2,10 @@
 #define __LINUX_SMPLOCK_H
 #include <linux/config.h>
-#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
-#define lock_kernel()			do { } while(0)
-#define unlock_kernel()			do { } while(0)
-#define release_kernel_lock(task)	do { } while(0)
-#define reacquire_kernel_lock(task)	do { } while(0)
-#define kernel_locked()			1
-#else
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/sched.h>
-#include <asm/current.h>
+#include <linux/spinlock.h>
+#if CONFIG_SMP || CONFIG_PREEMPT
 extern spinlock_t kernel_flag;
@@ -26,23 +15,22 @@ extern spinlock_t kernel_flag;
 #define put_kernel_lock()	spin_unlock(&kernel_flag)
 /*
- * Release global kernel lock and global interrupt lock
+ * Release global kernel lock.
  */
-#define release_kernel_lock(task)		\
-do {						\
-	if (unlikely(task->lock_depth >= 0))	\
-		put_kernel_lock();		\
-} while (0)
+static inline void release_kernel_lock(struct task_struct *task)
+{
+	if (unlikely(task->lock_depth >= 0))
+		put_kernel_lock();
+}
 /*
  * Re-acquire the kernel lock
  */
-#define reacquire_kernel_lock(task)		\
-do {						\
-	if (unlikely(task->lock_depth >= 0))	\
-		get_kernel_lock();		\
-} while (0)
+static inline void reacquire_kernel_lock(struct task_struct *task)
+{
+	if (unlikely(task->lock_depth >= 0))
+		get_kernel_lock();
+}
 /*
  * Getting the big kernel lock.
@@ -51,22 +39,29 @@ do { \
  * so we only need to worry about other
  * CPU's.
  */
-static __inline__ void lock_kernel(void)
+static inline void lock_kernel(void)
 {
 	int depth = current->lock_depth+1;
-	if (!depth)
+	if (likely(!depth))
 		get_kernel_lock();
 	current->lock_depth = depth;
 }
-static __inline__ void unlock_kernel(void)
+static inline void unlock_kernel(void)
 {
-	if (current->lock_depth < 0)
+	if (unlikely(current->lock_depth < 0))
 		BUG();
-	if (--current->lock_depth < 0)
+	if (likely(--current->lock_depth < 0))
 		put_kernel_lock();
 }
-#endif /* CONFIG_SMP */
-#endif
+#else
+#define lock_kernel()			do { } while(0)
+#define unlock_kernel()			do { } while(0)
+#define release_kernel_lock(task)	do { } while(0)
+#define reacquire_kernel_lock(task)	do { } while(0)
+#define kernel_locked()			1
+#endif /* CONFIG_SMP || CONFIG_PREEMPT */
+#endif /* __LINUX_SMPLOCK_H */
...
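The header rewrite above also turns `release_kernel_lock()` and `reacquire_kernel_lock()` from statement macros into static inline functions, which buys type checking on the `task` argument and avoids multiple evaluation, and it moves the UP/non-preempt stubs into the `#else` branch so the real implementation reads first. A hedged usage sketch of the surviving API (the function and its body are hypothetical; only `lock_kernel()`/`unlock_kernel()` come from `<linux/smp_lock.h>`):

```c
#include <linux/smp_lock.h>

/* Hypothetical legacy driver path still serialized by the BKL. */
static int legacy_ioctl_example(void)
{
	lock_kernel();		/* recursive per task via ->lock_depth */
	/* ... touch state that older code guards with the BKL ... */
	unlock_kernel();	/* really unlocks only at the outermost call */
	return 0;
}
```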
@@ -51,6 +51,7 @@
 #include <linux/buffer_head.h>
 #include <linux/root_dev.h>
 #include <linux/percpu.h>
+#include <linux/smp_lock.h>
 #include <asm/checksum.h>
 #if defined(CONFIG_PROC_FS)
@@ -481,6 +482,9 @@ EXPORT_SYMBOL_GPL(idle_cpu);
 #if CONFIG_SMP
 EXPORT_SYMBOL_GPL(set_cpus_allowed);
 #endif
+#if CONFIG_SMP || CONFIG_PREEMPT
+EXPORT_SYMBOL(kernel_flag);
+#endif
 EXPORT_SYMBOL(jiffies);
 EXPORT_SYMBOL(jiffies_64);
 EXPORT_SYMBOL(xtime);
...
@@ -1881,7 +1881,6 @@ void __init init_idle(task_t *idle, int cpu)
 }
 #if CONFIG_SMP
 /*
  * This is how migration works:
  *
@@ -2070,6 +2069,20 @@ __init int migration_init(void)
 #endif
+#if CONFIG_SMP || CONFIG_PREEMPT
+/*
+ * The 'big kernel lock'
+ *
+ * This spinlock is taken and released recursively by lock_kernel()
+ * and unlock_kernel(). It is transparently dropped and reacquired
+ * over schedule(). It is used to protect legacy code that hasn't
+ * been migrated to a proper locking design yet.
+ *
+ * Don't use in new code.
+ */
+spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+#endif
 extern void init_timervecs(void);
 extern void timer_bh(void);
 extern void tqueue_bh(void);
...
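The recursion bookkeeping in `lock_kernel()`/`unlock_kernel()` is worth tracing: `current->lock_depth` starts at -1, only the outermost `lock_kernel()` actually spins on `kernel_flag`, and only the matching outermost `unlock_kernel()` drops it. A minimal trace, assuming a hypothetical task that does not already hold the lock:

```c
#include <linux/smp_lock.h>

/* Hypothetical demonstration of the BKL's per-task recursion. */
static void bkl_nesting_demo(void)
{
	lock_kernel();		/* lock_depth -1 -> 0: get_kernel_lock() runs  */
	lock_kernel();		/* lock_depth  0 -> 1: no second spin_lock()   */
	unlock_kernel();	/* lock_depth  1 -> 0: kernel_flag still held  */
	unlock_kernel();	/* lock_depth  0 -> -1: put_kernel_lock() runs */
}
```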