Commit e4039bb2 authored by Robert Love, committed by Trond Myklebust

[PATCH] spinlock.h cleanup

 - cleanup #defines: I do not follow the rationale behind the
   odd line-wrapped defines at the beginning of the file.  If
   we have to use multiple lines, then we might as well do so
   cleanly and according to normal practice...

 - Remove a level of indirection: do not have spin_lock_foo
   use spin_lock - just explicitly call what is needed.

 - we do not need to define the spin_lock functions twice, once
   for CONFIG_PREEMPT and once for !CONFIG_PREEMPT.  Defining
   them once with the preempt macros will optimize away fine
   (see the sketch below).

 - cleanup preempt.h too

 - other misc. cleanup, improved comments, reordering, etc.
parent 1edfa642
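
The third bullet in the commit message is easiest to see with the macros expanded by hand. Below is a minimal user-space sketch, not kernel code: the demo_*/my_raw_* names and the MY_CONFIG_PREEMPT switch are invented for illustration. It shows that a single lock definition built on top of the preempt macros collapses to the bare raw operation when those macros are empty.

/*
 * Minimal sketch of "define once, let the preempt macros optimize away".
 * All names here (demo_*, my_raw_*, MY_CONFIG_PREEMPT) are invented;
 * this is not the kernel header itself.
 */
#include <stdio.h>

/* stand-ins for the architecture's real lock primitives */
static inline void my_raw_spin_lock(int *lock)   { *lock = 1; }
static inline void my_raw_spin_unlock(int *lock) { *lock = 0; }

#ifdef MY_CONFIG_PREEMPT
static int demo_preempt_count;
# define demo_preempt_disable() do { demo_preempt_count++; } while (0)
# define demo_preempt_enable()  do { demo_preempt_count--; } while (0)
#else
/* with preemption configured out, the helpers are empty statements */
# define demo_preempt_disable() do { } while (0)
# define demo_preempt_enable()  do { } while (0)
#endif

/* one definition serves both configurations */
#define demo_spin_lock(lock) \
do { \
        demo_preempt_disable(); \
        my_raw_spin_lock(lock); \
} while (0)

#define demo_spin_unlock(lock) \
do { \
        my_raw_spin_unlock(lock); \
        demo_preempt_enable(); \
} while (0)

int main(void)
{
        int lock = 0;

        demo_spin_lock(&lock);   /* without MY_CONFIG_PREEMPT this is just my_raw_spin_lock() */
        printf("locked: %d\n", lock);
        demo_spin_unlock(&lock);
        printf("locked: %d\n", lock);
        return 0;
}

Built without -DMY_CONFIG_PREEMPT, the empty do { } while (0) bodies generate no code at all, which is why the separate !CONFIG_PREEMPT copies of spin_lock() and friends in the old header are unnecessary.
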
include/linux/preempt.h

 #ifndef __LINUX_PREEMPT_H
 #define __LINUX_PREEMPT_H
+/*
+ * include/linux/preempt.h - macros for accessing and manipulating
+ * preempt_count (used for kernel preemption, interrupt count, etc.)
+ */
 #include <linux/config.h>

 #define preempt_count() (current_thread_info()->preempt_count)

 #define inc_preempt_count() \
 do { \
@@ -31,17 +36,16 @@ do { \
         barrier(); \
 } while (0)

-#define preempt_enable() \
+#define preempt_check_resched() \
 do { \
-        preempt_enable_no_resched(); \
         if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
                 preempt_schedule(); \
 } while (0)

-#define preempt_check_resched() \
+#define preempt_enable() \
 do { \
-        if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-                preempt_schedule(); \
+        preempt_enable_no_resched(); \
+        preempt_check_resched(); \
 } while (0)

 #define inc_preempt_count_non_preempt() do { } while (0)
@@ -50,7 +54,7 @@ do { \
 #else

 #define preempt_disable() do { } while (0)
-#define preempt_enable_no_resched() do {} while(0)
+#define preempt_enable_no_resched() do { } while (0)
 #define preempt_enable() do { } while (0)
 #define preempt_check_resched() do { } while (0)
...
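
To make the reordering in the hunk above concrete, here is a small user-space model of how the macros now nest: preempt_check_resched() is defined first, and preempt_enable() is simply preempt_enable_no_resched() followed by that check. The demo_* names, the plain counter, and the flag are stand-ins; the real macros operate on current_thread_info() and contain barriers that are omitted here.

/* Simplified model of the reordered preempt.h macros (demo_* names invented). */
#include <stdio.h>

static int demo_preempt_count;  /* models preempt_count() */
static int demo_need_resched;   /* models TIF_NEED_RESCHED */

static void demo_preempt_schedule(void) { puts("rescheduling"); }

#define demo_preempt_disable()           do { demo_preempt_count++; } while (0)
#define demo_preempt_enable_no_resched() do { demo_preempt_count--; } while (0)

/* defined before demo_preempt_enable(), which now just reuses it */
#define demo_preempt_check_resched() \
do { \
        if (demo_need_resched) \
                demo_preempt_schedule(); \
} while (0)

#define demo_preempt_enable() \
do { \
        demo_preempt_enable_no_resched(); \
        demo_preempt_check_resched(); \
} while (0)

int main(void)
{
        demo_preempt_disable();
        demo_need_resched = 1;  /* pretend a wakeup marked this task for rescheduling */
        demo_preempt_enable();  /* drops the count, then checks and "reschedules" */
        printf("count = %d\n", demo_preempt_count);
        return 0;
}

Running this prints "rescheduling" followed by "count = 0": the count is dropped first and only then is the reschedule check performed, which is the same order the rewritten preempt_enable() uses.
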
include/linux/spinlock.h

 #ifndef __LINUX_SPINLOCK_H
 #define __LINUX_SPINLOCK_H
+/*
+ * include/linux/spinlock.h - generic locking declarations
+ */
 #include <linux/config.h>
 #include <linux/preempt.h>
 #include <linux/linkage.h>
 #include <linux/compiler.h>
 #include <linux/thread_info.h>
 #include <linux/kernel.h>
+#include <linux/stringify.h>
 #include <asm/system.h>

 /*
- * These are the generic versions of the spinlocks and read-write
- * locks..
+ * Must define these before including other files, inline functions need them
  */
-#define spin_lock_irqsave(lock, flags) do { local_irq_save(flags); spin_lock(lock); } while (0)
-#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0)
-#define spin_lock_bh(lock) do { local_bh_disable(); spin_lock(lock); } while (0)
-#define read_lock_irqsave(lock, flags) do { local_irq_save(flags); read_lock(lock); } while (0)
-#define read_lock_irq(lock) do { local_irq_disable(); read_lock(lock); } while (0)
-#define read_lock_bh(lock) do { local_bh_disable(); read_lock(lock); } while (0)
-#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0)
-#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0)
-#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0)
-#define spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
-#define spin_unlock_irq(lock) do { _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
-#define read_unlock_irqrestore(lock, flags) do { _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define read_unlock_irq(lock) do { _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0)
-#define write_unlock_irqrestore(lock, flags) do { _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define write_unlock_irq(lock) do { _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
-#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0)
-#define spin_trylock_bh(lock) ({ int __r; local_bh_disable();\
-        __r = spin_trylock(lock); \
-        if (!__r) local_bh_enable(); \
-        __r; })
-
-/* Must define these before including other files, inline functions need them */
-#include <linux/stringify.h>
-
 #define LOCK_SECTION_NAME \
         ".text.lock." __stringify(KBUILD_BASENAME)
@@ -60,11 +31,17 @@
 #define LOCK_SECTION_END \
         ".previous\n\t"

+/*
+ * If CONFIG_SMP is set, pull in the _raw_* definitions
+ */
 #ifdef CONFIG_SMP
 #include <asm/spinlock.h>

-#elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously
-                                  defined (e.g. by including asm/spinlock.h */
+/*
+ * !CONFIG_SMP and spin_lock_init not previously defined
+ * (e.g. by including include/asm/spinlock.h)
+ */
+#elif !defined(spin_lock_init)

 #ifndef CONFIG_PREEMPT
 # define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
@@ -72,55 +49,42 @@
 #endif

 /*
- * Your basic spinlocks, allowing only a single CPU anywhere
- *
- * Most gcc versions have a nasty bug with empty initializers.
+ * gcc versions before ~2.95 have a nasty bug with empty initializers.
  */
 #if (__GNUC__ > 2)
 typedef struct { } spinlock_t;
-# define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+typedef struct { } rwlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#define RW_LOCK_UNLOCKED (rwlock_t) { }
 #else
 typedef struct { int gcc_is_buggy; } spinlock_t;
-# define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+typedef struct { int gcc_is_buggy; } rwlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
 #endif

+/*
+ * If CONFIG_SMP is unset, declare the _raw_* definitions as nops
+ */
 #define spin_lock_init(lock) do { (void)(lock); } while(0)
-#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */
+#define _raw_spin_lock(lock) (void)(lock)
 #define spin_is_locked(lock) ((void)(lock), 0)
 #define _raw_spin_trylock(lock) ((void)(lock), 1)
 #define spin_unlock_wait(lock) do { (void)(lock); } while(0)
 #define _raw_spin_unlock(lock) do { (void)(lock); } while(0)

-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- * Most gcc versions have a nasty bug with empty initializers.
- */
-#if (__GNUC__ > 2)
-typedef struct { } rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { }
-#else
-typedef struct { int gcc_is_buggy; } rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#endif
-
 #define rwlock_init(lock) do { } while(0)
-#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
+#define _raw_read_lock(lock) (void)(lock)
 #define _raw_read_unlock(lock) do { } while(0)
-#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
+#define _raw_write_lock(lock) (void)(lock)
 #define _raw_write_unlock(lock) do { } while(0)

 #endif /* !SMP */

-#ifdef CONFIG_PREEMPT
+/*
+ * Define the various spin_lock and rw_lock methods.  Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set.  The various
+ * methods are defined as nops in the case they are not required.
+ */
 #define spin_lock(lock) \
 do { \
         preempt_disable(); \
@@ -129,31 +93,175 @@ do { \

 #define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
                                 1 : ({preempt_enable(); 0;});})

 #define spin_unlock(lock) \
 do { \
         _raw_spin_unlock(lock); \
         preempt_enable(); \
 } while (0)

-#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
-#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
-#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
-#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();})
+#define read_lock(lock) \
+do { \
+        preempt_disable(); \
+        _raw_read_lock(lock); \
+} while(0)
+
+#define read_unlock(lock) \
+do { \
+        _raw_read_unlock(lock); \
+        preempt_enable(); \
+} while(0)
+
+#define write_lock(lock) \
+do { \
+        preempt_disable(); \
+        _raw_write_lock(lock); \
+} while(0)
+
+#define write_unlock(lock) \
+do { \
+        _raw_write_unlock(lock); \
+        preempt_enable(); \
+} while(0)

 #define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
                                 1 : ({preempt_enable(); 0;});})

-#else
-
-#define spin_lock(lock) _raw_spin_lock(lock)
-#define spin_trylock(lock) _raw_spin_trylock(lock)
-#define spin_unlock(lock) _raw_spin_unlock(lock)
-
-#define read_lock(lock) _raw_read_lock(lock)
-#define read_unlock(lock) _raw_read_unlock(lock)
-#define write_lock(lock) _raw_write_lock(lock)
-#define write_unlock(lock) _raw_write_unlock(lock)
-#define write_trylock(lock) _raw_write_trylock(lock)
-#endif
+#define spin_lock_irqsave(lock, flags) \
+do { \
+        local_irq_save(flags); \
+        preempt_disable(); \
+        _raw_spin_lock(lock); \
+} while (0)
+
+#define spin_lock_irq(lock) \
+do { \
+        local_irq_disable(); \
+        preempt_disable(); \
+        _raw_spin_lock(lock); \
+} while (0)
+
+#define spin_lock_bh(lock) \
+do { \
+        local_bh_disable(); \
+        preempt_disable(); \
+        _raw_spin_lock(lock); \
+} while (0)
+
+#define read_lock_irqsave(lock, flags) \
+do { \
+        local_irq_save(flags); \
+        preempt_disable(); \
+        _raw_read_lock(lock); \
+} while (0)
+
+#define read_lock_irq(lock) \
+do { \
+        local_irq_disable(); \
+        preempt_disable(); \
+        _raw_read_lock(lock); \
+} while (0)
+
+#define read_lock_bh(lock) \
+do { \
+        local_bh_disable(); \
+        preempt_disable(); \
+        _raw_read_lock(lock); \
+} while (0)
+
+#define write_lock_irqsave(lock, flags) \
+do { \
+        local_irq_save(flags); \
+        preempt_disable(); \
+        _raw_write_lock(lock); \
+} while (0)
+
+#define write_lock_irq(lock) \
+do { \
+        local_irq_disable(); \
+        preempt_disable(); \
+        _raw_write_lock(lock); \
+} while (0)
+
+#define write_lock_bh(lock) \
+do { \
+        local_bh_disable(); \
+        preempt_disable(); \
+        _raw_write_lock(lock); \
+} while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+do { \
+        _raw_spin_unlock(lock); \
+        local_irq_restore(flags); \
+        preempt_enable(); \
+} while (0)
+
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+do { \
+        _raw_spin_unlock(lock); \
+        local_irq_restore(flags); \
+} while (0)
+
+#define spin_unlock_irq(lock) \
+do { \
+        _raw_spin_unlock(lock); \
+        local_irq_enable(); \
+        preempt_enable(); \
+} while (0)
+
+#define spin_unlock_bh(lock) \
+do { \
+        _raw_spin_unlock(lock); \
+        preempt_enable(); \
+        local_bh_enable(); \
+} while (0)
+
+#define read_unlock_irqrestore(lock, flags) \
+do { \
+        _raw_read_unlock(lock); \
+        local_irq_restore(flags); \
+        preempt_enable(); \
+} while (0)
+
+#define read_unlock_irq(lock) \
+do { \
+        _raw_read_unlock(lock); \
+        local_irq_enable(); \
+        preempt_enable(); \
+} while (0)
+
+#define read_unlock_bh(lock) \
+do { \
+        _raw_read_unlock(lock); \
+        preempt_enable(); \
+        local_bh_enable(); \
+} while (0)
+
+#define write_unlock_irqrestore(lock, flags) \
+do { \
+        _raw_write_unlock(lock); \
+        local_irq_restore(flags); \
+        preempt_enable(); \
+} while (0)
+
+#define write_unlock_irq(lock) \
+do { \
+        _raw_write_unlock(lock); \
+        local_irq_enable(); \
+        preempt_enable(); \
+} while (0)
+
+#define write_unlock_bh(lock) \
+do { \
+        _raw_write_unlock(lock); \
+        preempt_enable(); \
+        local_bh_enable(); \
+} while (0)
+
+#define spin_trylock_bh(lock) ({ local_bh_disable(); preempt_disable(); \
+                                _raw_spin_trylock(lock) ? 1 : \
+                                ({preempt_enable(); local_bh_enable(); 0;});})

 /* "lock on reference count zero" */
 #ifndef ATOMIC_DEC_AND_LOCK
...
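
Finally, a caller-side sketch of how the rewritten lock macros are meant to be paired. This is kernel-module-style code against the 2.5-era API shown in the diff; the demo_* names are invented, and only the spinlock.h calls come from the patch, so treat it as an illustration rather than a drop-in example.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
static int demo_counter;

/* process context that can race with an interrupt handler: save flags */
static void demo_update(int delta)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);      /* irqs off, preempt off, raw lock */
        demo_counter += delta;
        spin_unlock_irqrestore(&demo_lock, flags); /* raw unlock, irqs restored, preempt on */
}

/* code that only races with softirqs/bottom halves: the _bh variants suffice */
static int demo_read(void)
{
        int val;

        spin_lock_bh(&demo_lock);
        val = demo_counter;
        spin_unlock_bh(&demo_lock);
        return val;
}

Note that in every unlock variant defined above, the raw unlock is issued before preempt_enable(), so a task can never be preempted while it still holds the lock.
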