Commit eada9464 authored by Martin Dalecki, committed by Linus Torvalds

[PATCH] 2.5.20 locks.h cleanup

Since I have been looking into the locking issues recently,
the following rather trivial garbage code collection became
obvious...

 - Remove "not yet used" code from 1995 in asm/locks.h. It's garbage.

 - Remove the useless DEBUG_SPINLOCKS code from the generic spinlock.h. Just
   compiling for SMP already does the trick.

 - Replace all uses of SPINLOCK_DEBUG with the now-global
   CONFIG_DEBUG_SPINLOCK.
parent 467b45ea
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <asm/hwrpb.h>
#include <asm/io.h>
@@ -1103,66 +1104,7 @@ static int SMC37c669_xlate_drq(
unsigned int drq
);
#if 0
/*
** External Data Declarations
*/
extern struct LOCK spl_atomic;
/*
** External Function Prototype Declarations
*/
/* From kernel_alpha.mar */
extern spinlock(
struct LOCK *spl
);
extern spinunlock(
struct LOCK *spl
);
/* From filesys.c */
int allocinode(
char *name,
int can_create,
struct INODE **ipp
);
extern int null_procedure( void );
int smcc669_init( void );
int smcc669_open( struct FILE *fp, char *info, char *next, char *mode );
int smcc669_read( struct FILE *fp, int size, int number, unsigned char *buf );
int smcc669_write( struct FILE *fp, int size, int number, unsigned char *buf );
int smcc669_close( struct FILE *fp );
struct DDB smc_ddb = {
"smc", /* how this routine wants to be called */
smcc669_read, /* read routine */
smcc669_write, /* write routine */
smcc669_open, /* open routine */
smcc669_close, /* close routine */
null_procedure, /* name expansion routine */
null_procedure, /* delete routine */
null_procedure, /* create routine */
null_procedure, /* setmode */
null_procedure, /* validation routine */
0, /* class specific use */
1, /* allows information */
0, /* must be stacked */
0, /* is a flash update driver */
0, /* is a block device */
0, /* not seekable */
0, /* is an Ethernet device */
0, /* is a filesystem driver */
};
#endif
#define spinlock(x)
#define spinunlock(x)
static spinlock_t smc_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
/*
**++
@@ -2042,10 +1984,10 @@ static void __init SMC37c669_config_mode(
** mode. Therefore, a spinlock is placed around the two writes to
** guarantee that they complete uninterrupted.
*/
spinlock( &spl_atomic );
spin_lock(&smc_lock);
wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
spinunlock( &spl_atomic );
spin_unlock(&smc_lock);
}
else {
wb( &SMC37c669->index_port, SMC37c669_CONFIG_OFF_KEY );
......
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
#ifdef SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
EXPORT_SYMBOL(_raw_spin_lock);
EXPORT_SYMBOL(_raw_spin_unlock);
EXPORT_SYMBOL(_raw_spin_trylock);
......
@@ -16,7 +16,7 @@
#include <asm/system.h>
#include <asm/io.h>
#ifdef SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
#undef INIT_STUCK
#define INIT_STUCK 200000000 /*0xffffffff*/
......
@@ -254,8 +254,6 @@
* undef : traditional save_flags; cli; restore_flags;
*/
//#define DEBUG_SPINLOCKS 2 /* Set to 0, 1 or 2 in include/linux/spinlock.h */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,30)
# include <linux/init.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,30)
@@ -293,7 +291,7 @@ MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl);
# if USE_SPINLOCKS == 3 /* both */
# if defined (CONFIG_SMP) || DEBUG_SPINLOCKS > 0
# if defined (CONFIG_SMP)
# define DC390_LOCKA_INIT { spinlock_t __unlocked = SPIN_LOCK_UNLOCKED; pACB->lock = __unlocked; };
# else
# define DC390_LOCKA_INIT
@@ -322,7 +320,7 @@ MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl);
# if USE_SPINLOCKS == 2 /* adapter specific locks */
# if defined (CONFIG_SMP) || DEBUG_SPINLOCKS > 0
# if defined (CONFIG_SMP)
# define DC390_LOCKA_INIT { spinlock_t __unlocked = SPIN_LOCK_UNLOCKED; pACB->lock = __unlocked; };
# else
# define DC390_LOCKA_INIT
......
/*
* SMP locks primitives for building ix86 locks
* (not yet used).
*
* Alan Cox, alan@cymru.net, 1995
*/
/*
* This would be much easier but far less clear and easy
* to borrow for other processors if it was just assembler.
*/
extern __inline__ void prim_spin_lock(struct spinlock *sp)
{
int processor=smp_processor_id();
/*
* Grab the lock bit
*/
while(lock_set_bit(0,&sp->lock))
{
/*
* Failed, but that's cos we own it!
*/
if(sp->cpu==processor)
{
sp->users++;
return 0;
}
/*
* Spin in the cache S state if possible
*/
while(sp->lock)
{
/*
* Wait for any invalidates to go off
*/
if(smp_invalidate_needed&(1<<processor))
while(lock_clear_bit(processor,&smp_invalidate_needed))
local_flush_tlb();
sp->spins++;
}
/*
* Someone wrote the line, we go 'I' and get
* the cache entry. Now try to regrab
*/
}
sp->users++;sp->cpu=processor;
return 1;
}
/*
* Release a spin lock
*/
extern __inline__ int prim_spin_unlock(struct spinlock *sp)
{
/* This is safe. The decrement is still guarded by the lock. A multilock would
not be safe this way */
if(!--sp->users)
{
lock_clear_bit(0,&sp->lock);sp->cpu= NO_PROC_ID;
return 1;
}
return 0;
}
/*
* Non blocking lock grab
*/
extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
{
if(lock_set_bit(0,&sp->lock))
return 0; /* Locked already */
sp->users++;
return 1; /* We got the lock */
}
/*
* These wrap the locking primitives up for usage
*/
extern __inline__ void spinlock(struct spinlock *sp)
{
if(sp->priority<current->lock_order)
panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
if(prim_spin_lock(sp))
{
/*
* We got a new lock. Update the priority chain
*/
sp->oldpri=current->lock_order;
current->lock_order=sp->priority;
}
}
extern __inline__ void spinunlock(struct spinlock *sp)
{
if(current->lock_order!=sp->priority)
panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
if(prim_spin_unlock(sp))
{
/*
* Update the debugging lock priority chain. We dumped
* our last right to the lock.
*/
current->lock_order=sp->oldpri;
}
}
extern __inline__ void spintestlock(struct spinlock *sp)
{
/*
* We do no sanity checks, it's legal to optimistically
* get a lower lock.
*/
prim_spin_lock_nb(sp);
}
extern __inline__ void spintestunlock(struct spinlock *sp)
{
/*
* A testlock doesn't update the lock chain so we
* must not update it on free
*/
prim_spin_unlock(sp);
}
/*
* SMP locks primitives for building ix86 locks
* (not yet used).
*
* Alan Cox, alan@redhat.com, 1995
*/
/*
* This would be much easier but far less clear and easy
* to borrow for other processors if it was just assembler.
*/
static __inline__ void prim_spin_lock(struct spinlock *sp)
{
int processor=smp_processor_id();
/*
* Grab the lock bit
*/
while(lock_set_bit(0,&sp->lock))
{
/*
* Failed, but that's cos we own it!
*/
if(sp->cpu==processor)
{
sp->users++;
return 0;
}
/*
* Spin in the cache S state if possible
*/
while(sp->lock)
{
/*
* Wait for any invalidates to go off
*/
if(smp_invalidate_needed&(1<<processor))
while(lock_clear_bit(processor,&smp_invalidate_needed))
local_flush_tlb();
sp->spins++;
}
/*
* Someone wrote the line, we go 'I' and get
* the cache entry. Now try to regrab
*/
}
sp->users++;sp->cpu=processor;
return 1;
}
/*
* Release a spin lock
*/
static __inline__ int prim_spin_unlock(struct spinlock *sp)
{
/* This is safe. The decrement is still guarded by the lock. A multilock would
not be safe this way */
if(!--sp->users)
{
sp->cpu= NO_PROC_ID;lock_clear_bit(0,&sp->lock);
return 1;
}
return 0;
}
/*
* Non blocking lock grab
*/
static __inline__ int prim_spin_lock_nb(struct spinlock *sp)
{
if(lock_set_bit(0,&sp->lock))
return 0; /* Locked already */
sp->users++;
return 1; /* We got the lock */
}
/*
* These wrap the locking primitives up for usage
*/
static __inline__ void spinlock(struct spinlock *sp)
{
if(sp->priority<current->lock_order)
panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
if(prim_spin_lock(sp))
{
/*
* We got a new lock. Update the priority chain
*/
sp->oldpri=current->lock_order;
current->lock_order=sp->priority;
}
}
static __inline__ void spinunlock(struct spinlock *sp)
{
int pri;
if(current->lock_order!=sp->priority)
panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
pri=sp->oldpri;
if(prim_spin_unlock(sp))
{
/*
* Update the debugging lock priority chain. We dumped
* our last right to the lock.
*/
current->lock_order=sp->pri;
}
}
static __inline__ void spintestlock(struct spinlock *sp)
{
/*
* We do no sanity checks, it's legal to optimistically
* get a lower lock.
*/
prim_spin_lock_nb(sp);
}
static __inline__ void spintestunlock(struct spinlock *sp)
{
/*
* A testlock doesn't update the lock chain so we
* must not update it on free
*/
prim_spin_unlock(sp);
}
@@ -9,30 +9,20 @@
extern int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));
/* It seems that people are forgetting to
* initialize their spinlocks properly, tsk tsk.
* Remember to turn this off in 2.4. -ben
*/
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG 1
#else
#define SPINLOCK_DEBUG 0
#endif
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
typedef struct {
volatile unsigned int lock;
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned magic;
#endif
} spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT /* */
@@ -79,7 +69,7 @@ typedef struct {
static inline void _raw_spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
if (lock->magic != SPINLOCK_MAGIC)
BUG();
if (!spin_is_locked(lock))
@@ -100,7 +90,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
static inline void _raw_spin_unlock(spinlock_t *lock)
{
char oldval = 1;
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
if (lock->magic != SPINLOCK_MAGIC)
BUG();
if (!spin_is_locked(lock))
@@ -125,7 +115,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
static inline void _raw_spin_lock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
__label__ here;
here:
if (lock->magic != SPINLOCK_MAGIC) {
@@ -151,14 +141,14 @@ printk("eip: %p\n", &&here);
*/
typedef struct {
volatile unsigned int lock;
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned magic;
#endif
} rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT /* */
@@ -181,7 +171,7 @@ typedef struct {
static inline void _raw_read_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
if (rw->magic != RWLOCK_MAGIC)
BUG();
#endif
@@ -190,7 +180,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
static inline void _raw_write_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
if (rw->magic != RWLOCK_MAGIC)
BUG();
#endif
......
@@ -7,22 +7,20 @@
#include <asm/system.h>
#include <asm/processor.h>
#undef SPINLOCK_DEBUG
/*
* Simple spin lock operations.
*/
typedef struct {
volatile unsigned long lock;
#ifdef SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
volatile unsigned long owner_pc;
volatile unsigned long owner_cpu;
#endif
} spinlock_t;
#ifdef __KERNEL__
#if SPINLOCK_DEBUG
#if CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_DEBUG_INIT , 0, 0
#else
#define SPINLOCK_DEBUG_INIT /* */
@@ -34,7 +32,7 @@ typedef struct {
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#ifndef SPINLOCK_DEBUG
#ifndef CONFIG_DEBUG_SPINLOCK
static inline void _raw_spin_lock(spinlock_t *lock)
{
@@ -88,12 +86,12 @@ extern unsigned long __spin_trylock(volatile unsigned long *lock);
*/
typedef struct {
volatile unsigned long lock;
#ifdef SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
volatile unsigned long owner_pc;
#endif
} rwlock_t;
#if SPINLOCK_DEBUG
#if CONFIG_DEBUG_SPINLOCK
#define RWLOCK_DEBUG_INIT , 0
#else
#define RWLOCK_DEBUG_INIT /* */
@@ -102,7 +100,7 @@ typedef struct {
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 RWLOCK_DEBUG_INIT }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
#ifndef SPINLOCK_DEBUG
#ifndef CONFIG_DEBUG_SPINLOCK
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
......
/*
* SMP locks primitives for building ix86 locks
* (not yet used).
*
* Alan Cox, alan@redhat.com, 1995
*/
/*
* This would be much easier but far less clear and easy
* to borrow for other processors if it was just assembler.
*/
extern __inline__ void prim_spin_lock(struct spinlock *sp)
{
int processor=smp_processor_id();
/*
* Grab the lock bit
*/
while(lock_set_bit(0,&sp->lock))
{
/*
* Failed, but that's cos we own it!
*/
if(sp->cpu==processor)
{
sp->users++;
return 0;
}
/*
* Spin in the cache S state if possible
*/
while(sp->lock)
{
/*
* Wait for any invalidates to go off
*/
if(smp_invalidate_needed&(1<<processor))
while(lock_clear_bit(processor,&smp_invalidate_needed))
local_flush_tlb();
sp->spins++;
}
/*
* Someone wrote the line, we go 'I' and get
* the cache entry. Now try to regrab
*/
}
sp->users++;sp->cpu=processor;
return 1;
}
/*
* Release a spin lock
*/
extern __inline__ int prim_spin_unlock(struct spinlock *sp)
{
/* This is safe. The decrement is still guarded by the lock. A multilock would
not be safe this way */
if(!--sp->users)
{
sp->cpu= NO_PROC_ID;lock_clear_bit(0,&sp->lock);
return 1;
}
return 0;
}
/*
* Non blocking lock grab
*/
extern __inline__ int prim_spin_lock_nb(struct spinlock *sp)
{
if(lock_set_bit(0,&sp->lock))
return 0; /* Locked already */
sp->users++;
return 1; /* We got the lock */
}
/*
* These wrap the locking primitives up for usage
*/
extern __inline__ void spinlock(struct spinlock *sp)
{
if(sp->priority<current->lock_order)
panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
if(prim_spin_lock(sp))
{
/*
* We got a new lock. Update the priority chain
*/
sp->oldpri=current->lock_order;
current->lock_order=sp->priority;
}
}
extern __inline__ void spinunlock(struct spinlock *sp)
{
int pri;
if(current->lock_order!=sp->priority)
panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
pri=sp->oldpri;
if(prim_spin_unlock(sp))
{
/*
* Update the debugging lock priority chain. We dumped
* our last right to the lock.
*/
current->lock_order=sp->pri;
}
}
extern __inline__ void spintestlock(struct spinlock *sp)
{
/*
* We do no sanity checks, it's legal to optimistically
* get a lower lock.
*/
prim_spin_lock_nb(sp);
}
extern __inline__ void spintestunlock(struct spinlock *sp)
{
/*
* A testlock doesn't update the lock chain so we
* must not update it on free
*/
prim_spin_unlock(sp);
}
@@ -9,30 +9,20 @@
extern int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));
/* It seems that people are forgetting to
* initialize their spinlocks properly, tsk tsk.
* Remember to turn this off in 2.4. -ben
*/
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG 1
#else
#define SPINLOCK_DEBUG 0
#endif
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
typedef struct {
volatile unsigned int lock;
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned magic;
#endif
} spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT /* */
@@ -82,7 +72,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
static inline void _raw_spin_lock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
__label__ here;
here:
if (lock->magic != SPINLOCK_MAGIC) {
@@ -97,7 +87,7 @@ printk("eip: %p\n", &&here);
static inline void _raw_spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
if (lock->magic != SPINLOCK_MAGIC)
BUG();
if (!spin_is_locked(lock))
@@ -120,14 +110,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
*/
typedef struct {
volatile unsigned int lock;
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned magic;
#endif
} rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT /* */
@@ -150,7 +140,7 @@ typedef struct {
extern inline void _raw_read_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
if (rw->magic != RWLOCK_MAGIC)
BUG();
#endif
@@ -159,7 +149,7 @@ extern inline void _raw_read_lock(rwlock_t *rw)
static inline void _raw_write_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
#ifdef CONFIG_DEBUG_SPINLOCK
if (rw->magic != RWLOCK_MAGIC)
BUG();
#endif
......
@@ -64,13 +64,9 @@
#elif !defined(spin_lock_init) /* !SMP and spin_lock_init not previously
defined (e.g. by including asm/spinlock.h */
#define DEBUG_SPINLOCKS 0 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
#if (DEBUG_SPINLOCKS < 1)
#ifndef CONFIG_PREEMPT
#define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
#define ATOMIC_DEC_AND_LOCK
# define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif
/*
@@ -80,10 +76,10 @@
*/
#if (__GNUC__ > 2)
typedef struct { } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
# define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
# define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif
#define spin_lock_init(lock) do { (void)(lock); } while(0)
@@ -93,42 +89,6 @@
#define spin_unlock_wait(lock) do { (void)(lock); } while(0)
#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
#elif (DEBUG_SPINLOCKS < 2)
typedef struct {
volatile unsigned long lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
#define spin_is_locked(lock) (test_bit(0,(lock)))
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
#define spin_lock(x) do { (x)->lock = 1; } while (0)
#define spin_unlock_wait(x) do { } while (0)
#define spin_unlock(x) do { (x)->lock = 0; } while (0)
#else /* (DEBUG_SPINLOCKS >= 2) */
typedef struct {
volatile unsigned long lock;
volatile unsigned int babble;
const char *module;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
#include <linux/kernel.h>
#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
#define spin_is_locked(lock) (test_bit(0,(lock)))
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
#endif /* DEBUG_SPINLOCKS */
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
......