Commit ee6536f8 authored by Christoph Hellwig's avatar Christoph Hellwig

Implement down_read_trylock() and down_write_trylock(), and add a generic spinlock implementation for downgrade_write().
parent 46979afd
...@@ -117,6 +117,29 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value ...@@ -117,6 +117,29 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value
: "memory", "cc"); : "memory", "cc");
} }
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * Speculatively computes count + RWSEM_ACTIVE_READ_BIAS and commits it
 * with a locked cmpxchg, retrying if another CPU changed the count in
 * the meantime.  Bails out without writing if the new count would be
 * <= 0 (a writer is active or waiting).
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
__s32 result, tmp;
__asm__ __volatile__(
"# beginning __down_read_trylock\n\t"
" movl %0,%1\n\t" /* result = sem->count */
"1:\n\t"
" movl %1,%2\n\t" /* tmp = result */
" addl %3,%2\n\t" /* tmp += RWSEM_ACTIVE_READ_BIAS */
" jle 2f\n\t" /* new count <= 0: contention, give up without writing */
LOCK_PREFIX " cmpxchgl %2,%0\n\t" /* if count still == result, store tmp */
" jnz 1b\n\t" /* lost a race: eax (result) now holds the fresh count, retry */
"2:\n\t"
"# ending __down_read_trylock\n\t"
: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
: "i"(RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
return result>=0 ? 1 : 0; /* old count was >= 0 iff we took the lock */
}
/* /*
* lock for writing * lock for writing
*/ */
...@@ -144,6 +167,19 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the ...@@ -144,6 +167,19 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
: "memory", "cc"); : "memory", "cc");
} }
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 *
 * A single atomic compare-and-swap: only a completely idle semaphore
 * (count == RWSEM_UNLOCKED_VALUE) can be claimed for writing.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	old = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);

	/* cmpxchg returns the previous count; any value other than
	 * RWSEM_UNLOCKED_VALUE means someone beat us to it */
	return old == RWSEM_UNLOCKED_VALUE;
}
/* /*
* unlock after reading * unlock after reading
*/ */
......
...@@ -54,9 +54,12 @@ struct rw_semaphore { ...@@ -54,9 +54,12 @@ struct rw_semaphore {
extern void FASTCALL(init_rwsem(struct rw_semaphore *sem)); extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
extern void FASTCALL(__down_read(struct rw_semaphore *sem)); extern void FASTCALL(__down_read(struct rw_semaphore *sem));
extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__down_write(struct rw_semaphore *sem)); extern void FASTCALL(__down_write(struct rw_semaphore *sem));
extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__up_read(struct rw_semaphore *sem)); extern void FASTCALL(__up_read(struct rw_semaphore *sem));
extern void FASTCALL(__up_write(struct rw_semaphore *sem)); extern void FASTCALL(__up_write(struct rw_semaphore *sem));
extern void FASTCALL(__downgrade_write(struct rw_semaphore *sem));
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */ #endif /* _LINUX_RWSEM_SPINLOCK_H */
...@@ -45,6 +45,18 @@ static inline void down_read(struct rw_semaphore *sem) ...@@ -45,6 +45,18 @@ static inline void down_read(struct rw_semaphore *sem)
rwsemtrace(sem,"Leaving down_read"); rwsemtrace(sem,"Leaving down_read");
} }
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * Traced wrapper around the arch-specific __down_read_trylock().
 */
static inline int down_read_trylock(struct rw_semaphore *sem)
{
	int got_it;

	rwsemtrace(sem,"Entering down_read_trylock");
	got_it = __down_read_trylock(sem);
	rwsemtrace(sem,"Leaving down_read_trylock");

	return got_it;
}
/* /*
* lock for writing * lock for writing
*/ */
...@@ -55,6 +67,18 @@ static inline void down_write(struct rw_semaphore *sem) ...@@ -55,6 +67,18 @@ static inline void down_write(struct rw_semaphore *sem)
rwsemtrace(sem,"Leaving down_write"); rwsemtrace(sem,"Leaving down_write");
} }
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 *
 * Traced wrapper around the arch-specific __down_write_trylock().
 */
static inline int down_write_trylock(struct rw_semaphore *sem)
{
	int got_it;

	rwsemtrace(sem,"Entering down_write_trylock");
	got_it = __down_write_trylock(sem);
	rwsemtrace(sem,"Leaving down_write_trylock");

	return got_it;
}
/* /*
* release a read lock * release a read lock
*/ */
...@@ -85,6 +109,5 @@ static inline void downgrade_write(struct rw_semaphore *sem) ...@@ -85,6 +109,5 @@ static inline void downgrade_write(struct rw_semaphore *sem)
rwsemtrace(sem,"Leaving downgrade_write"); rwsemtrace(sem,"Leaving downgrade_write");
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_H */ #endif /* _LINUX_RWSEM_H */
...@@ -46,8 +46,9 @@ void init_rwsem(struct rw_semaphore *sem) ...@@ -46,8 +46,9 @@ void init_rwsem(struct rw_semaphore *sem)
* - the 'waiting count' is non-zero * - the 'waiting count' is non-zero
* - the spinlock must be held by the caller * - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having flags zeroised * - woken process blocks are discarded from the list after having flags zeroised
* - writers are only woken if wakewrite is non-zero
*/ */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem) static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{ {
struct rwsem_waiter *waiter; struct rwsem_waiter *waiter;
int woken; int woken;
...@@ -56,7 +57,14 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem) ...@@ -56,7 +57,14 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list); waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
/* try to grant a single write lock if there's a writer at the front of the queue if (!wakewrite) {
if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
goto out;
goto dont_wake_writers;
}
/* if we are allowed to wake writers try to grant a single write lock if there's a
* writer at the front of the queue
* - we leave the 'waiting count' incremented to signify potential contention * - we leave the 'waiting count' incremented to signify potential contention
*/ */
if (waiter->flags & RWSEM_WAITING_FOR_WRITE) { if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
...@@ -68,16 +76,19 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem) ...@@ -68,16 +76,19 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
} }
/* grant an infinite number of read locks to the readers at the front of the queue */ /* grant an infinite number of read locks to the readers at the front of the queue */
dont_wake_writers:
woken = 0; woken = 0;
do { while (waiter->flags&RWSEM_WAITING_FOR_READ) {
struct list_head *next = waiter->list.next;
list_del(&waiter->list); list_del(&waiter->list);
waiter->flags = 0; waiter->flags = 0;
wake_up_process(waiter->task); wake_up_process(waiter->task);
woken++; woken++;
if (list_empty(&sem->wait_list)) if (list_empty(&sem->wait_list))
break; break;
waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list); waiter = list_entry(next,struct rwsem_waiter,list);
} while (waiter->flags&RWSEM_WAITING_FOR_READ); }
sem->activity += woken; sem->activity += woken;
...@@ -148,6 +159,28 @@ void __down_read(struct rw_semaphore *sem) ...@@ -148,6 +159,28 @@ void __down_read(struct rw_semaphore *sem)
rwsemtrace(sem,"Leaving __down_read"); rwsemtrace(sem,"Leaving __down_read");
} }
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * Spinlock-protected generic variant: a read lock is granted only if
 * no writer is active (activity >= 0) and nobody is queued waiting.
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
	int granted;

	rwsemtrace(sem,"Entering __down_read_trylock");

	spin_lock(&sem->wait_lock);

	granted = (sem->activity>=0 && list_empty(&sem->wait_list));
	if (granted)
		/* granted: account for one more active reader */
		sem->activity++;

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem,"Leaving __down_read_trylock");
	return granted;
}
/* /*
* get a write lock on the semaphore * get a write lock on the semaphore
* - note that we increment the waiting count anyway to indicate an exclusive lock * - note that we increment the waiting count anyway to indicate an exclusive lock
...@@ -194,6 +227,28 @@ void __down_write(struct rw_semaphore *sem) ...@@ -194,6 +227,28 @@ void __down_write(struct rw_semaphore *sem)
rwsemtrace(sem,"Leaving __down_write"); rwsemtrace(sem,"Leaving __down_write");
} }
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 *
 * Spinlock-protected generic variant: a write lock is granted only if
 * the semaphore is completely idle (no readers or writer, empty queue).
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	int granted;

	rwsemtrace(sem,"Entering __down_write_trylock");

	spin_lock(&sem->wait_lock);

	granted = (sem->activity==0 && list_empty(&sem->wait_list));
	if (granted)
		/* granted: mark the semaphore write-locked */
		sem->activity = -1;

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem,"Leaving __down_write_trylock");
	return granted;
}
/* /*
* release a read lock on the semaphore * release a read lock on the semaphore
*/ */
...@@ -222,18 +277,40 @@ void __up_write(struct rw_semaphore *sem) ...@@ -222,18 +277,40 @@ void __up_write(struct rw_semaphore *sem)
sem->activity = 0; sem->activity = 0;
if (!list_empty(&sem->wait_list)) if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem); sem = __rwsem_do_wake(sem, 1);
spin_unlock(&sem->wait_lock); spin_unlock(&sem->wait_lock);
rwsemtrace(sem,"Leaving __up_write"); rwsemtrace(sem,"Leaving __up_write");
} }
/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 *
 * The writer's single activity unit is kept (activity = 1) so the
 * semaphore is now held for reading by the caller; queued readers are
 * then woken, but queued writers are not (wakewrite == 0).
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	/* fix: trace messages previously said "__rwsem_downgrade",
	 * which does not match this function's name or the naming of
	 * every other rwsemtrace() call in this file */
	rwsemtrace(sem,"Entering __downgrade_write");

	spin_lock(&sem->wait_lock);

	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0); /* readers only */

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem,"Leaving __downgrade_write");
}
EXPORT_SYMBOL(init_rwsem); EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read); EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write); EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read); EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write); EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
#if RWSEM_DEBUG #if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace); EXPORT_SYMBOL(rwsemtrace);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment