Commit 866b413d authored by David Howells, committed by Linus Torvalds

[PATCH] read-write semaphore downgrade and trylock

Here's a patch from Christoph Hellwig and myself to supply write->read
semaphore downgrade, and also from Brian Watson to supply trylock for rwsems.
parent 8b8c90a3
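
For context: downgrade_write() lets a task that holds an rwsem for writing convert that exclusive hold into a shared (read) hold atomically, with no window in which another writer could slip in between an up and a down. A minimal usage sketch follows; it is illustrative only and not part of the patch (the caller and its names are hypothetical):

/* hypothetical caller -- illustrates the API added by this patch */
static DECLARE_RWSEM(my_sem);

static void update_then_scan(void)
{
	down_write(&my_sem);		/* exclusive: modify protected state */
	/* ... write to the protected data ... */

	downgrade_write(&my_sem);	/* atomically become a reader; queued
					 * readers may now run, writers stay
					 * blocked out */
	/* ... keep reading alongside any other readers ... */

	up_read(&my_sem);		/* drop the read hold as usual */
}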
@@ -46,6 +46,7 @@ struct rwsem_waiter;
extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
extern struct rw_semaphore *FASTCALL(rwsem_downgrade_write(struct rw_semaphore *sem));
/*
* the semaphore definition
@@ -195,6 +196,31 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 ->
: "memory", "cc", "edx");
}
/*
* downgrade write lock to read lock
*/
static inline void __downgrade_write(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"# beginning __downgrade_write\n\t"
LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
" js 2f\n\t" /* jump if the lock is being waited upon */
"1:\n\t"
LOCK_SECTION_START("")
"2:\n\t"
" pushl %%ecx\n\t"
" pushl %%edx\n\t"
" call rwsem_downgrade_wake\n\t"
" popl %%edx\n\t"
" popl %%ecx\n\t"
" jmp 1b\n"
LOCK_SECTION_END
"# ending __downgrade_write\n"
: "=m"(sem->count)
: "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
: "memory", "cc");
}
/*
* implement atomic add functionality
*/
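
In rough C terms, the fast path of __downgrade_write() above is a single locked add of -RWSEM_WAITING_BIAS to the count; only if the result is still negative (tasks are queued behind us) does it drop into the slow path. A sketch under the i386 count layout, not the patch's code:

/* illustrative C equivalent of __downgrade_write() -- a sketch only;
 * the real fast path is the single locked addl above */
static inline void __downgrade_write_sketch(struct rw_semaphore *sem)
{
	/* cancel the RWSEM_WAITING_BIAS this task contributed when it
	 * took the lock for write, leaving one active reader behind */
	if (atomic_add_negative(-RWSEM_WAITING_BIAS,
				(atomic_t *) &sem->count))
		/* sign bit still set => waiters are queued; take the
		 * slow path and wake the readers at the front */
		rwsem_downgrade_wake(sem);
}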
@@ -75,6 +75,16 @@ static inline void up_write(struct rw_semaphore *sem)
rwsemtrace(sem,"Leaving up_write");
}
/*
* downgrade write lock to read lock
*/
static inline void downgrade_write(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering downgrade_write");
__downgrade_write(sem);
rwsemtrace(sem,"Leaving downgrade_write");
}
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_H */
@@ -34,8 +34,9 @@ void rwsemtrace(struct rw_semaphore *sem, const char *str)
* - there must be someone on the queue
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having flags zeroised
* - writers are only woken if wakewrite is non-zero
*/
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
struct rwsem_waiter *waiter;
struct list_head *next;
@@ -44,6 +45,9 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
rwsemtrace(sem,"Entering __rwsem_do_wake");
if (!wakewrite)
goto dont_wake_writers;
/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
try_again:
oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
@@ -64,6 +68,12 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
wake_up_process(waiter->task);
goto out;
/* don't want to wake any writers */
dont_wake_writers:
waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
goto out;
/* grant an infinite number of read locks to the readers at the front of the queue
* - note we increment the 'active part' of the count by the number of readers (less one
* for the activity decrement we've already done) before waking any processes up
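
Per woken reader, the adjustment this comment describes amounts to +RWSEM_ACTIVE_BIAS (one more active reader) and -RWSEM_WAITING_BIAS (one fewer waiter). A condensed sketch of that grant step as a hypothetical helper (the patch keeps this inline in __rwsem_do_wake; the trailing RWSEM_ACTIVE_BIAS correction belongs to the wakewrite path, which has already made the 0 -> 1 transition at try_again):

/* sketch of the reader grant described above -- hypothetical helper,
 * not the patch's exact code */
static int grant_queued_readers(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter =
		list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	int woken = 0;

	/* count the contiguous readers at the head of the queue */
	do {
		woken++;
		if (waiter->list.next == &sem->wait_list)
			break;
		waiter = list_entry(waiter->list.next,
				    struct rwsem_waiter, list);
	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	/* each woken reader becomes active (+RWSEM_ACTIVE_BIAS) and
	 * stops waiting (-RWSEM_WAITING_BIAS); subtract one
	 * RWSEM_ACTIVE_BIAS for the increment already made at
	 * try_again */
	rwsem_atomic_add(woken * (RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS)
			 - RWSEM_ACTIVE_BIAS, sem);
	return woken;
}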
@@ -132,7 +142,7 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
* - it might even be this process, since the waker takes a more active part
*/
if (!(count & RWSEM_ACTIVE_MASK))
-sem = __rwsem_do_wake(sem);
+sem = __rwsem_do_wake(sem,1);
spin_unlock(&sem->wait_lock);
@@ -193,7 +203,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
-sem = __rwsem_do_wake(sem);
+sem = __rwsem_do_wake(sem,1);
spin_unlock(&sem->wait_lock);
@@ -202,6 +212,27 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
return sem;
}
/*
* downgrade a write lock into a read lock
* - caller incremented waiting part of count, and discovered it to be still negative
* - just wake up any readers at the front of the queue
*/
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering rwsem_downgrade_wake");
spin_lock(&sem->wait_lock);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem,0);
spin_unlock(&sem->wait_lock);
rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
return sem;
}
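
To make the arithmetic concrete, assuming the i386 layout already in rwsem.h (RWSEM_ACTIVE_BIAS = 0x00000001, RWSEM_WAITING_BIAS = 0xffff0000): a write holder contributes 0xffff0001 to the count, and one sleeping waiter a further 0xffff0000, so count = 0xfffe0001. __downgrade_write() adds -RWSEM_WAITING_BIAS (+0x00010000), leaving 0xffff0001; the sign bit is still set, so rwsem_downgrade_wake() runs and, if the front of the queue is a reader, grants it the lock. With no waiters the same add leaves 0x00000001, a plain single-reader count, and the slow path is skipped.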
EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
EXPORT_SYMBOL_NOVERS(rwsem_wake);