Commit 6cef7ff6 authored by Waiman Long, committed by Ingo Molnar

locking/rwsem: Code cleanup after files merging

After merging all the relevant rwsem code into one single file, there
are a number of optimizations and cleanups that can be done:

 1) Remove all the EXPORT_SYMBOL() calls for functions that are not
    accessed elsewhere.
 2) Remove all the __visible tags as none of the functions will be
    called from assembly code anymore.
 3) Make all the internal functions static.
 4) Remove some unneeded blank lines.
 5) Remove the intermediate rwsem_down_{read|write}_failed*() functions
    and rename __rwsem_down_{read|write}_failed_common() to
    rwsem_down_{read|write}_slowpath().
 6) Remove "__" prefix of __rwsem_mark_wake().
 7) Use atomic_long_try_cmpxchg_acquire() as much as possible (a brief
    sketch of this pattern follows the message body).
 8) Remove the rwsem_rtrylock and rwsem_wtrylock lock events as they
    are not that useful.

That enables the compiler to do better optimization and reduce code
size. The text+data size of rwsem.o on an x86-64 machine with gcc8 was
reduced from 10237 bytes to 5030 bytes with this change.
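
To make item 7 above concrete, here is a brief illustrative sketch of the
cmpxchg() -> try_cmpxchg() conversion. The helper names are hypothetical
(they do not appear in the patch); the atomic_long_*() calls are the real
kernel API:

	/* Old pattern: compare the returned old value by hand. */
	static inline bool write_trylock_old(struct rw_semaphore *sem)
	{
		long tmp = atomic_long_cmpxchg_acquire(&sem->count,
						       RWSEM_UNLOCKED_VALUE,
						       RWSEM_WRITER_LOCKED);
		return tmp == RWSEM_UNLOCKED_VALUE;
	}

	/*
	 * New pattern: try_cmpxchg() folds the comparison into the
	 * primitive (on x86 a single LOCK CMPXCHG plus a flags test)
	 * and, on failure, writes the observed count back into tmp,
	 * which keeps retry loops cheap.
	 */
	static inline bool write_trylock_new(struct rw_semaphore *sem)
	{
		long tmp = RWSEM_UNLOCKED_VALUE;

		return atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
						       RWSEM_WRITER_LOCKED);
	}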
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: huang ying <huang.ying.caritas@gmail.com>
Link: https://lkml.kernel.org/r/20190520205918.22251-6-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5dec94d4
@@ -61,7 +61,5 @@ LOCK_EVENT(rwsem_opt_fail)	/* # of failed opt-spinnings */
 LOCK_EVENT(rwsem_rlock)		/* # of read locks acquired */
 LOCK_EVENT(rwsem_rlock_fast)	/* # of fast read locks acquired */
 LOCK_EVENT(rwsem_rlock_fail)	/* # of failed read lock acquisitions */
-LOCK_EVENT(rwsem_rtrylock)	/* # of read trylock calls */
 LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired */
 LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions */
-LOCK_EVENT(rwsem_wtrylock)	/* # of write trylock calls */
@@ -205,7 +205,6 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	osq_lock_init(&sem->osq);
 #endif
 }
 EXPORT_SYMBOL(__init_rwsem);
-
 
 enum rwsem_waiter_type {
@@ -237,7 +236,7 @@ enum rwsem_wake_type {
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only marked woken if downgrading is false
  */
-static void __rwsem_mark_wake(struct rw_semaphore *sem,
-			      enum rwsem_wake_type wake_type,
-			      struct wake_q_head *wake_q)
+static void rwsem_mark_wake(struct rw_semaphore *sem,
+			    enum rwsem_wake_type wake_type,
+			    struct wake_q_head *wake_q)
 {
@@ -330,7 +329,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 		/*
 		 * Ensure calling get_task_struct() before setting the reader
-		 * waiter to nil such that rwsem_down_read_failed() cannot
+		 * waiter to nil such that rwsem_down_read_slowpath() cannot
 		 * race with do_exit() by always holding a reference count
 		 * to the task to wakeup.
 		 */
@@ -516,8 +515,8 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 /*
  * Wait for the read lock to be granted
  */
-static inline struct rw_semaphore __sched *
-__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+static struct rw_semaphore __sched *
+rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 {
 	long count, adjustment = -RWSEM_READER_BIAS;
 	struct rwsem_waiter waiter;
@@ -555,7 +554,7 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 	 */
 	if (!(count & RWSEM_LOCK_MASK) ||
 	    (!(count & RWSEM_WRITER_MASK) && (adjustment & RWSEM_FLAG_WAITERS)))
-		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
 	raw_spin_unlock_irq(&sem->wait_lock);
 	wake_up_q(&wake_q);
@@ -589,25 +588,11 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 	return ERR_PTR(-EINTR);
 }
 
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed);
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed_killable(struct rw_semaphore *sem)
-{
-	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed_killable);
-
 /*
  * Wait until we successfully acquire the write lock
  */
-static inline struct rw_semaphore *
-__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
+static struct rw_semaphore *
+rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 {
 	long count;
 	bool waiting = true; /* any queued threads before us */
@@ -646,7 +631,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 		 */
 		if (!(count & RWSEM_WRITER_MASK) &&
 		    (count & RWSEM_READER_MASK)) {
-			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
+			rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
 			/*
 			 * The wakeup is normally called _after_ the wait_lock
 			 * is released, but given that we are proactively waking
@@ -700,7 +685,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	if (list_empty(&sem->wait_list))
 		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
 	else
-		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 	raw_spin_unlock_irq(&sem->wait_lock);
 	wake_up_q(&wake_q);
 	lockevent_inc(rwsem_wlock_fail);
@@ -708,26 +693,11 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	return ERR_PTR(-EINTR);
 }
 
-__visible struct rw_semaphore * __sched
-rwsem_down_write_failed(struct rw_semaphore *sem)
-{
-	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(rwsem_down_write_failed);
-
-__visible struct rw_semaphore * __sched
-rwsem_down_write_failed_killable(struct rw_semaphore *sem)
-{
-	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(rwsem_down_write_failed_killable);
-
 /*
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
  */
-__visible
-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 	DEFINE_WAKE_Q(wake_q);
@@ -735,22 +705,20 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (!list_empty(&sem->wait_list))
-		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	wake_up_q(&wake_q);
 
 	return sem;
 }
-EXPORT_SYMBOL(rwsem_wake);
 
 /*
  * downgrade a write lock into a read lock
  * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
  */
-__visible
-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 	DEFINE_WAKE_Q(wake_q);
@@ -758,14 +726,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (!list_empty(&sem->wait_list))
-		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
+		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	wake_up_q(&wake_q);
 
 	return sem;
 }
-EXPORT_SYMBOL(rwsem_downgrade_wake);
 
 /*
  * lock for reading
@@ -774,7 +741,7 @@ inline void __down_read(struct rw_semaphore *sem)
 {
 	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
 			&sem->count) & RWSEM_READ_FAILED_MASK)) {
-		rwsem_down_read_failed(sem);
+		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
 		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
 					RWSEM_READER_OWNED), sem);
 	} else {
@@ -786,7 +753,7 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 {
 	if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
 			&sem->count) & RWSEM_READ_FAILED_MASK)) {
-		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
 			return -EINTR;
 		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
 					RWSEM_READER_OWNED), sem);
@@ -803,7 +770,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 	 */
 	long tmp = RWSEM_UNLOCKED_VALUE;
 
-	lockevent_inc(rwsem_rtrylock);
 	do {
 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
 					tmp + RWSEM_READER_BIAS)) {
@@ -819,30 +785,33 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_long_cmpxchg_acquire(&sem->count, 0,
-						 RWSEM_WRITER_LOCKED)))
-		rwsem_down_write_failed(sem);
+	long tmp = RWSEM_UNLOCKED_VALUE;
+
+	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+						      RWSEM_WRITER_LOCKED)))
+		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
 	rwsem_set_owner(sem);
 }
 
 static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_long_cmpxchg_acquire(&sem->count, 0,
-						 RWSEM_WRITER_LOCKED)))
-		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+	long tmp = RWSEM_UNLOCKED_VALUE;
+
+	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+						      RWSEM_WRITER_LOCKED))) {
+		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
 			return -EINTR;
+	}
 	rwsem_set_owner(sem);
 	return 0;
 }
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	long tmp;
+	long tmp = RWSEM_UNLOCKED_VALUE;
 
-	lockevent_inc(rwsem_wtrylock);
-	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
-					  RWSEM_WRITER_LOCKED);
-	if (tmp == RWSEM_UNLOCKED_VALUE) {
+	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+					    RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
 		return true;
 	}
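
The trylock conversions above depend on try_cmpxchg() writing the observed
count back into tmp on failure, which is also what lets the retry loop in
__down_read_trylock() skip an explicit reload. A simplified user-space model
of that loop (hypothetical code using the GCC/Clang __atomic builtins instead
of the kernel's atomic_long_*() API; the bit values are illustrative):

	#include <stdbool.h>

	#define READER_BIAS	0x100L
	#define READ_FAIL_MASK	0x3L	/* writer held or handoff pending */

	static bool read_trylock_model(long *count)
	{
		long tmp = __atomic_load_n(count, __ATOMIC_RELAXED);

		do {
			if (tmp & READ_FAIL_MASK)
				return false;
			/*
			 * On CAS failure, tmp is refreshed with the current
			 * value of *count, so the loop retries without an
			 * extra explicit load.
			 */
		} while (!__atomic_compare_exchange_n(count, &tmp,
						      tmp + READER_BIAS,
						      false, __ATOMIC_ACQUIRE,
						      __ATOMIC_RELAXED));
		return true;
	}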
@@ -856,12 +825,11 @@ inline void __up_read(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
-				sem);
+	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED), sem);
 	rwsem_clear_reader_owned(sem);
 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
-	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS))
-			== RWSEM_FLAG_WAITERS))
+	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
+			RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
 }
@@ -870,10 +838,12 @@ inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
+	long tmp;
+
 	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
 	rwsem_clear_owner(sem);
-	if (unlikely(atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED,
-						   &sem->count) & RWSEM_FLAG_WAITERS))
+	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
+	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
 }
@@ -909,7 +879,6 @@ void __sched down_read(struct rw_semaphore *sem)
 	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 }
-
 EXPORT_SYMBOL(down_read);
 
 int __sched down_read_killable(struct rw_semaphore *sem)
@@ -924,7 +893,6 @@ int __sched down_read_killable(struct rw_semaphore *sem)
 	return 0;
 }
-
 EXPORT_SYMBOL(down_read_killable);
 
 /*
@@ -938,7 +906,6 @@ int down_read_trylock(struct rw_semaphore *sem)
 		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
 	return ret;
 }
-
 EXPORT_SYMBOL(down_read_trylock);
 
 /*
@@ -948,10 +915,8 @@ void __sched down_write(struct rw_semaphore *sem)
 {
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
-
 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
-
 EXPORT_SYMBOL(down_write);
 
 /*
@@ -962,14 +927,14 @@ int __sched down_write_killable(struct rw_semaphore *sem)
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 
-	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
+				  __down_write_killable)) {
 		rwsem_release(&sem->dep_map, 1, _RET_IP_);
 		return -EINTR;
 	}
 
 	return 0;
 }
-
 EXPORT_SYMBOL(down_write_killable);
 
 /*
@@ -984,7 +949,6 @@ int down_write_trylock(struct rw_semaphore *sem)
 	return ret;
 }
-
 EXPORT_SYMBOL(down_write_trylock);
 
 /*
@@ -993,10 +957,8 @@ EXPORT_SYMBOL(down_write_trylock);
 void up_read(struct rw_semaphore *sem)
 {
 	rwsem_release(&sem->dep_map, 1, _RET_IP_);
-
 	__up_read(sem);
 }
-
 EXPORT_SYMBOL(up_read);
 
 /*
@@ -1005,10 +967,8 @@ EXPORT_SYMBOL(up_read);
 void up_write(struct rw_semaphore *sem)
 {
 	rwsem_release(&sem->dep_map, 1, _RET_IP_);
-
 	__up_write(sem);
 }
-
 EXPORT_SYMBOL(up_write);
 
 /*
@@ -1017,10 +977,8 @@ EXPORT_SYMBOL(up_write);
 void downgrade_write(struct rw_semaphore *sem)
 {
 	lock_downgrade(&sem->dep_map, _RET_IP_);
-
 	__downgrade_write(sem);
 }
-
 EXPORT_SYMBOL(downgrade_write);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1029,40 +987,32 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
 {
 	might_sleep();
 	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
-
 	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 }
-
 EXPORT_SYMBOL(down_read_nested);
 
 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
 {
 	might_sleep();
 	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
-
 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
-
 EXPORT_SYMBOL(_down_write_nest_lock);
 
 void down_read_non_owner(struct rw_semaphore *sem)
 {
 	might_sleep();
-
 	__down_read(sem);
 	__rwsem_set_reader_owned(sem, NULL);
 }
-
 EXPORT_SYMBOL(down_read_non_owner);
 
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
-
 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
-
 EXPORT_SYMBOL(down_write_nested);
 
 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
@@ -1070,14 +1020,14 @@ int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
 	might_sleep();
 	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
 
-	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
+				  __down_write_killable)) {
 		rwsem_release(&sem->dep_map, 1, _RET_IP_);
 		return -EINTR;
	}
 
 	return 0;
 }
-
 EXPORT_SYMBOL(down_write_killable_nested);
 
 void up_read_non_owner(struct rw_semaphore *sem)
@@ -1086,7 +1036,6 @@ void up_read_non_owner(struct rw_semaphore *sem)
 				sem);
 	__up_read(sem);
 }
-
 EXPORT_SYMBOL(up_read_non_owner);
 
 #endif