Commit 8282947f authored by Thomas Gleixner, committed by Ingo Molnar

locking/rwlock: Provide RT variant

Similar to rw_semaphores, on RT the rwlock substitution is not writer fair,
because it's not feasible to have a writer inherit its priority to
multiple readers. Readers blocked on a writer follow the normal rules of
priority inheritance. Like RT spinlocks, RT rwlocks are state preserving
across the slow lock operations (contended case).
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211303.882793524@linutronix.de
parent 0f383b6d
// SPDX-License-Identifier: GPL-2.0-only
#ifndef __LINUX_RWLOCK_RT_H
#define __LINUX_RWLOCK_RT_H
#ifndef __LINUX_SPINLOCK_RT_H
#error Do not #include directly. Use <linux/spinlock.h>.
#endif
/*
 * __rt_rwlock_init() - lockdep initialization hook for RT rwlocks.
 *
 * With CONFIG_DEBUG_LOCK_ALLOC the out-of-line variant registers the
 * lockdep map for @rwlock; without it the stub compiles away entirely.
 * The stub's @name parameter is const-qualified to match the lockdep
 * prototype — callers hand in the string literal produced by #rwl in
 * rwlock_init(), so both variants must accept a const string.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
struct lock_class_key *key);
#else
static inline void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
struct lock_class_key *key)
{
}
#endif
/*
 * rwlock_init() - runtime initializer for an RT-substituted rwlock.
 *
 * Initializes the underlying rwbase and registers a static lockdep
 * class key unique to this initialization site (one __key per use of
 * the macro, as it lives in the do/while block's scope).
 */
#define rwlock_init(rwl) \
do { \
static struct lock_class_key __key; \
\
init_rwbase_rt(&(rwl)->rwbase); \
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
/*
 * Out-of-line RT implementations of the rwlock operations; the inline
 * wrappers below map the generic rwlock API onto these.
 */
extern void rt_read_lock(rwlock_t *rwlock);
extern int rt_read_trylock(rwlock_t *rwlock);
extern void rt_read_unlock(rwlock_t *rwlock);
extern void rt_write_lock(rwlock_t *rwlock);
extern int rt_write_trylock(rwlock_t *rwlock);
extern void rt_write_unlock(rwlock_t *rwlock);
/* read_lock() - acquire @rwlock for reading; may sleep on RT. */
static __always_inline void read_lock(rwlock_t *rwlock)
{
rt_read_lock(rwlock);
}
/*
 * read_lock_bh() - disable bottom halves, then acquire for reading.
 * BH disabling must precede the acquisition; read_unlock_bh() releases
 * in the inverse order.
 */
static __always_inline void read_lock_bh(rwlock_t *rwlock)
{
local_bh_disable();
rt_read_lock(rwlock);
}
/*
 * read_lock_irq() - on RT interrupts are intentionally NOT disabled;
 * the substitution lock is sleepable, so only the lock itself is taken.
 */
static __always_inline void read_lock_irq(rwlock_t *rwlock)
{
rt_read_lock(rwlock);
}
/*
 * read_lock_irqsave() - RT variant: interrupts are not disabled, so
 * there is no real flag state to save.  @flags is zeroed purely to
 * satisfy the generic API contract; typecheck() keeps callers honest.
 */
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
rt_read_lock(lock); \
flags = 0; \
} while (0)
/* read_trylock() - nonzero on success; __cond_lock() annotates for sparse. */
#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
/* read_unlock() - drop a read hold on @rwlock. */
static __always_inline void read_unlock(rwlock_t *rwlock)
{
rt_read_unlock(rwlock);
}
/* read_unlock_bh() - drop the lock, then re-enable bottom halves. */
static __always_inline void read_unlock_bh(rwlock_t *rwlock)
{
rt_read_unlock(rwlock);
local_bh_enable();
}
/* read_unlock_irq() - irqs were never disabled on RT; plain unlock. */
static __always_inline void read_unlock_irq(rwlock_t *rwlock)
{
rt_read_unlock(rwlock);
}
/* read_unlock_irqrestore() - @flags is ignored; see read_lock_irqsave(). */
static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
{
rt_read_unlock(rwlock);
}
/* write_lock() - acquire @rwlock exclusively; may sleep on RT. */
static __always_inline void write_lock(rwlock_t *rwlock)
{
rt_write_lock(rwlock);
}
/*
 * write_lock_bh() - disable bottom halves before taking the lock;
 * write_unlock_bh() undoes both in the inverse order.
 */
static __always_inline void write_lock_bh(rwlock_t *rwlock)
{
local_bh_disable();
rt_write_lock(rwlock);
}
/* write_lock_irq() - on RT interrupts are intentionally NOT disabled. */
static __always_inline void write_lock_irq(rwlock_t *rwlock)
{
rt_write_lock(rwlock);
}
/*
 * write_lock_irqsave() - RT variant: no interrupt state to save;
 * @flags is zeroed only to keep the generic calling convention intact.
 */
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
rt_write_lock(lock); \
flags = 0; \
} while (0)
/* write_trylock() - nonzero on success; __cond_lock() annotates for sparse. */
#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
/*
 * write_trylock_irqsave() - statement-expression returning the trylock
 * result; @flags is zeroed unconditionally (irqs are never disabled).
 */
#define write_trylock_irqsave(lock, flags) \
({ \
int __locked; \
\
typecheck(unsigned long, flags); \
flags = 0; \
__locked = write_trylock(lock); \
__locked; \
})
/* write_unlock() - drop the exclusive hold on @rwlock. */
static __always_inline void write_unlock(rwlock_t *rwlock)
{
rt_write_unlock(rwlock);
}
/* write_unlock_bh() - drop the lock, then re-enable bottom halves. */
static __always_inline void write_unlock_bh(rwlock_t *rwlock)
{
rt_write_unlock(rwlock);
local_bh_enable();
}
/* write_unlock_irq() - irqs were never disabled on RT; plain unlock. */
static __always_inline void write_unlock_irq(rwlock_t *rwlock)
{
rt_write_unlock(rwlock);
}
/* write_unlock_irqrestore() - @flags is ignored; see write_lock_irqsave(). */
static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
{
rt_write_unlock(rwlock);
}
/*
 * rwlock_is_contended() - always reports uncontended here; @lock is
 * evaluated (void-cast) only so side effects in the argument survive.
 */
#define rwlock_is_contended(lock) (((void)(lock), 0))
#endif /* __LINUX_RWLOCK_RT_H */
...@@ -5,9 +5,19 @@ ...@@ -5,9 +5,19 @@
# error "Do not include directly, include spinlock_types.h" # error "Do not include directly, include spinlock_types.h"
#endif #endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Shared lockdep map initializer for rwlock_t, usable by both the !RT
 * and RT layouts.  LD_WAIT_CONFIG marks a lock whose wait semantics
 * depend on the preemption model (spinning on !RT, sleeping on RT).
 */
# define RW_DEP_MAP_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
}
#else
/* Without lockdep the initializer expands to nothing. */
# define RW_DEP_MAP_INIT(lockname)
#endif
#ifndef CONFIG_PREEMPT_RT
/* /*
* include/linux/rwlock_types.h - generic rwlock type definitions * generic rwlock type definitions and initializers
* and initializers
* *
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL). * Released under the General Public License (GPL).
...@@ -25,16 +35,6 @@ typedef struct { ...@@ -25,16 +35,6 @@ typedef struct {
#define RWLOCK_MAGIC 0xdeaf1eed #define RWLOCK_MAGIC 0xdeaf1eed
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
}
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
#ifdef CONFIG_DEBUG_SPINLOCK #ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname) \ #define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
...@@ -50,4 +50,29 @@ typedef struct { ...@@ -50,4 +50,29 @@ typedef struct {
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#else /* !CONFIG_PREEMPT_RT */
#include <linux/rwbase_rt.h>
/*
 * RT substitution: rwlock_t wraps the rtmutex-based reader/writer core.
 */
typedef struct {
struct rwbase_rt rwbase; /* rtmutex-based reader/writer base lock */
atomic_t readers; /* NOTE(review): reader accounting also lives in rwbase — confirm this field's role */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} rwlock_t;
/* Static initializer: unlocked rwbase plus the optional lockdep map. */
#define __RWLOCK_RT_INITIALIZER(name) \
{ \
.rwbase = __RWBASE_INITIALIZER(name), \
RW_DEP_MAP_INIT(name) \
}
#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
/* DEFINE_RWLOCK() - define and statically initialize an RT rwlock. */
#define DEFINE_RWLOCK(name) \
rwlock_t name = __RW_LOCK_UNLOCKED(name)
#endif /* CONFIG_PREEMPT_RT */
#endif /* __LINUX_RWLOCK_TYPES_H */ #endif /* __LINUX_RWLOCK_TYPES_H */
...@@ -146,4 +146,6 @@ static inline int spin_is_locked(spinlock_t *lock) ...@@ -146,4 +146,6 @@ static inline int spin_is_locked(spinlock_t *lock)
#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock)) #define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
#include <linux/rwlock_rt.h>
#endif #endif
...@@ -251,7 +251,7 @@ config ARCH_USE_QUEUED_RWLOCKS ...@@ -251,7 +251,7 @@ config ARCH_USE_QUEUED_RWLOCKS
config QUEUED_RWLOCKS config QUEUED_RWLOCKS
def_bool y if ARCH_USE_QUEUED_RWLOCKS def_bool y if ARCH_USE_QUEUED_RWLOCKS
depends on SMP depends on SMP && !PREEMPT_RT
config ARCH_HAS_MMIOWB config ARCH_HAS_MMIOWB
bool bool
......
...@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ ...@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
* __[spin|read|write]_lock_bh() * __[spin|read|write]_lock_bh()
*/ */
BUILD_LOCK_OPS(spin, raw_spinlock); BUILD_LOCK_OPS(spin, raw_spinlock);
#ifndef CONFIG_PREEMPT_RT
BUILD_LOCK_OPS(read, rwlock); BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock); BUILD_LOCK_OPS(write, rwlock);
#endif
#endif #endif
...@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) ...@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_unlock_bh); EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif #endif
#ifndef CONFIG_PREEMPT_RT
#ifndef CONFIG_INLINE_READ_TRYLOCK #ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock) int __lockfunc _raw_read_trylock(rwlock_t *lock)
{ {
...@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) ...@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_raw_write_unlock_bh); EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif #endif
#endif /* !CONFIG_PREEMPT_RT */
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
......
...@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, ...@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__raw_spin_lock_init); EXPORT_SYMBOL(__raw_spin_lock_init);
#ifndef CONFIG_PREEMPT_RT
void __rwlock_init(rwlock_t *lock, const char *name, void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key) struct lock_class_key *key)
{ {
...@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, ...@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
} }
EXPORT_SYMBOL(__rwlock_init); EXPORT_SYMBOL(__rwlock_init);
#endif
static void spin_dump(raw_spinlock_t *lock, const char *msg) static void spin_dump(raw_spinlock_t *lock, const char *msg)
{ {
...@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock) ...@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
arch_spin_unlock(&lock->raw_lock); arch_spin_unlock(&lock->raw_lock);
} }
#ifndef CONFIG_PREEMPT_RT
static void rwlock_bug(rwlock_t *lock, const char *msg) static void rwlock_bug(rwlock_t *lock, const char *msg)
{ {
if (!debug_locks_off()) if (!debug_locks_off())
...@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock) ...@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock); debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock); arch_write_unlock(&lock->raw_lock);
} }
#endif /* !CONFIG_PREEMPT_RT */
...@@ -127,3 +127,134 @@ void __rt_spin_lock_init(spinlock_t *lock, const char *name, ...@@ -127,3 +127,134 @@ void __rt_spin_lock_init(spinlock_t *lock, const char *name,
} }
EXPORT_SYMBOL(__rt_spin_lock_init); EXPORT_SYMBOL(__rt_spin_lock_init);
#endif #endif
/*
 * RT-specific reader/writer locks.
 *
 * The rwbase_* definitions below parameterize the generic reader/writer
 * core in rwbase_rt.c for the rwlock flavour.  Task state is saved and
 * restored around the slowpath, which makes the lock state preserving
 * across contended acquisitions.
 */
#define rwbase_set_and_save_current_state(state) \
current_save_and_set_rtlock_wait_state()
#define rwbase_restore_current_state() \
current_restore_rtlock_saved_state()
/*
 * Writer-side lock: fastpath cmpxchg of the owner field, rtlock slowpath
 * otherwise.  @state is ignored — rtlock waits are not interruptible —
 * and the return is always 0 (success) to satisfy the rwbase template.
 */
static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
rtlock_slowlock(rtm);
return 0;
}
/* Slowpath entry with the rtmutex wait_lock already held by the caller. */
static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
{
rtlock_slowlock_locked(rtm);
return 0;
}
/*
 * rwbase_rtmutex_unlock() - release the rtmutex underlying the lock.
 *
 * The fastpath must use RELEASE ordering so that all stores inside the
 * critical section are visible before the lock is observed as free; an
 * acquire-ordered cmpxchg here would not order the preceding accesses.
 * Falls back to the slowpath when the owner field is not plain 'current'
 * (i.e. waiters are queued).
 */
static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
if (likely(rt_mutex_cmpxchg_release(rtm, current, NULL)))
return;
rt_mutex_slowunlock(rtm);
}
/* Trylock: 1 on fastpath cmpxchg success, else defer to the slow trylock. */
static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
return 1;
return rt_mutex_slowtrylock(rtm);
}
/* rwlock waiters are never interruptible by signals. */
#define rwbase_signal_pending_state(state, current) (0)
#define rwbase_schedule() \
schedule_rtlock()
/* Instantiate the shared reader/writer core with the glue defined above. */
#include "rwbase_rt.c"
/*
 * The common functions which get wrapped into the rwlock API.
 */
/* rt_read_trylock() - nonblocking read acquisition; nonzero on success. */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
int ret;
ret = rwbase_read_trylock(&rwlock->rwbase);
if (ret) {
/* Mirror rt_read_lock(): lockdep acquire (trylock=1), then enter
the RCU / migration-disabled region held until unlock. */
rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
rcu_read_lock();
migrate_disable();
}
return ret;
}
EXPORT_SYMBOL(rt_read_trylock);
/* rt_write_trylock() - nonblocking write acquisition; nonzero on success. */
int __sched rt_write_trylock(rwlock_t *rwlock)
{
int ret;
ret = rwbase_write_trylock(&rwlock->rwbase);
if (ret) {
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
rcu_read_lock();
migrate_disable();
}
return ret;
}
EXPORT_SYMBOL(rt_write_trylock);
/*
 * rt_read_lock() - acquire for reading; may block (TASK_RTLOCK_WAIT).
 * On success the task holds an RCU read-side section and has migration
 * disabled until rt_read_unlock(), preserving non-RT rwlock semantics.
 */
void __sched rt_read_lock(rwlock_t *rwlock)
{
___might_sleep(__FILE__, __LINE__, 0);
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
rcu_read_lock();
migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);
/* rt_write_lock() - exclusive acquisition; same post-conditions as above. */
void __sched rt_write_lock(rwlock_t *rwlock)
{
___might_sleep(__FILE__, __LINE__, 0);
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
rcu_read_lock();
migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);
/*
 * rt_read_unlock() - drop a read hold: lockdep release, leave the
 * migration-disabled / RCU region, then release the rwbase last.
 */
void __sched rt_read_unlock(rwlock_t *rwlock)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
migrate_enable();
rcu_read_unlock();
rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);
/*
 * rt_write_unlock() - drop the write hold.  Note the teardown order
 * differs from the read side (rcu_read_unlock() before migrate_enable());
 * this ordering is deliberate — do not "normalize" it.
 */
void __sched rt_write_unlock(rwlock_t *rwlock)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
rcu_read_unlock();
migrate_enable();
rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);
/* rt_rwlock_is_contended() - report whether waiters exist on the rwbase. */
int __sched rt_rwlock_is_contended(rwlock_t *rwlock)
{
return rw_base_is_contended(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_rwlock_is_contended);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * __rt_rwlock_init() - register the lockdep map for an RT rwlock.
 * LD_WAIT_CONFIG: wait semantics depend on the preemption model.
 */
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
struct lock_class_key *key)
{
debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment