Commit a23ba907 authored by Davidlohr Bueso, committed by Linus Torvalds

locking/rtmutex: replace top-waiter and pi_waiters leftmost caching

... with the generic rbtree flavor instead. No changes
in semantics whatsoever.

Link: http://lkml.kernel.org/r/20170719014603.19029-10-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2161573e
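
For readers unfamiliar with the cached flavor: struct rb_root_cached embeds the plain rb_root plus an rb_leftmost pointer, so the rbtree core tracks the leftmost (highest-priority) node itself instead of every user carrying a hand-rolled *_leftmost field. The sketch below is not part of the patch; it is a minimal, self-contained illustration of the caller-side pattern the conversion relies on, with invented names (demo_node, demo_insert, demo_top, demo_remove) used purely for illustration:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node rb;
	int prio;			/* lower value == higher priority */
};

static struct rb_root_cached demo_tree = RB_ROOT_CACHED;

static void demo_insert(struct demo_node *dn)
{
	struct rb_node **link = &demo_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct demo_node *entry = rb_entry(*link, struct demo_node, rb);

		parent = *link;
		if (dn->prio < entry->prio) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* not the new smallest key */
		}
	}

	/* Insert and let the rbtree core maintain the leftmost cache. */
	rb_link_node(&dn->rb, parent, link);
	rb_insert_color_cached(&dn->rb, &demo_tree, leftmost);
}

static struct demo_node *demo_top(void)
{
	struct rb_node *left = rb_first_cached(&demo_tree);	/* O(1) lookup */

	return left ? rb_entry(left, struct demo_node, rb) : NULL;
}

static void demo_remove(struct demo_node *dn)
{
	/* The cached leftmost is updated internally; no manual fixup. */
	rb_erase_cached(&dn->rb, &demo_tree);
}

At insert time the caller only reports whether the new node landed leftmost; rb_insert_color_cached() and rb_erase_cached() keep the cache consistent, which is why the patch can drop the manual waiters_leftmost/pi_waiters_leftmost bookkeeping without any change in semantics.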
@@ -175,9 +175,8 @@ extern struct cred init_cred;
 #ifdef CONFIG_RT_MUTEXES
 # define INIT_RT_MUTEXES(tsk)					\
-	.pi_waiters = RB_ROOT,					\
-	.pi_top_task = NULL,					\
-	.pi_waiters_leftmost = NULL,
+	.pi_waiters = RB_ROOT_CACHED,				\
+	.pi_top_task = NULL,
 #else
 # define INIT_RT_MUTEXES(tsk)
 #endif
...
@@ -22,18 +22,17 @@ extern int max_lock_depth; /* for sysctl */
  * The rt_mutex structure
  *
  * @wait_lock:	spinlock to protect the structure
- * @waiters:	rbtree root to enqueue waiters in priority order
- * @waiters_leftmost: top waiter
+ * @waiters:	rbtree root to enqueue waiters in priority order;
+ *		caches top-waiter (leftmost node).
  * @owner:	the mutex owner
  */
 struct rt_mutex {
 	raw_spinlock_t		wait_lock;
-	struct rb_root		waiters;
-	struct rb_node		*waiters_leftmost;
+	struct rb_root_cached	waiters;
 	struct task_struct	*owner;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 	int			save_state;
 	const char		*name, *file;
 	int			line;
 	void			*magic;
 #endif
@@ -84,7 +83,7 @@ do { \
 #define __RT_MUTEX_INITIALIZER(mutexname) \
 	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-	, .waiters = RB_ROOT \
+	, .waiters = RB_ROOT_CACHED \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
 	__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
...
@@ -812,8 +812,7 @@ struct task_struct {
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task: */
-	struct rb_root			pi_waiters;
-	struct rb_node			*pi_waiters_leftmost;
+	struct rb_root_cached		pi_waiters;
 	/* Updated under owner's pi_lock and rq lock */
 	struct task_struct		*pi_top_task;
 	/* Deadlock detection and priority inheritance handling: */
...
@@ -1462,8 +1462,7 @@ static void rt_mutex_init_task(struct task_struct *p)
 {
 	raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-	p->pi_waiters = RB_ROOT;
-	p->pi_waiters_leftmost = NULL;
+	p->pi_waiters = RB_ROOT_CACHED;
 	p->pi_top_task = NULL;
 	p->pi_blocked_on = NULL;
 #endif
...
@@ -58,7 +58,7 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
 void rt_mutex_debug_task_free(struct task_struct *task)
 {
-	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters));
+	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
 	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
 }
...
@@ -271,10 +271,10 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
 static void
 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 {
-	struct rb_node **link = &lock->waiters.rb_node;
+	struct rb_node **link = &lock->waiters.rb_root.rb_node;
 	struct rb_node *parent = NULL;
 	struct rt_mutex_waiter *entry;
-	int leftmost = 1;
+	bool leftmost = true;
 
 	while (*link) {
 		parent = *link;
@@ -283,15 +283,12 @@ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 			link = &parent->rb_left;
 		} else {
 			link = &parent->rb_right;
-			leftmost = 0;
+			leftmost = false;
 		}
 	}
 
-	if (leftmost)
-		lock->waiters_leftmost = &waiter->tree_entry;
-
 	rb_link_node(&waiter->tree_entry, parent, link);
-	rb_insert_color(&waiter->tree_entry, &lock->waiters);
+	rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
 }
 
 static void
@@ -300,20 +297,17 @@ rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
 	if (RB_EMPTY_NODE(&waiter->tree_entry))
 		return;
 
-	if (lock->waiters_leftmost == &waiter->tree_entry)
-		lock->waiters_leftmost = rb_next(&waiter->tree_entry);
-
-	rb_erase(&waiter->tree_entry, &lock->waiters);
+	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
 	RB_CLEAR_NODE(&waiter->tree_entry);
 }
 
 static void
 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
 {
-	struct rb_node **link = &task->pi_waiters.rb_node;
+	struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
 	struct rb_node *parent = NULL;
 	struct rt_mutex_waiter *entry;
-	int leftmost = 1;
+	bool leftmost = true;
 
 	while (*link) {
 		parent = *link;
@@ -322,15 +316,12 @@ rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
 			link = &parent->rb_left;
 		} else {
 			link = &parent->rb_right;
-			leftmost = 0;
+			leftmost = false;
 		}
 	}
 
-	if (leftmost)
-		task->pi_waiters_leftmost = &waiter->pi_tree_entry;
-
 	rb_link_node(&waiter->pi_tree_entry, parent, link);
-	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
+	rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
 }
 
 static void
@@ -339,10 +330,7 @@ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
 	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
 		return;
 
-	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
-		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
-
-	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
+	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
 }
@@ -1657,8 +1645,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name,
 {
 	lock->owner = NULL;
 	raw_spin_lock_init(&lock->wait_lock);
-	lock->waiters = RB_ROOT;
-	lock->waiters_leftmost = NULL;
+	lock->waiters = RB_ROOT_CACHED;
 
 	if (name && key)
 		debug_rt_mutex_init(lock, name, key);
...
@@ -45,7 +45,7 @@ struct rt_mutex_waiter {
 static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
 {
-	return !RB_EMPTY_ROOT(&lock->waiters);
+	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
 }
 
 static inline struct rt_mutex_waiter *
@@ -53,8 +53,8 @@ rt_mutex_top_waiter(struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *w;
 
-	w = rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter,
-		     tree_entry);
+	w = rb_entry(lock->waiters.rb_leftmost,
+		     struct rt_mutex_waiter, tree_entry);
 	BUG_ON(w->lock != lock);
 
 	return w;
@@ -62,14 +62,14 @@ rt_mutex_top_waiter(struct rt_mutex *lock)
 static inline int task_has_pi_waiters(struct task_struct *p)
 {
-	return !RB_EMPTY_ROOT(&p->pi_waiters);
+	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
 }
 
 static inline struct rt_mutex_waiter *
 task_top_pi_waiter(struct task_struct *p)
 {
-	return rb_entry(p->pi_waiters_leftmost, struct rt_mutex_waiter,
-			pi_tree_entry);
+	return rb_entry(p->pi_waiters.rb_leftmost,
+			struct rt_mutex_waiter, pi_tree_entry);
 }
 
 #else
...