Commit 4b05a1c7 authored by Mikulas Patocka's avatar Mikulas Patocka Committed by Linus Torvalds

percpu-rwsem: use synchronize_sched_expedited

Use synchronize_sched_expedited() instead of synchronize_sched()
to improve mount speed.

This patch improves mount time from 0.500s to 0.013s for Jeff's
test-case.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Reported-and-tested-by: Jeff Chua <jeff.chua.linux@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e23739b4
@@ -13,7 +13,7 @@ struct percpu_rw_semaphore {
 };
 
 #define light_mb()	barrier()
-#define heavy_mb()	synchronize_sched()
+#define heavy_mb()	synchronize_sched_expedited()
 
 static inline void percpu_down_read(struct percpu_rw_semaphore *p)
 {
@@ -51,7 +51,7 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p)
 {
 	mutex_lock(&p->mtx);
 	p->locked = true;
-	synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
+	synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
 	while (__percpu_count(p->counters))
 		msleep(1);
 	heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment