Commit b004aa86 authored by Davidlohr Bueso, committed by Jens Axboe

bcache: optimize barrier usage for RMW atomic bitops

We can avoid the unnecessary barrier on non-LL/SC architectures,
such as x86, where the preceding RMW atomic bitop already implies
full ordering. Instead, use smp_mb__after_atomic(), which is a
no-op there and still emits the required barrier on LL/SC
architectures.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9876e386
@@ -183,7 +183,7 @@ static void update_writeback_rate(struct work_struct *work)
 	 */
 	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
 	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
-	smp_mb();
+	smp_mb__after_atomic();
 
 	/*
 	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
@@ -193,7 +193,7 @@ static void update_writeback_rate(struct work_struct *work)
 	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
 		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
 		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
-		smp_mb();
+		smp_mb__after_atomic();
 		return;
 	}
 
@@ -229,7 +229,7 @@ static void update_writeback_rate(struct work_struct *work)
 	 */
 	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
 	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
-	smp_mb();
+	smp_mb__after_atomic();
 }
 
 static unsigned int writeback_delay(struct cached_dev *dc,
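For context, here is a minimal kernel-style sketch of the pattern this patch optimizes. The worker/waiter functions, the flag word, and the WORKER_RUNNING bit are hypothetical names for illustration, not the actual bcache code: the writer publishes a flag with an RMW atomic bitop and orders it with smp_mb__after_atomic(), which pairs with the reader's full barrier.

#include <linux/bitops.h>	/* set_bit(), clear_bit(), test_bit() */
#include <asm/barrier.h>	/* smp_mb(), smp_mb__after_atomic() */
#include <asm/processor.h>	/* cpu_relax() */

/* Hypothetical flag word and bit, for illustration only. */
static unsigned long flags;
#define WORKER_RUNNING	0

static void worker(void)
{
	/* RMW atomic bitop; already fully ordered on x86. */
	set_bit(WORKER_RUNNING, &flags);
	/*
	 * Compiler barrier only where the bitop above already implies
	 * a full barrier (e.g. x86); emits smp_mb() on LL/SC
	 * architectures that need it. A plain smp_mb() here would be
	 * redundant on x86.
	 */
	smp_mb__after_atomic();

	/* ... work that must not be reordered before the flag ... */

	clear_bit(WORKER_RUNNING, &flags);
	smp_mb__after_atomic();
}

static void waiter(void)
{
	/* Paired with the barriers in worker(). */
	while (test_bit(WORKER_RUNNING, &flags))
		cpu_relax();
	smp_mb();
}

The design point: on x86, locked RMW instructions are already full barriers, so smp_mb__after_atomic() reduces to a compiler barrier and the patch drops a redundant hardware fence, while LL/SC architectures still get the full barrier they require.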