Commit 2480945c authored by Mikulas Patocka, committed by Alasdair G Kergon

dm bufio: submit writes outside lock

This patch changes dm-bufio so that it submits write I/Os outside of the
lock. If the number of submitted buffers is greater than the number of
requests on the target queue, submit_bio blocks. We want that blocking to
happen outside the lock to improve the latency of other threads that may
need the lock.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 43aeaa29
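
The mechanism is visible in the diff below: while the client lock is held, __write_dirty_buffer() only appends dirty buffers to a caller-provided write_list, and the new __flush_write_list() calls submit_io() for the queued buffers only after dm_bufio_unlock(), so any blocking inside the block layer happens without the lock held. The following is a minimal userspace sketch of that queue-under-lock / submit-after-unlock pattern, not dm-bufio code; the names pending_write, queue_write, flush_writes and submit_one are invented for illustration.

/*
 * Userspace illustration only (assumed names, not kernel APIs):
 * queue the work while the lock is held, do the slow part after
 * the lock has been dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_write {
	int block;			/* stands in for a struct dm_buffer */
	struct pending_write *next;
};

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with client_lock held: just queue, never block here. */
static void queue_write(struct pending_write **list, int block)
{
	struct pending_write *w = malloc(sizeof(*w));

	if (!w)
		abort();
	w->block = block;
	w->next = *list;		/* LIFO; the real patch uses list_add_tail() */
	*list = w;
}

/* Slow, possibly blocking submission - the analogue of submit_io(). */
static void submit_one(int block)
{
	printf("submitting write for block %d\n", block);
}

/* Called after client_lock is released: drain the local list and submit. */
static void flush_writes(struct pending_write **list)
{
	while (*list) {
		struct pending_write *w = *list;

		*list = w->next;
		submit_one(w->block);
		free(w);
	}
}

int main(void)
{
	struct pending_write *write_list = NULL;	/* like LIST_HEAD(write_list) */

	pthread_mutex_lock(&client_lock);
	queue_write(&write_list, 1);	/* cheap while the lock is held */
	queue_write(&write_list, 2);
	pthread_mutex_unlock(&client_lock);

	flush_writes(&write_list);	/* blocking work happens unlocked */
	return 0;
}

As in the patch, the list is local to the caller (compare LIST_HEAD(write_list) in new_read() and dm_bufio_write_dirty_buffers_async()), so the list itself needs no extra locking: each thread drains only the writes it queued.
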
@@ -145,6 +145,7 @@ struct dm_buffer {
 	unsigned long state;
 	unsigned long last_accessed;
 	struct dm_bufio_client *c;
+	struct list_head write_list;
 	struct bio bio;
 	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
 };
@@ -630,7 +631,8 @@ static int do_io_schedule(void *word)
  * - Submit our write and don't wait on it. We set B_WRITING indicating
  *   that there is a write in progress.
  */
-static void __write_dirty_buffer(struct dm_buffer *b)
+static void __write_dirty_buffer(struct dm_buffer *b,
+				 struct list_head *write_list)
 {
 	if (!test_bit(B_DIRTY, &b->state))
 		return;
@@ -639,7 +641,24 @@ static void __write_dirty_buffer(struct dm_buffer *b)
 	wait_on_bit_lock(&b->state, B_WRITING,
 			 do_io_schedule, TASK_UNINTERRUPTIBLE);
 
-	submit_io(b, WRITE, b->block, write_endio);
+	if (!write_list)
+		submit_io(b, WRITE, b->block, write_endio);
+	else
+		list_add_tail(&b->write_list, write_list);
+}
+
+static void __flush_write_list(struct list_head *write_list)
+{
+	struct blk_plug plug;
+	blk_start_plug(&plug);
+	while (!list_empty(write_list)) {
+		struct dm_buffer *b =
+			list_entry(write_list->next, struct dm_buffer, write_list);
+		list_del(&b->write_list);
+		submit_io(b, WRITE, b->block, write_endio);
+		dm_bufio_cond_resched();
+	}
+	blk_finish_plug(&plug);
 }
 
 /*
@@ -655,7 +674,7 @@ static void __make_buffer_clean(struct dm_buffer *b)
 		return;
 
 	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
-	__write_dirty_buffer(b);
+	__write_dirty_buffer(b, NULL);
 	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
 }
 
@@ -802,7 +821,8 @@ static void __free_buffer_wake(struct dm_buffer *b)
 	wake_up(&c->free_buffer_wait);
 }
 
-static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
+static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
+					struct list_head *write_list)
 {
 	struct dm_buffer *b, *tmp;
 
@@ -818,7 +838,7 @@ static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
 		if (no_wait && test_bit(B_WRITING, &b->state))
 			return;
 
-		__write_dirty_buffer(b);
+		__write_dirty_buffer(b, write_list);
 		dm_bufio_cond_resched();
 	}
 }
@@ -853,7 +873,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
  * If we are over threshold_buffers, start freeing buffers.
  * If we're over "limit_buffers", block until we get under the limit.
  */
-static void __check_watermark(struct dm_bufio_client *c)
+static void __check_watermark(struct dm_bufio_client *c,
+			      struct list_head *write_list)
 {
 	unsigned long threshold_buffers, limit_buffers;
 
@@ -872,7 +893,7 @@ static void __check_watermark(struct dm_bufio_client *c)
 	}
 
 	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
-		__write_dirty_buffers_async(c, 1);
+		__write_dirty_buffers_async(c, 1, write_list);
 }
 
 /*
@@ -897,7 +918,8 @@ static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
  *--------------------------------------------------------------*/
 
 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
-				     enum new_flag nf, int *need_submit)
+				     enum new_flag nf, int *need_submit,
+				     struct list_head *write_list)
 {
 	struct dm_buffer *b, *new_b = NULL;
 
@@ -924,7 +946,7 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 		goto found_buffer;
 	}
 
-	__check_watermark(c);
+	__check_watermark(c, write_list);
 
 	b = new_b;
 	b->hold_count = 1;
@@ -992,10 +1014,14 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	int need_submit;
 	struct dm_buffer *b;
 
+	LIST_HEAD(write_list);
+
 	dm_bufio_lock(c);
-	b = __bufio_new(c, block, nf, &need_submit);
+	b = __bufio_new(c, block, nf, &need_submit, &write_list);
 	dm_bufio_unlock(c);
 
+	__flush_write_list(&write_list);
+
 	if (!b)
 		return b;
 
@@ -1047,6 +1073,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 {
 	struct blk_plug plug;
 
+	LIST_HEAD(write_list);
+
 	BUG_ON(dm_bufio_in_request());
 
 	blk_start_plug(&plug);
@@ -1055,7 +1083,15 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 	for (; n_blocks--; block++) {
 		int need_submit;
 		struct dm_buffer *b;
-		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
+		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
+				&write_list);
+		if (unlikely(!list_empty(&write_list))) {
+			dm_bufio_unlock(c);
+			blk_finish_plug(&plug);
+			__flush_write_list(&write_list);
+			blk_start_plug(&plug);
+			dm_bufio_lock(c);
+		}
 		if (unlikely(b != NULL)) {
 			dm_bufio_unlock(c);
 
@@ -1069,7 +1105,6 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 				goto flush_plug;
 			dm_bufio_lock(c);
 		}
-
 	}
 
 	dm_bufio_unlock(c);
@@ -1126,11 +1161,14 @@ EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
 
 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
 {
+	LIST_HEAD(write_list);
+
 	BUG_ON(dm_bufio_in_request());
 
 	dm_bufio_lock(c);
-	__write_dirty_buffers_async(c, 0);
+	__write_dirty_buffers_async(c, 0, &write_list);
 	dm_bufio_unlock(c);
+	__flush_write_list(&write_list);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
 
@@ -1147,8 +1185,13 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 	unsigned long buffers_processed = 0;
 	struct dm_buffer *b, *tmp;
 
+	LIST_HEAD(write_list);
+
+	dm_bufio_lock(c);
+	__write_dirty_buffers_async(c, 0, &write_list);
+	dm_bufio_unlock(c);
+	__flush_write_list(&write_list);
 	dm_bufio_lock(c);
-	__write_dirty_buffers_async(c, 0);
 
 again:
 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
@@ -1274,7 +1317,7 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
 	BUG_ON(!b->hold_count);
 	BUG_ON(test_bit(B_READING, &b->state));
 
-	__write_dirty_buffer(b);
+	__write_dirty_buffer(b, NULL);
 	if (b->hold_count == 1) {
 		wait_on_bit(&b->state, B_WRITING,
 			    do_io_schedule, TASK_UNINTERRUPTIBLE);