Commit ee50cc19 authored by Mikulas Patocka, committed by Mike Snitzer

dm writecache: don't split bios when overwriting contiguous cache content

If dm-writecache overwrites existing cached data, it splits the
incoming bio into many block-sized bios. The I/O scheduler does merge
these bios into one large request but this needless splitting and
merging causes performance degradation.

Fix this by avoiding bio splitting if the cache target area that is
being overwritten is contiguous.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 6bcd658f
...@@ -1360,14 +1360,18 @@ static int writecache_map(struct dm_target *ti, struct bio *bio) ...@@ -1360,14 +1360,18 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
} else { } else {
do { do {
bool found_entry = false; bool found_entry = false;
bool search_used = false;
if (writecache_has_error(wc)) if (writecache_has_error(wc))
goto unlock_error; goto unlock_error;
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0); e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
if (e) { if (e) {
if (!writecache_entry_is_committed(wc, e)) if (!writecache_entry_is_committed(wc, e)) {
search_used = true;
goto bio_copy; goto bio_copy;
}
if (!WC_MODE_PMEM(wc) && !e->write_in_progress) { if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
wc->overwrote_committed = true; wc->overwrote_committed = true;
search_used = true;
goto bio_copy; goto bio_copy;
} }
found_entry = true; found_entry = true;
...@@ -1404,13 +1408,31 @@ static int writecache_map(struct dm_target *ti, struct bio *bio) ...@@ -1404,13 +1408,31 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT); sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
while (bio_size < bio->bi_iter.bi_size) { while (bio_size < bio->bi_iter.bi_size) {
struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec); if (!search_used) {
if (!f) struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
break; if (!f)
write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector + break;
(bio_size >> SECTOR_SHIFT), wc->seq_count); write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
writecache_insert_entry(wc, f); (bio_size >> SECTOR_SHIFT), wc->seq_count);
wc->uncommitted_blocks++; writecache_insert_entry(wc, f);
wc->uncommitted_blocks++;
} else {
struct wc_entry *f;
struct rb_node *next = rb_next(&e->rb_node);
if (!next)
break;
f = container_of(next, struct wc_entry, rb_node);
if (f != e + 1)
break;
if (read_original_sector(wc, f) !=
read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
break;
if (unlikely(f->write_in_progress))
break;
if (writecache_entry_is_committed(wc, f))
wc->overwrote_committed = true;
e = f;
}
bio_size += wc->block_size; bio_size += wc->block_size;
current_cache_sec += wc->block_size >> SECTOR_SHIFT; current_cache_sec += wc->block_size >> SECTOR_SHIFT;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment