Commit cdd4d783 authored by Mike Snitzer

dm writecache: split up writecache_map() to improve code readability

writecache_map() has grown too large and can be confusing to read given
all the goto statements.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 99d26de2
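
The diff below introduces a small dispatch pattern: the read and write paths become helper functions that return an enum wc_map_op value, and writecache_map() converts that value into the former unlock_*/return paths in a single switch statement. As a rough illustration of the idea only, here is a minimal, self-contained sketch of that pattern; the names (map_result, demo_map_read, demo_map_write, demo_map) are invented for the example and are not the kernel's.

/*
 * Minimal sketch of the refactoring pattern used in this commit:
 * helpers return an enum describing how the request should be completed,
 * and the caller dispatches on that value in one switch statement instead
 * of jumping to shared goto labels from deep inside one large function.
 * All names here are illustrative, not taken from dm-writecache.
 */
#include <stdio.h>

enum map_result {
	MAP_SUBMIT,        /* request fully handled, complete it here   */
	MAP_REMAP,         /* re-route the request to the cache device  */
	MAP_REMAP_ORIGIN,  /* pass the request through to the origin    */
	MAP_ERROR,         /* fail the request                          */
};

/* Each helper owns one branch of the formerly monolithic function. */
static enum map_result demo_map_read(int cache_hit)
{
	return cache_hit ? MAP_SUBMIT : MAP_REMAP_ORIGIN;
}

static enum map_result demo_map_write(int have_error)
{
	return have_error ? MAP_ERROR : MAP_REMAP;
}

/* The caller stays small: pick a helper, then dispatch once on the result. */
static const char *demo_map(int is_read, int flag)
{
	enum map_result r = is_read ? demo_map_read(flag) : demo_map_write(flag);

	switch (r) {
	case MAP_SUBMIT:       return "submitted";
	case MAP_REMAP:        return "remapped to cache";
	case MAP_REMAP_ORIGIN: return "remapped to origin";
	case MAP_ERROR:        return "error";
	}
	return "unreachable";
}

int main(void)
{
	printf("%s\n", demo_map(1, 1)); /* read hit -> submitted */
	printf("%s\n", demo_map(0, 0)); /* write ok -> remapped to cache */
	return 0;
}

Keeping the unlock and bio-completion logic in one switch, as the real writecache_map() does below, lets each helper simply return a value instead of jumping to a label inside a much larger function.
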
@@ -1293,56 +1293,19 @@ static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
 	bio_list_add(&wc->flush_list, bio);
 }
 
-static int writecache_map(struct dm_target *ti, struct bio *bio)
+enum wc_map_op {
+	WC_MAP_SUBMIT,
+	WC_MAP_REMAP,
+	WC_MAP_REMAP_ORIGIN,
+	WC_MAP_RETURN,
+	WC_MAP_ERROR,
+};
+
+static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
 {
+	enum wc_map_op map_op;
 	struct wc_entry *e;
-	struct dm_writecache *wc = ti->private;
-
-	bio->bi_private = NULL;
-
-	wc_lock(wc);
-
-	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
-		if (writecache_has_error(wc))
-			goto unlock_error;
-		if (WC_MODE_PMEM(wc)) {
-			writecache_flush(wc);
-			if (writecache_has_error(wc))
-				goto unlock_error;
-			if (unlikely(wc->cleaner) || unlikely(wc->metadata_only))
-				goto unlock_remap_origin;
-			goto unlock_submit;
-		} else {
-			if (dm_bio_get_target_bio_nr(bio))
-				goto unlock_remap_origin;
-			writecache_offload_bio(wc, bio);
-			goto unlock_return;
-		}
-	}
-
-	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
-
-	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
-				(wc->block_size / 512 - 1)) != 0)) {
-		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
-		      (unsigned long long)bio->bi_iter.bi_sector,
-		      bio->bi_iter.bi_size, wc->block_size);
-		goto unlock_error;
-	}
-
-	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
-		if (writecache_has_error(wc))
-			goto unlock_error;
-		if (WC_MODE_PMEM(wc)) {
-			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
-			goto unlock_remap_origin;
-		} else {
-			writecache_offload_bio(wc, bio);
-			goto unlock_return;
-		}
-	}
 
-	if (bio_data_dir(bio) == READ) {
 read_next_block:
 	e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
 	if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
@@ -1350,31 +1313,88 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 			bio_copy_block(wc, bio, memory_data(wc, e));
 			if (bio->bi_iter.bi_size)
 				goto read_next_block;
-			goto unlock_submit;
+			map_op = WC_MAP_SUBMIT;
 		} else {
 			dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
 			bio_set_dev(bio, wc->ssd_dev->bdev);
 			bio->bi_iter.bi_sector = cache_sector(wc, e);
 			if (!writecache_entry_is_committed(wc, e))
 				writecache_wait_for_ios(wc, WRITE);
-			goto unlock_remap;
+			map_op = WC_MAP_REMAP;
 		}
 	} else {
 		if (e) {
 			sector_t next_boundary =
 				read_original_sector(wc, e) - bio->bi_iter.bi_sector;
-			if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
+			if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
 				dm_accept_partial_bio(bio, next_boundary);
-			}
 		}
-		goto unlock_remap_origin;
+		map_op = WC_MAP_REMAP_ORIGIN;
 	}
-	} else {
+
+	return map_op;
+}
+
+static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
+					      struct wc_entry *e, bool search_used)
+{
+	unsigned bio_size = wc->block_size;
+	sector_t start_cache_sec = cache_sector(wc, e);
+	sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+
+	while (bio_size < bio->bi_iter.bi_size) {
+		if (!search_used) {
+			struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+			if (!f)
+				break;
+			write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+							(bio_size >> SECTOR_SHIFT), wc->seq_count);
+			writecache_insert_entry(wc, f);
+			wc->uncommitted_blocks++;
+		} else {
+			struct wc_entry *f;
+			struct rb_node *next = rb_next(&e->rb_node);
+			if (!next)
+				break;
+			f = container_of(next, struct wc_entry, rb_node);
+			if (f != e + 1)
+				break;
+			if (read_original_sector(wc, f) !=
+			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
+				break;
+			if (unlikely(f->write_in_progress))
+				break;
+			if (writecache_entry_is_committed(wc, f))
+				wc->overwrote_committed = true;
+			e = f;
+		}
+		bio_size += wc->block_size;
+		current_cache_sec += wc->block_size >> SECTOR_SHIFT;
+	}
+
+	bio_set_dev(bio, wc->ssd_dev->bdev);
+	bio->bi_iter.bi_sector = start_cache_sec;
+	dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
+
+	if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
+		wc->uncommitted_blocks = 0;
+		queue_work(wc->writeback_wq, &wc->flush_work);
+	} else {
+		writecache_schedule_autocommit(wc);
+	}
+
+	return WC_MAP_REMAP;
+}
+
+static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
+{
+	struct wc_entry *e;
+
 	do {
 		bool found_entry = false;
 		bool search_used = false;
 		if (writecache_has_error(wc))
-			goto unlock_error;
+			return WC_MAP_ERROR;
 		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
 		if (e) {
 			if (!writecache_entry_is_committed(wc, e)) {
@@ -1404,7 +1424,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 						dm_accept_partial_bio(bio, next_boundary);
 					}
 				}
-				goto unlock_remap_origin;
+				return WC_MAP_REMAP_ORIGIN;
 			}
 			writecache_wait_on_freelist(wc);
 			continue;
@@ -1413,65 +1433,76 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 		writecache_insert_entry(wc, e);
 		wc->uncommitted_blocks++;
 bio_copy:
-		if (WC_MODE_PMEM(wc)) {
+		if (WC_MODE_PMEM(wc))
 			bio_copy_block(wc, bio, memory_data(wc, e));
-		} else {
-			unsigned bio_size = wc->block_size;
-			sector_t start_cache_sec = cache_sector(wc, e);
-			sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
-
-			while (bio_size < bio->bi_iter.bi_size) {
-				if (!search_used) {
-					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
-					if (!f)
-						break;
-					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
-									(bio_size >> SECTOR_SHIFT), wc->seq_count);
-					writecache_insert_entry(wc, f);
-					wc->uncommitted_blocks++;
-				} else {
-					struct wc_entry *f;
-					struct rb_node *next = rb_next(&e->rb_node);
-					if (!next)
-						break;
-					f = container_of(next, struct wc_entry, rb_node);
-					if (f != e + 1)
-						break;
-					if (read_original_sector(wc, f) !=
-					    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
-						break;
-					if (unlikely(f->write_in_progress))
-						break;
-					if (writecache_entry_is_committed(wc, f))
-						wc->overwrote_committed = true;
-					e = f;
-				}
-				bio_size += wc->block_size;
-				current_cache_sec += wc->block_size >> SECTOR_SHIFT;
-			}
-
-			bio_set_dev(bio, wc->ssd_dev->bdev);
-			bio->bi_iter.bi_sector = start_cache_sec;
-			dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
-
-			if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
-				wc->uncommitted_blocks = 0;
-				queue_work(wc->writeback_wq, &wc->flush_work);
-			} else {
-				writecache_schedule_autocommit(wc);
-			}
-			goto unlock_remap;
-		}
-	} while (bio->bi_iter.bi_size);
-
-	if (unlikely(bio->bi_opf & REQ_FUA ||
-		     wc->uncommitted_blocks >= wc->autocommit_blocks))
-		writecache_flush(wc);
-	else
-		writecache_schedule_autocommit(wc);
-	goto unlock_submit;
-}
+		else
+			return writecache_bio_copy_ssd(wc, bio, e, search_used);
+	} while (bio->bi_iter.bi_size);
+
+	if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
+		writecache_flush(wc);
+	else
+		writecache_schedule_autocommit(wc);
+
+	return WC_MAP_SUBMIT;
+}
+
+static int writecache_map(struct dm_target *ti, struct bio *bio)
+{
+	struct dm_writecache *wc = ti->private;
+	enum wc_map_op map_op = WC_MAP_ERROR;
+
+	bio->bi_private = NULL;
+
+	wc_lock(wc);
+
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
+		if (writecache_has_error(wc))
+			goto unlock_error;
+		if (WC_MODE_PMEM(wc)) {
+			writecache_flush(wc);
+			if (writecache_has_error(wc))
+				goto unlock_error;
+			if (unlikely(wc->cleaner) || unlikely(wc->metadata_only))
+				goto unlock_remap_origin;
+			goto unlock_submit;
+		} else {
+			if (dm_bio_get_target_bio_nr(bio))
+				goto unlock_remap_origin;
+			writecache_offload_bio(wc, bio);
+			goto unlock_return;
+		}
+	}
+
+	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
+
+	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+				(wc->block_size / 512 - 1)) != 0)) {
+		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
+		      (unsigned long long)bio->bi_iter.bi_sector,
+		      bio->bi_iter.bi_size, wc->block_size);
+		goto unlock_error;
+	}
+
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+		if (writecache_has_error(wc))
+			goto unlock_error;
+		if (WC_MODE_PMEM(wc)) {
+			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
+			goto unlock_remap_origin;
+		} else {
+			writecache_offload_bio(wc, bio);
+			goto unlock_return;
+		}
+	}
+
+	if (bio_data_dir(bio) == READ)
+		map_op = writecache_map_read(wc, bio);
+	else
+		map_op = writecache_map_write(wc, bio);
+
+	switch (map_op) {
+	case WC_MAP_REMAP_ORIGIN:
 unlock_remap_origin:
 	if (likely(wc->pause != 0)) {
 		if (bio_op(bio) == REQ_OP_WRITE) {
@@ -1483,6 +1514,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 	wc_unlock(wc);
 	return DM_MAPIO_REMAPPED;
 
+	case WC_MAP_REMAP:
 unlock_remap:
 	/* make sure that writecache_end_io decrements bio_in_progress: */
 	bio->bi_private = (void *)1;
@@ -1490,19 +1522,23 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 	wc_unlock(wc);
 	return DM_MAPIO_REMAPPED;
 
+	case WC_MAP_SUBMIT:
 unlock_submit:
 	wc_unlock(wc);
 	bio_endio(bio);
 	return DM_MAPIO_SUBMITTED;
 
+	case WC_MAP_RETURN:
 unlock_return:
 	wc_unlock(wc);
 	return DM_MAPIO_SUBMITTED;
 
+	case WC_MAP_ERROR:
 unlock_error:
 	wc_unlock(wc);
 	bio_io_error(bio);
 	return DM_MAPIO_SUBMITTED;
+	}
 }
 
 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)