Commit 4a2fe296 authored by Mike Snitzer

dm: enhance alloc_multiple_bios() to be more versatile

alloc_multiple_bios() has the useful ability to try allocating bios
with GFP_NOWAIT but will fall back to using GFP_NOIO.  Its callers
service both empty flush bios and abnormal bios (e.g. discard).
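To make the fallback concrete, here is a minimal sketch of the pattern
(illustrative only: the helper name is hypothetical, and the real code
allocates clone bios via alloc_tio() rather than kmalloc()):

	#include <linux/slab.h>

	/*
	 * Opportunistic non-blocking attempt first; if it fails, retry
	 * with GFP_NOIO, which may block but will not recurse into the
	 * IO path.
	 */
	static void *alloc_nowait_with_noio_fallback(size_t size)
	{
		void *p = kmalloc(size, GFP_NOWAIT);

		if (!p)
			p = kmalloc(size, GFP_NOIO);
		return p;
	}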

alloc_multiple_bios() enhancements offered in this commit (see the
sketch after this list):
- don't require table_devices_lock if num_bios = 1
- allow caller to pass GFP_NOWAIT to get the usual GFP_NOWAIT attempt
  with GFP_NOIO fallback
- allow caller to pass GFP_NOIO to _only_ allocate using GFP_NOIO
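Distilled from the diff below, the gfp_flag argument simply selects the
starting pass of the existing two-pass allocation loop:

	/*
	 * Pass 0 is the opportunistic GFP_NOWAIT attempt; pass 1 uses
	 * GFP_NOIO and takes table_devices_lock, now only when
	 * num_bios > 1. Starting at try = 1 skips the GFP_NOWAIT pass
	 * entirely.
	 */
	int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1;

	for (; try < 2; try++) {
		/* allocate num_bios clone bios, falling through to
		 * pass 1 only if pass 0 could not allocate them all
		 */
	}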

Flush bios with data may be issued to DM with REQ_NOWAIT; as such, it
makes sense to attempt servicing them with GFP_NOWAIT allocations.
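For example, such a flush might be issued along these lines (an assumed
caller-side sketch, not code from this commit; 'bdev' and 'page' are
hypothetical):

	/*
	 * REQ_PREFLUSH plus a data payload forms a "flush bio with
	 * data"; REQ_NOWAIT means submission must not block, with
	 * failure surfacing as BLK_STS_AGAIN.
	 */
	struct bio *bio = bio_alloc(bdev, 1,
			REQ_OP_WRITE | REQ_PREFLUSH | REQ_NOWAIT,
			GFP_KERNEL);

	__bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);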

But abnormal IO should never be issued using REQ_NOWAIT (if that
changes in the future that's fine, but no sense supporting it now).

While at it, rename __send_changing_extent_only() to
__send_abnormal_io().

[Thanks to both Ming and Mikulas for help with translating known
possible IO scenarios to requirements.]
Suggested-by: Ming Lei <ming.lei@redhat.com>
Suggested-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 34dbaa88
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1478,15 +1478,15 @@ static void setup_split_accounting(struct clone_info *ci, unsigned int len)
 
 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
                                 struct dm_target *ti, unsigned int num_bios,
-                                unsigned *len)
+                                unsigned *len, gfp_t gfp_flag)
 {
         struct bio *bio;
-        int try;
+        int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1;
 
-        for (try = 0; try < 2; try++) {
+        for (; try < 2; try++) {
                 int bio_nr;
 
-                if (try)
+                if (try && num_bios > 1)
                         mutex_lock(&ci->io->md->table_devices_lock);
                 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
                         bio = alloc_tio(ci, ti, bio_nr, len,
@@ -1496,7 +1496,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
 
                         bio_list_add(blist, bio);
                 }
-                if (try)
+                if (try && num_bios > 1)
                         mutex_unlock(&ci->io->md->table_devices_lock);
                 if (bio_nr == num_bios)
                         return;
@@ -1507,33 +1507,30 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
 }
 
 static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
-                                          unsigned int num_bios, unsigned int *len)
+                                          unsigned int num_bios, unsigned int *len,
+                                          gfp_t gfp_flag)
 {
         struct bio_list blist = BIO_EMPTY_LIST;
         struct bio *clone;
         unsigned int ret = 0;
 
-        switch (num_bios) {
-        case 0:
-                break;
-        case 1:
-                if (len)
-                        setup_split_accounting(ci, *len);
-                clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-                __map_bio(clone);
-                ret = 1;
-                break;
-        default:
-                if (len)
-                        setup_split_accounting(ci, *len);
-                /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
-                alloc_multiple_bios(&blist, ci, ti, num_bios, len);
-                while ((clone = bio_list_pop(&blist))) {
-                        dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
-                        __map_bio(clone);
-                        ret += 1;
-                }
-                break;
+        if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
+                return 0;
+
+        /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+        if (len)
+                setup_split_accounting(ci, *len);
+
+        /*
+         * Using alloc_multiple_bios(), even if num_bios is 1, to consistently
+         * support allocating using GFP_NOWAIT with GFP_NOIO fallback.
+         */
+        alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag);
+        while ((clone = bio_list_pop(&blist))) {
+                if (num_bios > 1)
+                        dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
+                __map_bio(clone);
+                ret += 1;
         }
 
         return ret;
@@ -1560,8 +1557,12 @@ static void __send_empty_flush(struct clone_info *ci)
                 unsigned int bios;
                 struct dm_target *ti = dm_table_get_target(t, i);
 
+                if (unlikely(ti->num_flush_bios == 0))
+                        continue;
+
                 atomic_add(ti->num_flush_bios, &ci->io->io_count);
-                bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+                bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
+                                             NULL, GFP_NOWAIT);
                 atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
         }
 
@@ -1574,10 +1575,9 @@ static void __send_empty_flush(struct clone_info *ci)
         bio_uninit(ci->bio);
 }
 
-static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-                                        unsigned int num_bios,
-                                        unsigned int max_granularity,
-                                        unsigned int max_sectors)
+static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
+                               unsigned int num_bios, unsigned int max_granularity,
+                               unsigned int max_sectors)
 {
         unsigned int len, bios;
 
@@ -1585,7 +1585,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
                        __max_io_len(ti, ci->sector, max_granularity, max_sectors));
 
         atomic_add(num_bios, &ci->io->io_count);
-        bios = __send_duplicate_bios(ci, ti, num_bios, &len);
+        bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO);
         /*
          * alloc_io() takes one extra reference for submission, so the
          * reference won't reach 0 without the following (+1) subtraction
@@ -1654,8 +1654,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
         if (unlikely(!num_bios))
                 return BLK_STS_NOTSUPP;
 
-        __send_changing_extent_only(ci, ti, num_bios,
-                                    max_granularity, max_sectors);
+        __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
 
         return BLK_STS_OK;
 }