Commit a3282b43 authored by Bart Van Assche, committed by Jens Axboe

dm/core: Combine request operation type and flags

Improve kernel code uniformity by combining the request operation type and
flags into a single variable. Change 'int rw' into 'enum req_op op' because
'op' is the name the block layer uses for a request operation type. Use the
blk_opf_t and enum req_op types where appropriate to improve static type
checking.

Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@kernel.org>
Cc: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-24-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 71f7113d
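
For readers unfamiliar with the combined encoding: a single blk_opf_t value carries the request operation in its low REQ_OP_BITS bits and the request flags in the remaining bits, so the operation can be recovered by masking. A minimal sketch of that convention, using only names that appear in this patch and in include/linux/blk_types.h; the do_region() hunk below does exactly this:

```c
#include <linux/blk_types.h>	/* blk_opf_t, enum req_op, REQ_OP_MASK, REQ_SYNC */

static void opf_sketch(void)
{
	/* Operation and flags travel together in one value. */
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC;

	/* The low bits select the operation; masking recovers it. */
	enum req_op op = opf & REQ_OP_MASK;

	/* Flag bits are tested directly on the combined value. */
	if (op == REQ_OP_WRITE && (opf & REQ_SYNC))
		; /* a synchronous write */
}
```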
drivers/md/dm-bufio.c
@@ -577,12 +577,12 @@ static void dmio_complete(unsigned long error, void *context)
 	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
 }
 
-static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
 		     unsigned n_sectors, unsigned offset)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_opf = rw,
+		.bi_opf = op,
 		.notify.fn = dmio_complete,
 		.notify.context = b,
 		.client = b->c->dm_io,
@@ -615,7 +615,7 @@ static void bio_complete(struct bio *bio)
 	b->end_io(b, status);
 }
 
-static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
 		    unsigned n_sectors, unsigned offset)
 {
 	struct bio *bio;
@@ -629,10 +629,10 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
 	bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
 	if (!bio) {
 dmio:
-		use_dmio(b, rw, sector, n_sectors, offset);
+		use_dmio(b, op, sector, n_sectors, offset);
 		return;
 	}
-	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
+	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_end_io = bio_complete;
 	bio->bi_private = b;
@@ -668,7 +668,8 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
 	return sector;
 }
 
-static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
+static void submit_io(struct dm_buffer *b, enum req_op op,
+		      void (*end_io)(struct dm_buffer *, blk_status_t))
 {
 	unsigned n_sectors;
 	sector_t sector;
@@ -678,7 +679,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
 
 	sector = block_to_sector(b->c, b->block);
 
-	if (rw != REQ_OP_WRITE) {
+	if (op != REQ_OP_WRITE) {
 		n_sectors = b->c->block_size >> SECTOR_SHIFT;
 		offset = 0;
 	} else {
@@ -697,9 +698,9 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
 	}
 
 	if (b->data_mode != DATA_MODE_VMALLOC)
-		use_bio(b, rw, sector, n_sectors, offset);
+		use_bio(b, op, sector, n_sectors, offset);
 	else
-		use_dmio(b, rw, sector, n_sectors, offset);
+		use_dmio(b, op, sector, n_sectors, offset);
 }
 
 /*----------------------------------------------------------------
drivers/md/dm-io.c
@@ -293,7 +293,7 @@ static void km_dp_init(struct dpages *dp, void *data)
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
-static void do_region(int op, int op_flags, unsigned region,
+static void do_region(const blk_opf_t opf, unsigned region,
 		      struct dm_io_region *where, struct dpages *dp,
 		      struct io *io)
 {
@@ -306,6 +306,7 @@ static void do_region(int op, int op_flags, unsigned region,
 	struct request_queue *q = bdev_get_queue(where->bdev);
 	sector_t num_sectors;
 	unsigned int special_cmd_max_sectors;
+	const enum req_op op = opf & REQ_OP_MASK;
 
 	/*
 	 * Reject unsupported discard and write same requests.
@@ -339,8 +340,8 @@ static void do_region(int op, int op_flags, unsigned region,
 					    (PAGE_SIZE >> SECTOR_SHIFT)));
 	}
 
-	bio = bio_alloc_bioset(where->bdev, num_bvecs, op | op_flags,
-			       GFP_NOIO, &io->client->bios);
+	bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
+			       &io->client->bios);
 	bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
 	bio->bi_end_io = endio;
 	store_io_and_region_in_bio(bio, io, region);
@@ -368,7 +369,7 @@ static void do_region(int op, int op_flags, unsigned region,
 	} while (remaining);
 }
 
-static void dispatch_io(int op, int op_flags, unsigned int num_regions,
+static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
 			struct dm_io_region *where, struct dpages *dp,
 			struct io *io, int sync)
 {
@@ -378,7 +379,7 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		op_flags |= REQ_SYNC;
+		opf |= REQ_SYNC;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -386,8 +387,8 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (op_flags & REQ_PREFLUSH))
-			do_region(op, op_flags, i, where + i, dp, io);
+		if (where[i].count || (opf & REQ_PREFLUSH))
+			do_region(opf, i, where + i, dp, io);
 	}
 
 	/*
@@ -411,13 +412,13 @@ static void sync_io_complete(unsigned long error, void *context)
 }
 
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-		   struct dm_io_region *where, int op, int op_flags,
-		   struct dpages *dp, unsigned long *error_bits)
+		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
+		   unsigned long *error_bits)
 {
 	struct io *io;
 	struct sync_io sio;
 
-	if (num_regions > 1 && !op_is_write(op)) {
+	if (num_regions > 1 && !op_is_write(opf)) {
 		WARN_ON(1);
 		return -EIO;
 	}
@@ -434,7 +435,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 	io->vma_invalidate_address = dp->vma_invalidate_address;
 	io->vma_invalidate_size = dp->vma_invalidate_size;
 
-	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
+	dispatch_io(opf, num_regions, where, dp, io, 1);
 
 	wait_for_completion_io(&sio.wait);
 
@@ -445,12 +446,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
-		    struct dm_io_region *where, int op, int op_flags,
+		    struct dm_io_region *where, blk_opf_t opf,
 		    struct dpages *dp, io_notify_fn fn, void *context)
 {
 	struct io *io;
 
-	if (num_regions > 1 && !op_is_write(op)) {
+	if (num_regions > 1 && !op_is_write(opf)) {
 		WARN_ON(1);
 		fn(1, context);
 		return -EIO;
@@ -466,7 +467,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 	io->vma_invalidate_address = dp->vma_invalidate_address;
 	io->vma_invalidate_size = dp->vma_invalidate_size;
 
-	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
+	dispatch_io(opf, num_regions, where, dp, io, 0);
 
 	return 0;
 }
@@ -519,13 +520,10 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 
 	if (!io_req->notify.fn)
 		return sync_io(io_req->client, num_regions, where,
-			       io_req->bi_opf & REQ_OP_MASK,
-			       io_req->bi_opf & ~REQ_OP_MASK, &dp,
-			       sync_error_bits);
+			       io_req->bi_opf, &dp, sync_error_bits);
 
 	return async_io(io_req->client, num_regions, where,
-			io_req->bi_opf & REQ_OP_MASK,
-			io_req->bi_opf & ~REQ_OP_MASK, &dp, io_req->notify.fn,
-			io_req->notify.context);
+			io_req->bi_opf, &dp, io_req->notify.fn,
+			io_req->notify.context);
 }
 EXPORT_SYMBOL(dm_io);
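
The externally visible dm_io() interface is unchanged by the hunk above: callers already pass one combined value in dm_io_request::bi_opf, and after this patch it is forwarded to dispatch_io() unsplit instead of being masked apart. A hypothetical caller, sketched for illustration (the helper name, client setup, and region values are assumptions, not part of the patch):

```c
#include <linux/dm-io.h>

/* Hypothetical helper: synchronously read 'count' sectors into 'buf'.
 * Leaving notify.fn NULL selects the sync_io() path shown above. */
static int read_sectors(struct dm_io_client *client,
			struct block_device *bdev, sector_t sector,
			sector_t count, void *buf)
{
	unsigned long error_bits = 0;
	struct dm_io_region region = {
		.bdev = bdev,
		.sector = sector,
		.count = count,
	};
	struct dm_io_request io_req = {
		/* One blk_opf_t carries the operation and its flags. */
		.bi_opf = REQ_OP_READ | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.client = client,
	};

	return dm_io(&io_req, 1, &region, &error_bits);
}
```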
drivers/md/dm.c
@@ -716,7 +716,7 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 }
 
 static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
-						     int *srcu_idx, unsigned bio_opf)
+						     int *srcu_idx, blk_opf_t bio_opf)
 {
 	if (bio_opf & REQ_NOWAIT)
 		return dm_get_live_table_fast(md);
@@ -725,7 +725,7 @@ static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
 }
 
 static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
-					 unsigned bio_opf)
+					 blk_opf_t bio_opf)
 {
 	if (bio_opf & REQ_NOWAIT)
 		dm_put_live_table_fast(md);
@@ -1511,7 +1511,7 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
 
 static bool is_abnormal_io(struct bio *bio)
 {
-	unsigned int op = bio_op(bio);
+	enum req_op op = bio_op(bio);
 
 	if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
 		switch (op) {
@@ -1625,7 +1625,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
 	 * Only support bio polling for normal IO, and the target io is
 	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
 	 */
-	ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
+	ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
 
 	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
 	setup_split_accounting(ci, len);
@@ -1722,7 +1722,7 @@ static void dm_submit_bio(struct bio *bio)
 	struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
 	int srcu_idx;
 	struct dm_table *map;
-	unsigned bio_opf = bio->bi_opf;
+	blk_opf_t bio_opf = bio->bi_opf;
 
 	map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
 
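
The "static type checking" the commit message refers to comes from sparse: blk_opf_t is declared __bitwise in include/linux/blk_types.h, so mixing it with plain integers is reported when the tree is checked with sparse (make C=2). A standalone sketch of the mechanism, with simplified stand-in definitions (illustrative, not the kernel's exact ones):

```c
/* Outside the kernel these annotations expand to nothing; under
 * sparse (__CHECKER__) they create a distinct "restricted" type. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise blk_opf_t;

static void submit(blk_opf_t opf) { (void)opf; }

int main(void)
{
	blk_opf_t opf = (__force blk_opf_t)(1u << 9); /* arbitrary flag bit */

	submit(opf);	/* fine: types match */
	/* submit(5);	   sparse: warning, plain int -> restricted blk_opf_t */
	return 0;
}
```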