Commit 581075e4 authored by Bart Van Assche, committed by Jens Axboe

dm/core: Reduce the size of struct dm_io_request

Combine the bi_op and bi_op_flags members into a single bi_opf member.
Use the new blk_opf_t type to improve static type checking. This patch
does not change any functionality.

Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@kernel.org>
Cc: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-22-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7ee1de6e
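
For illustration only, not part of the patch: a minimal sketch of what a dm_io() caller looks like after the conversion, modeled on the dm_bufio_issue_flush() hunk below. The function name issue_flush_example and its client/where parameters are hypothetical stand-ins for whatever dm_io_client and dm_io_region a real caller already owns.

#include <linux/blk_types.h>	/* blk_opf_t, REQ_OP_WRITE, REQ_PREFLUSH, REQ_SYNC */
#include <linux/dm-io.h>	/* struct dm_io_request, dm_io() */

static int issue_flush_example(struct dm_io_client *client,
			       struct dm_io_region *where)
{
	/*
	 * Before this patch the operation and its flags were split across
	 * two plain ints:
	 *
	 *	.bi_op       = REQ_OP_WRITE,
	 *	.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
	 *
	 * Now both are carried in one type-checked blk_opf_t member.
	 */
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = client,
	};

	/* Synchronous because notify.fn is left NULL (see the dm-io.h hunk). */
	return dm_io(&io_req, 1, where, NULL);
}
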
@@ -582,8 +582,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_op = rw,
-		.bi_op_flags = 0,
+		.bi_opf = rw,
 		.notify.fn = dmio_complete,
 		.notify.context = b,
 		.client = b->c->dm_io,
@@ -1341,8 +1340,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
 int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
 	struct dm_io_request io_req = {
-		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = c->dm_io,
@@ -1365,8 +1363,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
 {
 	struct dm_io_request io_req = {
-		.bi_op = REQ_OP_DISCARD,
-		.bi_op_flags = REQ_SYNC,
+		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = c->dm_io,
...
@@ -557,8 +557,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
 	struct dm_io_region io_loc;
 	int r;
 
-	io_req.bi_op = op;
-	io_req.bi_op_flags = op_flags;
+	io_req.bi_opf = op | op_flags;
 	io_req.mem.type = DM_IO_KMEM;
 	io_req.mem.ptr.addr = ic->sb;
 	io_req.notify.fn = NULL;
@@ -1067,8 +1066,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
 	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 
-	io_req.bi_op = op;
-	io_req.bi_op_flags = op_flags;
+	io_req.bi_opf = op | op_flags;
 	io_req.mem.type = DM_IO_PAGE_LIST;
 	if (ic->journal_io)
 		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
@@ -1188,8 +1186,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
 	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 
-	io_req.bi_op = REQ_OP_WRITE;
-	io_req.bi_op_flags = 0;
+	io_req.bi_opf = REQ_OP_WRITE;
 	io_req.mem.type = DM_IO_PAGE_LIST;
 	io_req.mem.ptr.pl = &ic->journal[pl_index];
 	io_req.mem.offset = pl_offset;
@@ -1516,8 +1513,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
 	if (!ic->meta_dev)
 		flush_data = false;
 	if (flush_data) {
-		fr.io_req.bi_op = REQ_OP_WRITE,
-		fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+		fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
 		fr.io_req.mem.type = DM_IO_KMEM,
 		fr.io_req.mem.ptr.addr = NULL,
 		fr.io_req.notify.fn = flush_notify,
@@ -2706,8 +2702,7 @@ static void integrity_recalc(struct work_struct *w)
 	if (unlikely(dm_integrity_failed(ic)))
 		goto err;
 
-	io_req.bi_op = REQ_OP_READ;
-	io_req.bi_op_flags = 0;
+	io_req.bi_opf = REQ_OP_READ;
 	io_req.mem.type = DM_IO_VMA;
 	io_req.mem.ptr.addr = ic->recalc_buffer;
 	io_req.notify.fn = NULL;
...
@@ -489,7 +489,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
 
 	case DM_IO_VMA:
 		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
-		if (io_req->bi_op == REQ_OP_READ) {
+		if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
 			dp->vma_invalidate_address = io_req->mem.ptr.vma;
 			dp->vma_invalidate_size = size;
 		}
@@ -519,11 +519,13 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 
 	if (!io_req->notify.fn)
 		return sync_io(io_req->client, num_regions, where,
-			       io_req->bi_op, io_req->bi_op_flags, &dp,
+			       io_req->bi_opf & REQ_OP_MASK,
+			       io_req->bi_opf & ~REQ_OP_MASK, &dp,
 			       sync_error_bits);
 
-	return async_io(io_req->client, num_regions, where, io_req->bi_op,
-			io_req->bi_op_flags, &dp, io_req->notify.fn,
+	return async_io(io_req->client, num_regions, where,
+			io_req->bi_opf & REQ_OP_MASK,
+			io_req->bi_opf & ~REQ_OP_MASK, &dp, io_req->notify.fn,
 			io_req->notify.context);
 }
 EXPORT_SYMBOL(dm_io);
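
Not part of the patch, just an illustration of the split dm_io() performs in the hunk above: sync_io() and async_io() still take the opcode and the flags as separate arguments, so the combined value is masked apart again. REQ_OP_MASK, from <linux/blk_types.h>, covers the low bits that hold the REQ_OP_* opcode; every bit above it is a req_flag_bits flag. The helper names below are hypothetical; only the masking mirrors the patch.

#include <linux/blk_types.h>	/* blk_opf_t, REQ_OP_MASK */
#include <linux/dm-io.h>	/* struct dm_io_request */

/* Opcode half of bi_opf, e.g. REQ_OP_WRITE for a flush request. */
static inline blk_opf_t dm_io_op_example(const struct dm_io_request *io_req)
{
	return io_req->bi_opf & REQ_OP_MASK;
}

/* Flag half of bi_opf, e.g. REQ_PREFLUSH | REQ_SYNC for the same request. */
static inline blk_opf_t dm_io_op_flags_example(const struct dm_io_request *io_req)
{
	return io_req->bi_opf & ~REQ_OP_MASK;
}
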
...
@@ -549,8 +549,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_op = job->rw,
-		.bi_op_flags = 0,
+		.bi_opf = job->rw,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = 0,
...
@@ -293,8 +293,7 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
 
 static int rw_header(struct log_c *lc, int op)
 {
-	lc->io_req.bi_op = op;
-	lc->io_req.bi_op_flags = 0;
+	lc->io_req.bi_opf = op;
 
 	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
 }
@@ -307,8 +306,7 @@ static int flush_header(struct log_c *lc)
 		.count = 0,
 	};
 
-	lc->io_req.bi_op = REQ_OP_WRITE;
-	lc->io_req.bi_op_flags = REQ_PREFLUSH;
+	lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	return dm_io(&lc->io_req, 1, &null_location, NULL);
 }
...
@@ -260,8 +260,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct dm_io_region io[MAX_NR_MIRRORS];
 	struct mirror *m;
 	struct dm_io_request io_req = {
-		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
@@ -535,8 +534,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
 {
 	struct dm_io_region io;
 	struct dm_io_request io_req = {
-		.bi_op = REQ_OP_READ,
-		.bi_op_flags = 0,
+		.bi_opf = REQ_OP_READ,
 		.mem.type = DM_IO_BIO,
 		.mem.ptr.bio = bio,
 		.notify.fn = read_callback,
@@ -648,9 +646,9 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	unsigned int i;
 	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
 	struct mirror *m;
+	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
 	struct dm_io_request io_req = {
-		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
+		.bi_opf = REQ_OP_WRITE | op_flags,
 		.mem.type = DM_IO_BIO,
 		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
@@ -659,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	};
 
 	if (bio_op(bio) == REQ_OP_DISCARD) {
-		io_req.bi_op = REQ_OP_DISCARD;
+		io_req.bi_opf = REQ_OP_DISCARD | op_flags;
 		io_req.mem.type = DM_IO_KMEM;
 		io_req.mem.ptr.addr = NULL;
 	}
...
@@ -235,8 +235,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
 		.count = ps->store->chunk_size,
 	};
 	struct dm_io_request io_req = {
-		.bi_op = op,
-		.bi_op_flags = op_flags,
+		.bi_opf = op | op_flags,
 		.mem.type = DM_IO_VMA,
 		.mem.ptr.vma = area,
 		.client = ps->io_client,
...
@@ -523,8 +523,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
 
 		region.sector += wc->start_sector;
 		atomic_inc(&endio.count);
-		req.bi_op = REQ_OP_WRITE;
-		req.bi_op_flags = REQ_SYNC;
+		req.bi_opf = REQ_OP_WRITE | REQ_SYNC;
 		req.mem.type = DM_IO_VMA;
 		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
 		req.client = wc->dm_io;
@@ -562,8 +561,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
 	region.sector += wc->start_sector;
 
-	req.bi_op = REQ_OP_WRITE;
-	req.bi_op_flags = REQ_SYNC | REQ_FUA;
+	req.bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 	req.mem.type = DM_IO_VMA;
 	req.mem.ptr.vma = (char *)wc->memory_map;
 	req.client = wc->dm_io;
@@ -592,8 +590,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
 	region.bdev = dev->bdev;
 	region.sector = 0;
 	region.count = 0;
-	req.bi_op = REQ_OP_WRITE;
-	req.bi_op_flags = REQ_PREFLUSH;
+	req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	req.mem.type = DM_IO_KMEM;
 	req.mem.ptr.addr = NULL;
 	req.client = wc->dm_io;
@@ -981,8 +978,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
 	region.bdev = wc->ssd_dev->bdev;
 	region.sector = wc->start_sector;
 	region.count = n_sectors;
-	req.bi_op = REQ_OP_READ;
-	req.bi_op_flags = REQ_SYNC;
+	req.bi_opf = REQ_OP_READ | REQ_SYNC;
 	req.mem.type = DM_IO_VMA;
 	req.mem.ptr.vma = (char *)wc->memory_map;
 	req.client = wc->dm_io;
...
@@ -13,6 +13,7 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/blk_types.h>
 
 struct dm_io_region {
 	struct block_device *bdev;
@@ -57,8 +58,7 @@ struct dm_io_notify {
  */
 struct dm_io_client;
 struct dm_io_request {
-	int bi_op;			/* REQ_OP */
-	int bi_op_flags;		/* req_flag_bits */
+	blk_opf_t bi_opf;		/* Request type and flags */
 	struct dm_io_memory mem;	/* Memory to use for io */
 	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
 	struct dm_io_client *client;	/* Client memory handler */
...