Commit 848c4dd5 authored by Zach Brown, committed by Linus Torvalds

dio: zero struct dio with kzalloc instead of manually

This patch uses kzalloc to zero all of struct dio rather than manually
trying to track which fields we rely on being zero.  It passed aio+dio
stress testing and some bug regression testing on ext3.

This patch was introduced by Linus in the conversation that led up to
Badari's minimal fix to manually zero .map_bh.b_state in commit:

  6a648fa7

It makes the code a bit smaller.  Maybe a couple fewer cachelines to
load, if we're lucky:

   text    data     bss     dec     hex filename
3285925  568506 1304616 5159047  4eb887 vmlinux
3285797  568506 1304616 5158919  4eb807 vmlinux.patched

I was unable to measure a stable difference in the number of cpu cycles
spent in blockdev_direct_IO() when pushing aio+dio 256K reads at
~340MB/s.

So the resulting intent of the patch isn't a performance gain but to
avoid exposing ourselves to the risk of finding another field like
.map_bh.b_state where we rely on zeroing but don't enforce it in the
code.
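
As a minimal sketch of the pattern the patch applies (illustrative only; struct dio_like and alloc_dio_like() below are hypothetical stand-ins, not the actual fs/direct-io.c code): kzalloc() returns zeroed memory, so only the fields whose correct initial value is non-zero still need explicit assignment, and anything that relies on a zero/NULL default is guaranteed by the allocator instead of by hand-written initializers.

#include <linux/slab.h>

/* Hypothetical stand-in for struct dio, for illustration only. */
struct dio_like {
	int refcount;			/* starts at 1 */
	long final_block_in_bio;	/* starts at -1 */
	long next_block_for_io;		/* starts at -1 */
	/* ...many more fields whose initial value is simply 0 or NULL... */
};

static struct dio_like *alloc_dio_like(void)
{
	/* kzalloc() behaves like kmalloc() followed by zeroing the allocation. */
	struct dio_like *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;

	/* Only the non-zero initial state needs to be set explicitly. */
	d->refcount = 1;
	d->final_block_in_bio = -1;
	d->next_block_for_io = -1;
	return d;
}
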
Signed-off-by: Zach Brown <zach.brown@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 38f061c5
@@ -958,36 +958,22 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	ssize_t ret2;
 	size_t bytes;
 
-	dio->bio = NULL;
 	dio->inode = inode;
 	dio->rw = rw;
 	dio->blkbits = blkbits;
 	dio->blkfactor = inode->i_blkbits - blkbits;
-	dio->start_zero_done = 0;
-	dio->size = 0;
 	dio->block_in_file = offset >> blkbits;
-	dio->blocks_available = 0;
-	dio->cur_page = NULL;
-	dio->boundary = 0;
-	dio->reap_counter = 0;
 	dio->get_block = get_block;
 	dio->end_io = end_io;
-	dio->map_bh.b_private = NULL;
-	dio->map_bh.b_state = 0;
 	dio->final_block_in_bio = -1;
 	dio->next_block_for_io = -1;
-	dio->page_errors = 0;
-	dio->io_error = 0;
-	dio->result = 0;
 	dio->iocb = iocb;
 	dio->i_size = i_size_read(inode);
 	spin_lock_init(&dio->bio_lock);
 	dio->refcount = 1;
-	dio->bio_list = NULL;
-	dio->waiter = NULL;
 
 	/*
 	 * In case of non-aligned buffers, we may need 2 more
@@ -995,8 +981,6 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	 */
 	if (unlikely(dio->blkfactor))
 		dio->pages_in_io = 2;
-	else
-		dio->pages_in_io = 0;
 
 	for (seg = 0; seg < nr_segs; seg++) {
 		user_addr = (unsigned long)iov[seg].iov_base;
@@ -1184,7 +1168,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 		}
 	}
 
-	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+	dio = kzalloc(sizeof(*dio), GFP_KERNEL);
 	retval = -ENOMEM;
 	if (!dio)
 		goto out;