Commit 5500221e authored by Gao Xiang, committed by Theodore Ts'o

ext4: bio_alloc with __GFP_DIRECT_RECLAIM never fails

Similar to [1] [2], bio_alloc() with __GFP_DIRECT_RECLAIM set is
guaranteed to succeed under certain given restrictions, as stated
in block/bio.c and fs/direct-io.c. So it is ok not to check for a
NULL return value from bio_alloc() here.

[1] https://lore.kernel.org/r/20191030035518.65477-1-gaoxiang25@huawei.com
[2] https://lore.kernel.org/r/20190830162812.GA10694@infradead.org
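
For context, the guarantee this patch relies on can be illustrated
with a minimal sketch. example_write_bh() below is hypothetical and
not part of this patch; it only mirrors the shape of ext4's
submission helpers:

/* Hypothetical helper, for illustration only. */
static void example_write_bh(struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * GFP_NOIO and GFP_KERNEL both include __GFP_DIRECT_RECLAIM,
	 * so bio_alloc() blocks until the allocation succeeds instead
	 * of returning NULL (see the comments for bio_alloc_bioset()
	 * in block/bio.c); an "if (!bio)" check here would be dead code.
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
	bio->bi_opf = REQ_OP_WRITE;
	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
	submit_bio(bio);
}

Note that both call sites touched by this patch request at most
BIO_MAX_PAGES vectors, which keeps them within the documented bounds
of the guarantee.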
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Ritesh Harjani <riteshh@linux.ibm.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Link: https://lore.kernel.org/r/20191031092315.139267-1-gaoxiang25@huawei.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent ebc11f7b
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -394,14 +394,16 @@ void ext4_io_submit_init(struct ext4_io_submit *io,
 	io->io_end = NULL;
 }
 
-static int io_submit_init_bio(struct ext4_io_submit *io,
-			      struct buffer_head *bh)
+static void io_submit_init_bio(struct ext4_io_submit *io,
+			       struct buffer_head *bh)
 {
 	struct bio *bio;
 
+	/*
+	 * bio_alloc will _always_ be able to allocate a bio if
+	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
+	 */
 	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
-	if (!bio)
-		return -ENOMEM;
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio_set_dev(bio, bh->b_bdev);
 	bio->bi_end_io = ext4_end_bio;
@@ -409,13 +411,12 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
 	io->io_bio = bio;
 	io->io_next_block = bh->b_blocknr;
 	wbc_init_bio(io->io_wbc, bio);
-	return 0;
 }
 
-static int io_submit_add_bh(struct ext4_io_submit *io,
-			    struct inode *inode,
-			    struct page *page,
-			    struct buffer_head *bh)
+static void io_submit_add_bh(struct ext4_io_submit *io,
+			     struct inode *inode,
+			     struct page *page,
+			     struct buffer_head *bh)
 {
 	int ret;
 
@@ -424,9 +425,7 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
 		ext4_io_submit(io);
 	}
 	if (io->io_bio == NULL) {
-		ret = io_submit_init_bio(io, bh);
-		if (ret)
-			return ret;
+		io_submit_init_bio(io, bh);
 		io->io_bio->bi_write_hint = inode->i_write_hint;
 	}
 	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
@@ -434,7 +433,6 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
 		goto submit_and_retry;
 	wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
 	io->io_next_block++;
-	return 0;
 }
 
 int ext4_bio_write_page(struct ext4_io_submit *io,
@@ -527,8 +525,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 				gfp_flags |= __GFP_NOFAIL;
 				goto retry_encrypt;
 			}
-			bounce_page = NULL;
-			goto out;
+			printk_ratelimited(KERN_ERR "%s: ret = %d\n",
+					   __func__, ret);
+			redirty_page_for_writepage(wbc, page);
+			do {
+				clear_buffer_async_write(bh);
+				bh = bh->b_this_page;
+			} while (bh != head);
+			goto unlock;
 		}
 	}
 
@@ -536,30 +540,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	do {
 		if (!buffer_async_write(bh))
 			continue;
-		ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
-		if (ret) {
-			/*
-			 * We only get here on ENOMEM.  Not much else
-			 * we can do but mark the page as dirty, and
-			 * better luck next time.
-			 */
-			break;
-		}
+		io_submit_add_bh(io, inode,
+				 bounce_page ? bounce_page : page, bh);
 		nr_submitted++;
 		clear_buffer_dirty(bh);
 	} while ((bh = bh->b_this_page) != head);
-	/* Error stopped previous loop? Clean up buffers... */
-	if (ret) {
-	out:
-		fscrypt_free_bounce_page(bounce_page);
-		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
-		redirty_page_for_writepage(wbc, page);
-		do {
-			clear_buffer_async_write(bh);
-			bh = bh->b_this_page;
-		} while (bh != head);
-	}
+unlock:
 	unlock_page(page);
 
 	/* Nothing submitted - we have to end page writeback */
 	if (!nr_submitted)
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -360,10 +360,12 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		if (bio == NULL) {
 			struct bio_post_read_ctx *ctx;
 
+			/*
+			 * bio_alloc will _always_ be able to allocate a bio if
+			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
+			 */
 			bio = bio_alloc(GFP_KERNEL,
 				min_t(int, nr_pages, BIO_MAX_PAGES));
-			if (!bio)
-				goto set_error_page;
 			ctx = get_bio_post_read_ctx(inode, bio, page->index);
 			if (IS_ERR(ctx)) {
 				bio_put(bio);