Commit bb64c08b authored by Matthew Wilcox, committed by Theodore Ts'o

ext4: Convert ext4_finish_bio() to use folios

Prepare ext4 to support large folios in the page writeback path.
Also set the actual error in the mapping, not just -EIO.
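
For illustration, a minimal sketch of the error-reporting change (record_wb_error() is a hypothetical helper, not part of this commit): blk_status_to_errno() translates the block layer status into a negative errno, and mapping_set_error() records it against the mapping, so a later fsync() reports the real cause rather than a blanket -EIO.

	/*
	 * Illustrative only; mirrors the new error path in
	 * ext4_finish_bio(). Assumes <linux/bio.h> and
	 * <linux/pagemap.h> are available, as in fs/ext4/page-io.c.
	 */
	static void record_wb_error(struct bio *bio, struct folio *folio)
	{
		if (bio->bi_status) {
			/* e.g. BLK_STS_NOSPC becomes -ENOSPC, not -EIO */
			int err = blk_status_to_errno(bio->bi_status);

			mapping_set_error(folio->mapping, err);
		}
	}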
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Theodore Ts'o <tytso@mit.edu>
Link: https://lore.kernel.org/r/20230324180129.1220691-5-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent cd57b771
@@ -99,30 +99,30 @@ static void buffer_io_error(struct buffer_head *bh)
 
 static void ext4_finish_bio(struct bio *bio)
 {
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
+	struct folio_iter fi;
 
-	bio_for_each_segment_all(bvec, bio, iter_all) {
-		struct page *page = bvec->bv_page;
-		struct page *bounce_page = NULL;
+	bio_for_each_folio_all(fi, bio) {
+		struct folio *folio = fi.folio;
+		struct folio *io_folio = NULL;
 		struct buffer_head *bh, *head;
-		unsigned bio_start = bvec->bv_offset;
-		unsigned bio_end = bio_start + bvec->bv_len;
+		size_t bio_start = fi.offset;
+		size_t bio_end = bio_start + fi.length;
 		unsigned under_io = 0;
 		unsigned long flags;
 
-		if (fscrypt_is_bounce_page(page)) {
-			bounce_page = page;
-			page = fscrypt_pagecache_page(bounce_page);
+		if (fscrypt_is_bounce_folio(folio)) {
+			io_folio = folio;
+			folio = fscrypt_pagecache_folio(folio);
 		}
 
 		if (bio->bi_status) {
-			SetPageError(page);
-			mapping_set_error(page->mapping, -EIO);
+			int err = blk_status_to_errno(bio->bi_status);
+			folio_set_error(folio);
+			mapping_set_error(folio->mapping, err);
 		}
-		bh = head = page_buffers(page);
+		bh = head = folio_buffers(folio);
 		/*
-		 * We check all buffers in the page under b_uptodate_lock
+		 * We check all buffers in the folio under b_uptodate_lock
 		 * to avoid races with other end io clearing async_write flags
 		 */
 		spin_lock_irqsave(&head->b_uptodate_lock, flags);
@@ -141,8 +141,8 @@ static void ext4_finish_bio(struct bio *bio)
 		} while ((bh = bh->b_this_page) != head);
 		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
 		if (!under_io) {
-			fscrypt_free_bounce_page(bounce_page);
-			end_page_writeback(page);
+			fscrypt_free_bounce_page(&io_folio->page);
+			folio_end_writeback(folio);
 		}
 	}
 }