Commit 81a0d3e1 authored by Matthew Wilcox, committed by Theodore Ts'o

ext4: Convert mpage_submit_page() to mpage_submit_folio()

All callers now have a folio, so we can pass one in and use the folio
APIs, supporting large folios as well as saving instructions by
eliminating calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Theodore Ts'o <tytso@mit.edu>
Link: https://lore.kernel.org/r/20230324180129.1220691-7-willy@infradead.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 4da2f6e3
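A note on the commit message's compound_head() point, as a rough userspace model rather than kernel code (the struct layouts and helper names below are simplified stand-ins): legacy page APIs must resolve a possibly-tail page of a compound page to its head on every call, whereas a struct folio refers to a head page by construction, so the folio APIs can skip that lookup entirely.

#include <stdbool.h>
#include <stdio.h>

/* Simplified model: tail pages of a compound page point at their head. */
struct page {
        struct page *head;      /* NULL for a head (or order-0) page */
        bool dirty;
};

/* A folio wraps a head page, so no head lookup is ever needed. */
struct folio {
        struct page page;
};

static struct page *compound_head(struct page *page)
{
        return page->head ? page->head : page;  /* branch on every page API call */
}

static void set_page_dirty_model(struct page *page)
{
        compound_head(page)->dirty = true;      /* page API: resolve head first */
}

static void folio_mark_dirty_model(struct folio *folio)
{
        folio->page.dirty = true;               /* folio API: already the head */
}

int main(void)
{
        struct page head = { .head = NULL }, tail = { .head = &head };
        struct folio *folio = (struct folio *)&head;

        set_page_dirty_model(&tail);            /* dirties the head via lookup */
        folio_mark_dirty_model(folio);          /* no lookup needed */
        printf("head dirty: %d\n", head.dirty);
        return 0;
}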
@@ -1869,34 +1869,33 @@ static void mpage_page_done(struct mpage_da_data *mpd, struct page *page)
 	unlock_page(page);
 }
 
-static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
+static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
 {
-	int len;
+	size_t len;
 	loff_t size;
 	int err;
 
-	BUG_ON(page->index != mpd->first_page);
-	clear_page_dirty_for_io(page);
+	BUG_ON(folio->index != mpd->first_page);
+	folio_clear_dirty_for_io(folio);
 	/*
 	 * We have to be very careful here! Nothing protects writeback path
 	 * against i_size changes and the page can be writeably mapped into
 	 * page tables. So an application can be growing i_size and writing
-	 * data through mmap while writeback runs. clear_page_dirty_for_io()
+	 * data through mmap while writeback runs. folio_clear_dirty_for_io()
 	 * write-protects our page in page tables and the page cannot get
-	 * written to again until we release page lock. So only after
-	 * clear_page_dirty_for_io() we are safe to sample i_size for
+	 * written to again until we release folio lock. So only after
+	 * folio_clear_dirty_for_io() we are safe to sample i_size for
 	 * ext4_bio_write_page() to zero-out tail of the written page. We rely
 	 * on the barrier provided by TestClearPageDirty in
-	 * clear_page_dirty_for_io() to make sure i_size is really sampled only
+	 * folio_clear_dirty_for_io() to make sure i_size is really sampled only
 	 * after page tables are updated.
 	 */
 	size = i_size_read(mpd->inode);
-	if (page->index == size >> PAGE_SHIFT &&
+	len = folio_size(folio);
+	if (folio_pos(folio) + len > size &&
 	    !ext4_verity_in_progress(mpd->inode))
 		len = size & ~PAGE_MASK;
-	else
-		len = PAGE_SIZE;
-	err = ext4_bio_write_page(&mpd->io_submit, page, len);
+	err = ext4_bio_write_page(&mpd->io_submit, &folio->page, len);
 	if (!err)
 		mpd->wbc->nr_to_write--;
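The one behavioural subtlety in the hunk above is the length computation. The old code could only ask "is this the single page containing EOF?"; the new code starts from folio_size() and clamps whenever the folio extends past i_size, which also works once a folio spans multiple pages. Note that the clamp itself, size & ~PAGE_MASK, still yields the byte count within the final page, preserving the old single-page result. A minimal userspace model of the new logic (assuming 4 KiB pages and omitting the ext4_verity_in_progress() case):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/*
 * pos is folio_pos(folio) (byte offset of the folio in the file),
 * flen is folio_size(folio), size is i_size_read(inode).
 */
static unsigned long writeback_len(unsigned long pos, unsigned long flen,
                                   unsigned long size)
{
        unsigned long len = flen;

        /* Folio straddles EOF: write only up to the last valid byte.
         * size & ~PAGE_MASK is the byte count within the final,
         * partially-filled page. */
        if (pos + len > size)
                len = size & ~PAGE_MASK;
        return len;
}

int main(void)
{
        /* Page at offset 8192 in a 10000-byte file: 1808 valid bytes. */
        printf("%lu\n", writeback_len(8192, PAGE_SIZE, 10000));
        /* Folio entirely below EOF: write all of it. */
        printf("%lu\n", writeback_len(0, PAGE_SIZE, 10000));
        return 0;
}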
@@ -2009,7 +2008,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
 	} while (lblk++, (bh = bh->b_this_page) != head);
 	/* So far everything mapped? Submit the page for IO. */
 	if (mpd->map.m_len == 0) {
-		err = mpage_submit_page(mpd, head->b_page);
+		err = mpage_submit_folio(mpd, head->b_folio);
 		if (err < 0)
 			return err;
 		mpage_page_done(mpd, head->b_page);
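In the hunk above, head->b_page becomes head->b_folio with no conversion call, because struct buffer_head in kernels of this era carries b_folio as an alias of b_page inside an anonymous union: whichever way the field was stored, either view names the same object. A simplified sketch of that aliasing idea (not the kernel's actual struct layout):

#include <stdio.h>

struct page { int id; };
struct folio { struct page page; };

/* Sketch: the two pointers share storage, so a buffer can hand back
 * the folio it belongs to without any lookup or conversion. */
struct buffer_head_model {
        union {
                struct page *b_page;
                struct folio *b_folio;
        };
};

int main(void)
{
        struct folio f = { .page = { .id = 42 } };
        struct buffer_head_model bh = { .b_page = &f.page };

        /* Reading through the union's other member yields the folio. */
        printf("folio page id: %d\n", bh.b_folio->page.id);
        return 0;
}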
@@ -2142,7 +2141,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 		if (err < 0 || map_bh)
 			goto out;
 		/* Page fully mapped - let IO run! */
-		err = mpage_submit_page(mpd, &folio->page);
+		err = mpage_submit_folio(mpd, folio);
 		if (err < 0)
 			goto out;
 		mpage_page_done(mpd, &folio->page);
@@ -2532,12 +2531,12 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 			if (ext4_page_nomap_can_writeout(&folio->page)) {
 				WARN_ON_ONCE(sb->s_writers.frozen ==
 					     SB_FREEZE_COMPLETE);
-				err = mpage_submit_page(mpd, &folio->page);
+				err = mpage_submit_folio(mpd, folio);
 				if (err < 0)
 					goto out;
 			}
 			/* Pending dirtying of journalled data? */
-			if (PageChecked(&folio->page)) {
+			if (folio_test_checked(folio)) {
 				WARN_ON_ONCE(sb->s_writers.frozen >=
 					     SB_FREEZE_FS);
 				err = mpage_journal_page_buffers(handle,