Commit 0fff435f authored by Vishal Moola (Oracle), committed by Andrew Morton

page-writeback: convert write_cache_pages() to use filemap_get_folios_tag()

Convert function to use folios throughout.  This is in preparation for the
removal of find_get_pages_range_tag().  This change removes 8 calls to
compound_head(), and the function now supports large folios.

Link: https://lkml.kernel.org/r/20230104211448.4804-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6817ef51
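
For context, below is a minimal standalone sketch (not part of this commit) of the batched, tagged folio lookup pattern that write_cache_pages() adopts in the diff that follows. The helper name walk_dirty_folios() and its skeleton are hypothetical; only the filemap_get_folios_tag()/folio_batch calls mirror the patch.

/*
 * Hypothetical sketch of the batched lookup pattern used by the patch:
 * fill a folio_batch with folios carrying the writeback tag, walk the
 * batch, then release it and repeat until the range is exhausted.
 */
#include <linux/pagemap.h>	/* filemap_get_folios_tag(), folio_lock() */
#include <linux/pagevec.h>	/* struct folio_batch, folio_batch_init() */
#include <linux/sched.h>	/* cond_resched() */
#include <linux/xarray.h>	/* xa_mark_t */

static void walk_dirty_folios(struct address_space *mapping,
			      pgoff_t index, pgoff_t end, xa_mark_t tag)
{
	struct folio_batch fbatch;
	unsigned int nr_folios;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (index <= end) {
		/* Advances @index past the last folio it returns. */
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
						   tag, &fbatch);
		if (nr_folios == 0)
			break;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... per-folio writeback work goes here ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

Because filemap_get_folios_tag() advances the index itself and hands back whole folios, the caller needs no compound_head() calls and a large folio is handled as a single batch entry, which is what the commit message above refers to.
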
@@ -2398,15 +2398,15 @@ int write_cache_pages(struct address_space *mapping,
 	int ret = 0;
 	int done = 0;
 	int error;
-	struct pagevec pvec;
-	int nr_pages;
+	struct folio_batch fbatch;
+	int nr_folios;
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
 	pgoff_t done_index;
 	int range_whole = 0;
 	xa_mark_t tag;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* prev offset */
 		end = -1;
@@ -2426,17 +2426,18 @@ int write_cache_pages(struct address_space *mapping,
 	while (!done && (index <= end)) {
 		int i;
 
-		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
-				tag);
-		if (nr_pages == 0)
+		nr_folios = filemap_get_folios_tag(mapping, &index, end,
+				tag, &fbatch);
+
+		if (nr_folios == 0)
 			break;
 
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
+		for (i = 0; i < nr_folios; i++) {
+			struct folio *folio = fbatch.folios[i];
 
-			done_index = page->index;
+			done_index = folio->index;
 
-			lock_page(page);
+			folio_lock(folio);
 
 			/*
 			 * Page truncated or invalidated. We can freely skip it
@@ -2446,30 +2447,30 @@ int write_cache_pages(struct address_space *mapping,
 			 * even if there is now a new, dirty page at the same
 			 * pagecache address.
 			 */
-			if (unlikely(page->mapping != mapping)) {
+			if (unlikely(folio->mapping != mapping)) {
 continue_unlock:
-				unlock_page(page);
+				folio_unlock(folio);
 				continue;
 			}
 
-			if (!PageDirty(page)) {
+			if (!folio_test_dirty(folio)) {
 				/* someone wrote it for us */
 				goto continue_unlock;
 			}
 
-			if (PageWriteback(page)) {
+			if (folio_test_writeback(folio)) {
 				if (wbc->sync_mode != WB_SYNC_NONE)
-					wait_on_page_writeback(page);
+					folio_wait_writeback(folio);
 				else
 					goto continue_unlock;
 			}
 
-			BUG_ON(PageWriteback(page));
-			if (!clear_page_dirty_for_io(page))
+			BUG_ON(folio_test_writeback(folio));
+			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
 
 			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-			error = (*writepage)(page, wbc, data);
+			error = writepage(&folio->page, wbc, data);
 			if (unlikely(error)) {
 				/*
 				 * Handle errors according to the type of
@@ -2484,11 +2485,12 @@ int write_cache_pages(struct address_space *mapping,
 				 * the first error.
 				 */
 				if (error == AOP_WRITEPAGE_ACTIVATE) {
-					unlock_page(page);
+					folio_unlock(folio);
 					error = 0;
 				} else if (wbc->sync_mode != WB_SYNC_ALL) {
 					ret = error;
-					done_index = page->index + 1;
+					done_index = folio->index +
+						folio_nr_pages(folio);
 					done = 1;
 					break;
 				}
@@ -2508,7 +2510,7 @@ int write_cache_pages(struct address_space *mapping,
 				break;
 			}
 		}
-		pagevec_release(&pvec);
+		folio_batch_release(&fbatch);
 		cond_resched();
 	}
 