nilfs2: Convert nilfs_copy_back_pages() to use filemap_get_folios()

Use folios throughout.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian Brauner (Microsoft) <brauner@kernel.org>
parent 1508062e
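For reference, the batched folio lookup the patch below adopts follows a standard pattern. This is a minimal sketch, not part of the patch (walk_mapping is a hypothetical helper): filemap_get_folios() fills the batch with folios starting at *start, advances *start past the last folio returned, and takes a reference on each one, which folio_batch_release() later drops.

	/*
	 * Illustrative sketch only, not nilfs2 code; walk_mapping is a
	 * hypothetical helper showing the filemap_get_folios() loop shape.
	 */
	#include <linux/pagemap.h>
	#include <linux/pagevec.h>
	#include <linux/sched.h>

	static void walk_mapping(struct address_space *mapping)
	{
		struct folio_batch fbatch;
		pgoff_t start = 0;	/* cursor, advanced by each lookup */
		unsigned int i;

		folio_batch_init(&fbatch);
		/* ~0UL: no upper bound on the index range */
		while (filemap_get_folios(mapping, &start, ~0UL, &fbatch)) {
			for (i = 0; i < folio_batch_count(&fbatch); i++) {
				struct folio *folio = fbatch.folios[i];

				folio_lock(folio);
				/* ... operate on the locked folio ... */
				folio_unlock(folio);
			}
			/* drop the references the lookup took */
			folio_batch_release(&fbatch);
			cond_resched();
		}
	}

Compared with pagevec_lookup(), filemap_get_folios() additionally takes an explicit end index (the patch passes ~0UL, i.e. unbounded) and returns folios, each of which may span more than one page.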
@@ -294,57 +294,57 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
 void nilfs_copy_back_pages(struct address_space *dmap,
 			   struct address_space *smap)
 {
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	unsigned int i, n;
-	pgoff_t index = 0;
+	pgoff_t start = 0;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 repeat:
-	n = pagevec_lookup(&pvec, smap, &index);
+	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
 	if (!n)
 		return;
 
-	for (i = 0; i < pagevec_count(&pvec); i++) {
-		struct page *page = pvec.pages[i], *dpage;
-		pgoff_t offset = page->index;
-
-		lock_page(page);
-		dpage = find_lock_page(dmap, offset);
-		if (dpage) {
-			/* overwrite existing page in the destination cache */
-			WARN_ON(PageDirty(dpage));
-			nilfs_copy_page(dpage, page, 0);
-			unlock_page(dpage);
-			put_page(dpage);
-			/* Do we not need to remove page from smap here? */
+	for (i = 0; i < folio_batch_count(&fbatch); i++) {
+		struct folio *folio = fbatch.folios[i], *dfolio;
+		pgoff_t index = folio->index;
+
+		folio_lock(folio);
+		dfolio = filemap_lock_folio(dmap, index);
+		if (dfolio) {
+			/* overwrite existing folio in the destination cache */
+			WARN_ON(folio_test_dirty(dfolio));
+			nilfs_copy_page(&dfolio->page, &folio->page, 0);
+			folio_unlock(dfolio);
+			folio_put(dfolio);
+			/* Do we not need to remove folio from smap here? */
 		} else {
-			struct page *p;
+			struct folio *f;
 
-			/* move the page to the destination cache */
+			/* move the folio to the destination cache */
 			xa_lock_irq(&smap->i_pages);
-			p = __xa_erase(&smap->i_pages, offset);
-			WARN_ON(page != p);
+			f = __xa_erase(&smap->i_pages, index);
+			WARN_ON(folio != f);
 			smap->nrpages--;
 			xa_unlock_irq(&smap->i_pages);
 
 			xa_lock_irq(&dmap->i_pages);
-			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
-			if (unlikely(p)) {
+			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
+			if (unlikely(f)) {
 				/* Probably -ENOMEM */
-				page->mapping = NULL;
-				put_page(page);
+				folio->mapping = NULL;
+				folio_put(folio);
 			} else {
-				page->mapping = dmap;
+				folio->mapping = dmap;
 				dmap->nrpages++;
-				if (PageDirty(page))
-					__xa_set_mark(&dmap->i_pages, offset,
+				if (folio_test_dirty(folio))
+					__xa_set_mark(&dmap->i_pages, index,
 							PAGECACHE_TAG_DIRTY);
 			}
 			xa_unlock_irq(&dmap->i_pages);
 		}
-		unlock_page(page);
+		folio_unlock(folio);
 	}
-	pagevec_release(&pvec);
+	folio_batch_release(&fbatch);
 	cond_resched();
 
 	goto repeat;
...