Commit 03fbf77a authored by Matthew Wilcox (Oracle), committed by David Sterba

btrfs: convert defrag_prepare_one_page() to use a folio

Use a folio throughout defrag_prepare_one_page() to remove dozens of
hidden calls to compound_head().  There is no support here for large
folios; indeed, turn the existing check for PageCompound into a check
for large folios.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent dfba9f47
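
A note on the motivation (an annotation, not part of the commit): legacy page-flag helpers such as PageUptodate() accept any page, including a tail page of a compound page, so each call resolves the head page via compound_head() before testing the flag. A struct folio can never refer to a tail page, so the folio_test_*() variants skip that step entirely. A minimal sketch of the difference, using hypothetical my_*() helper names simplified from include/linux/page-flags.h (memory barriers and page-flag policies omitted):

/*
 * Sketch only; my_*() are hypothetical helpers modeled on the real
 * PageUptodate()/folio_test_uptodate() in include/linux/page-flags.h.
 */
static inline bool my_page_uptodate(const struct page *page)
{
        /* Any page may be a tail page, so find the head page first. */
        return test_bit(PG_uptodate, &compound_head(page)->flags);
}

static inline bool my_folio_uptodate(const struct folio *folio)
{
        /* A folio is never a tail page; test its flags directly. */
        return test_bit(PG_uptodate, &folio->flags);
}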
@@ -868,13 +868,14 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t index)
         u64 page_start = (u64)index << PAGE_SHIFT;
         u64 page_end = page_start + PAGE_SIZE - 1;
         struct extent_state *cached_state = NULL;
-        struct page *page;
+        struct folio *folio;
         int ret;
 
 again:
-        page = find_or_create_page(mapping, index, mask);
-        if (!page)
-                return ERR_PTR(-ENOMEM);
+        folio = __filemap_get_folio(mapping, index,
+                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+        if (IS_ERR(folio))
+                return &folio->page;
 
         /*
          * Since we can defragment files opened read-only, we can encounter
@@ -884,16 +885,16 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t index)
          * executables that explicitly enable them, so this isn't very
          * restrictive.
          */
-        if (PageCompound(page)) {
-                unlock_page(page);
-                put_page(page);
+        if (folio_test_large(folio)) {
+                folio_unlock(folio);
+                folio_put(folio);
                 return ERR_PTR(-ETXTBSY);
         }
 
-        ret = set_page_extent_mapped(page);
+        ret = set_folio_extent_mapped(folio);
         if (ret < 0) {
-                unlock_page(page);
-                put_page(page);
+                folio_unlock(folio);
+                folio_put(folio);
                 return ERR_PTR(ret);
         }
 
@@ -908,17 +909,17 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t index)
                 if (!ordered)
                         break;
 
-                unlock_page(page);
+                folio_unlock(folio);
                 btrfs_start_ordered_extent(ordered);
                 btrfs_put_ordered_extent(ordered);
-                lock_page(page);
+                folio_lock(folio);
                 /*
-                 * We unlocked the page above, so we need check if it was
+                 * We unlocked the folio above, so we need check if it was
                  * released or not.
                  */
-                if (page->mapping != mapping || !PagePrivate(page)) {
-                        unlock_page(page);
-                        put_page(page);
+                if (folio->mapping != mapping || !folio->private) {
+                        folio_unlock(folio);
+                        folio_put(folio);
                         goto again;
                 }
         }
@@ -927,21 +928,21 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t index)
          * Now the page range has no ordered extent any more. Read the page to
          * make it uptodate.
          */
-        if (!PageUptodate(page)) {
-                btrfs_read_folio(NULL, page_folio(page));
-                lock_page(page);
-                if (page->mapping != mapping || !PagePrivate(page)) {
-                        unlock_page(page);
-                        put_page(page);
+        if (!folio_test_uptodate(folio)) {
+                btrfs_read_folio(NULL, folio);
+                folio_lock(folio);
+                if (folio->mapping != mapping || !folio->private) {
+                        folio_unlock(folio);
+                        folio_put(folio);
                         goto again;
                 }
-                if (!PageUptodate(page)) {
-                        unlock_page(page);
-                        put_page(page);
+                if (!folio_test_uptodate(folio)) {
+                        folio_unlock(folio);
+                        folio_put(folio);
                         return ERR_PTR(-EIO);
                 }
         }
-        return page;
+        return &folio->page;
 }
 
 struct defrag_target_range {
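
One subtlety in the new allocation path, noted as an annotation rather than part of the commit: find_or_create_page() returned NULL on failure, while __filemap_get_folio() returns an ERR_PTR-encoded pointer. Returning &folio->page on the error path still propagates the error correctly because struct page is the first member of struct folio (offset 0), so &folio->page is numerically identical to folio and the caller's IS_ERR()/PTR_ERR() checks see the same encoded value. A sketch of the pattern, with a hypothetical wrapper name:

/*
 * Sketch; example_get_locked_page() is a hypothetical wrapper, not a
 * kernel function. Since offsetof(struct folio, page) == 0, an
 * ERR_PTR-encoded folio pointer passes through &folio->page unchanged.
 */
struct page *example_get_locked_page(struct address_space *mapping,
                                     pgoff_t index, gfp_t gfp)
{
        struct folio *folio = __filemap_get_folio(mapping, index,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);

        if (IS_ERR(folio))
                return &folio->page;    /* same bits as, e.g., ERR_PTR(-ENOMEM) */
        return folio_page(folio, 0);
}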