Commit c808c1dc authored by Josef Bacik, committed by David Sterba

btrfs: convert add_ra_bio_pages() to use only folios

Willy is going to get rid of page->index, and add_ra_bio_pages uses
page->index.  Make his life easier by converting add_ra_bio_pages to use
folios so that we are no longer using page->index.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 9b320229
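For context, the heart of the conversion below is the page-cache lookup: the raw xa_load() peek at mapping->i_pages is replaced with __filemap_get_folio(), which filters out shadow (value) entries itself and returns a referenced folio whose size may span multiple pages. A minimal sketch of that pattern follows; it is illustrative only, not the committed code, and peek_cached_span is a hypothetical helper name (it assumes a kernel build environment):

    #include <linux/pagemap.h>

    /*
     * Return the number of cached bytes from @cur to the end of the
     * folio covering @index, or 0 if nothing is cached there.
     * (Hypothetical helper, for illustration only.)
     */
    static u64 peek_cached_span(struct address_space *mapping,
                                pgoff_t index, u64 cur)
    {
            /*
             * With no FGP flags, __filemap_get_folio() only looks up;
             * shadow entries come back as an ERR_PTR, unlike xa_load(),
             * which forced callers to filter them with xa_is_value().
             */
            struct folio *folio = __filemap_get_folio(mapping, index, 0, 0);
            u64 span;

            if (IS_ERR(folio))
                    return 0;

            /*
             * folio_size()/offset_in_folio() handle large folios, which
             * a bare PAGE_SIZE/offset_in_page() calculation would not.
             */
            span = folio_size(folio) - offset_in_folio(folio, cur);
            folio_put(folio);
            return span;
    }

Because __filemap_get_folio() takes a reference even on a read-only peek, the sketch must drop it with folio_put(), mirroring the folio_put() the first hunk below adds on the cached-folio path.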
@@ -420,7 +420,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
 	u64 isize = i_size_read(inode);
 	int ret;
-	struct page *page;
+	struct folio *folio;
 	struct extent_map *em;
 	struct address_space *mapping = inode->i_mapping;
 	struct extent_map_tree *em_tree;
@@ -453,9 +453,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		if (pg_index > end_index)
 			break;
 
-		page = xa_load(&mapping->i_pages, pg_index);
-		if (page && !xa_is_value(page)) {
-			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
+		folio = __filemap_get_folio(mapping, pg_index, 0, 0);
+		if (!IS_ERR(folio)) {
+			u64 folio_sz = folio_size(folio);
+			u64 offset = offset_in_folio(folio, cur);
+
+			folio_put(folio);
+			sectors_missed += (folio_sz - offset) >>
 					fs_info->sectorsize_bits;
 
 			/* Beyond threshold, no need to continue */
@@ -466,35 +470,35 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			 * Jump to next page start as we already have page for
 			 * current offset.
 			 */
-			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
+			cur += (folio_sz - offset);
 			continue;
 		}
 
-		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
-								 ~__GFP_FS));
-		if (!page)
+		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
+								   ~__GFP_FS), 0);
+		if (!folio)
 			break;
 
-		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
-			put_page(page);
+		if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
 			/* There is already a page, skip to page end */
-			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
+			cur += folio_size(folio);
+			folio_put(folio);
 			continue;
 		}
 
-		if (!*memstall && PageWorkingset(page)) {
+		if (!*memstall && folio_test_workingset(folio)) {
 			psi_memstall_enter(pflags);
 			*memstall = 1;
 		}
 
-		ret = set_page_extent_mapped(page);
+		ret = set_folio_extent_mapped(folio);
 		if (ret < 0) {
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			break;
 		}
 
-		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
+		page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
 		lock_extent(tree, cur, page_end, NULL);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
@@ -511,28 +515,28 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		    orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
 			unlock_extent(tree, cur, page_end, NULL);
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			break;
 		}
 		add_size = min(em->start + em->len, page_end + 1) - cur;
 		free_extent_map(em);
 
-		if (page->index == end_index) {
-			size_t zero_offset = offset_in_page(isize);
+		if (folio->index == end_index) {
+			size_t zero_offset = offset_in_folio(folio, isize);
 
 			if (zero_offset) {
 				int zeros;
-				zeros = PAGE_SIZE - zero_offset;
-				memzero_page(page, zero_offset, zeros);
+				zeros = folio_size(folio) - zero_offset;
+				folio_zero_range(folio, zero_offset, zeros);
 			}
 		}
 
-		ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
-		if (ret != add_size) {
+		if (!bio_add_folio(orig_bio, folio, add_size,
+				   offset_in_folio(folio, cur))) {
 			unlock_extent(tree, cur, page_end, NULL);
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			break;
 		}
 
 		/*
@@ -541,9 +545,9 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 * subpage::readers and to unlock the page.
 		 */
 		if (fs_info->sectorsize < PAGE_SIZE)
-			btrfs_subpage_start_reader(fs_info, page_folio(page),
-						   cur, add_size);
-		put_page(page);
+			btrfs_subpage_start_reader(fs_info, folio, cur,
+						   add_size);
+		folio_put(folio);
 		cur += add_size;
 	}
 	return 0;