Commit 970ea374 authored by David Sterba

btrfs: pass a valid extent map cache pointer to __get_extent_map()

We can pass a valid em cache pointer down to __get_extent_map() and
drop the validity check. This avoids the special case, the call stacks
are simple:

btrfs_read_folio
  btrfs_do_readpage
    __get_extent_map

extent_readahead
  contiguous_readpages
    btrfs_do_readpage
      __get_extent_map
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 5a8a57f9
@@ -970,7 +970,9 @@ static struct extent_map *__get_extent_map(struct inode *inode, struct page *pag
 {
 	struct extent_map *em;
 
-	if (em_cached && *em_cached) {
+	ASSERT(em_cached);
+
+	if (*em_cached) {
 		em = *em_cached;
 		if (extent_map_in_tree(em) && start >= em->start &&
 		    start < extent_map_end(em)) {
@@ -983,7 +985,7 @@ static struct extent_map *__get_extent_map(struct inode *inode, struct page *pag
 	}
 
 	em = btrfs_get_extent(BTRFS_I(inode), page, start, len);
-	if (em_cached && !IS_ERR(em)) {
+	if (!IS_ERR(em)) {
 		BUG_ON(*em_cached);
 		refcount_inc(&em->refs);
 		*em_cached = em;
@@ -1154,11 +1156,14 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_SIZE - 1;
 	struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
+	struct extent_map *em_cached = NULL;
 	int ret;
 
 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
-	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
+	ret = btrfs_do_readpage(page, &em_cached, &bio_ctrl, NULL);
+	free_extent_map(em_cached);
+
 	/*
 	 * If btrfs_do_readpage() failed we will want to submit the assembled
 	 * bio to do the cleanup.
@@ -1176,6 +1181,8 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
 	struct btrfs_inode *inode = page_to_inode(pages[0]);
 	int index;
 
+	ASSERT(em_cached);
+
 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
 	for (index = 0; index < nr_pages; index++) {
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment