Commit e65ef21e authored by Nikolay Borisov, committed by David Sterba

btrfs: Exploit the fact that pages passed to extent_readpages are always contiguous

Currently extent_readpages (called from btrfs_readpages) always calls
__extent_readpages, which tries to assemble contiguous ranges of pages
and calls __do_contiguous_readpages whenever such a contiguous range
has been built.

It turns out this is unnecessary, because the generic MM code always
calls the filesystem's ->readpages callback (btrfs_readpages in this
case) with an already contiguous set of pages. Armed with this
knowledge, extent_readpages can be simplified by eliminating
__extent_readpages and calling contiguous_readpages (the renamed
__do_contiguous_readpages) directly.
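
To illustrate the invariant this relies on, here is a minimal userspace
model of the readahead batching done by mm/readahead.c (a sketch under
assumptions: simulate_readahead, readpages_cb and page_cached are
made-up names standing in for __do_page_cache_readahead, ->readpages
and the page cache; details vary by kernel version):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_INDICES 16

static bool page_cached[NR_INDICES];    /* toy model of the page cache */

/*
 * Stand-in for a filesystem's ->readpages callback.  The assert checks
 * the invariant the patch relies on: every batch it receives is a
 * contiguous run of page indices.
 */
static void readpages_cb(const int *batch, int nr)
{
        for (int i = 1; i < nr; i++)
                assert(batch[i] == batch[0] + i);
        printf("->readpages for indices [%d, %d]\n",
               batch[0], batch[0] + nr - 1);
}

/*
 * Rough model of __do_page_cache_readahead(): walk the requested
 * window and, on hitting an already-cached page, flush the batch
 * collected so far, so the callback never sees a hole.
 */
static void simulate_readahead(int start, int nr_to_read)
{
        int batch[NR_INDICES];
        int nr = 0;

        for (int i = 0; i < nr_to_read; i++) {
                int idx = start + i;

                if (page_cached[idx]) {
                        if (nr)
                                readpages_cb(batch, nr);
                        nr = 0;         /* hole: start a new batch */
                        continue;
                }
                batch[nr++] = idx;
        }
        if (nr)
                readpages_cb(batch, nr);
}

int main(void)
{
        page_cached[5] = true;          /* index 5 already cached */
        simulate_readahead(2, 8);       /* window [2, 9] */
        return 0;
}

With index 5 already cached, the window [2, 9] is submitted as two
contiguous batches, [2, 4] and [6, 9], never as one batch with a hole.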

The only edge case that needs to be handled is when
add_to_page_cache_lru fails. This is easy: submit however many pages
were successfully added to the LRU before the failure. The failure can
happen when a page in the range is already present in the page cache,
in which case it does not need to be read again; for any other error
there is nothing else we can do.
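
Schematically, the reworked batching loop in extent_readpages preserves
that contiguity on failure by cutting the batch short. A minimal sketch
under the same modelling assumptions (add_to_cache and submit_batch are
hypothetical stand-ins for add_to_page_cache_lru and
contiguous_readpages, not kernel API):

#include <stdio.h>

#define POOL_SIZE 16

/*
 * Hypothetical stand-in for add_to_page_cache_lru(): pretend index 6
 * is already in the page cache, so inserting it fails.
 */
static int add_to_cache(int idx)
{
        return idx == 6 ? -1 : 0;
}

/* Stand-in for contiguous_readpages(): submit one contiguous batch. */
static void submit_batch(const int *pool, int nr)
{
        printf("contiguous_readpages for [%d, %d]\n",
               pool[0], pool[0] + nr - 1);
}

int main(void)
{
        int next = 0, last = 10;        /* pages 0..9 queued by readahead */
        int pool[POOL_SIZE];

        while (next < last) {
                int nr = 0;

                while (nr < POOL_SIZE && next < last) {
                        int idx = next++;

                        /* break, not continue: skipping a page would
                         * leave a hole in pool[], breaking contiguity */
                        if (add_to_cache(idx))
                                break;
                        pool[nr++] = idx;
                }
                if (nr)
                        submit_batch(pool, nr);
        }
        return 0;
}

With the old continue, a failure at index 6 would have left pool[]
holding 0..5 and 7..9 as a single non-contiguous pool, which is why the
old code needed __extent_readpages to split it; breaking instead
submits [0, 5] and lets the next outer iteration start a fresh
contiguous batch at 7.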
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ed1b4ed7
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3076,7 +3076,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 	return ret;
 }
 
-static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+static inline void contiguous_readpages(struct extent_io_tree *tree,
 					     struct page *pages[], int nr_pages,
 					     u64 start, u64 end,
 					     struct extent_map **em_cached,
@@ -3107,46 +3107,6 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 	}
 }
 
-static void __extent_readpages(struct extent_io_tree *tree,
-			       struct page *pages[],
-			       int nr_pages,
-			       struct extent_map **em_cached,
-			       struct bio **bio, unsigned long *bio_flags,
-			       u64 *prev_em_start)
-{
-	u64 start = 0;
-	u64 end = 0;
-	u64 page_start;
-	int index;
-	int first_index = 0;
-
-	for (index = 0; index < nr_pages; index++) {
-		page_start = page_offset(pages[index]);
-		if (!end) {
-			start = page_start;
-			end = start + PAGE_SIZE - 1;
-			first_index = index;
-		} else if (end + 1 == page_start) {
-			end += PAGE_SIZE;
-		} else {
-			__do_contiguous_readpages(tree, &pages[first_index],
-						  index - first_index, start,
-						  end, em_cached,
-						  bio, bio_flags,
-						  prev_em_start);
-			start = page_start;
-			end = start + PAGE_SIZE - 1;
-			first_index = index;
-		}
-	}
-
-	if (end)
-		__do_contiguous_readpages(tree, &pages[first_index],
-					  index - first_index, start,
-					  end, em_cached, bio,
-					  bio_flags, prev_em_start);
-}
-
 static int __extent_read_full_page(struct extent_io_tree *tree,
 				   struct page *page,
 				   get_extent_t *get_extent,
@@ -4109,6 +4069,8 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
 	u64 prev_em_start = (u64)-1;
 
 	while (!list_empty(pages)) {
+		u64 contig_end = 0;
+
 		for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
 			struct page *page = lru_to_page(pages);
 
@@ -4117,14 +4079,22 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
 			if (add_to_page_cache_lru(page, mapping, page->index,
 					readahead_gfp_mask(mapping))) {
 				put_page(page);
-				continue;
+				break;
 			}
 
 			pagepool[nr++] = page;
+			contig_end = page_offset(page) + PAGE_SIZE - 1;
 		}
 
-		__extent_readpages(tree, pagepool, nr, &em_cached, &bio,
-				   &bio_flags, &prev_em_start);
+		if (nr) {
+			u64 contig_start = page_offset(pagepool[0]);
+
+			ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
+
+			contiguous_readpages(tree, pagepool, nr, contig_start,
+					     contig_end, &em_cached, &bio,
+					     &bio_flags, &prev_em_start);
+		}
 	}
 
 	if (em_cached)
...