Commit baf863b9 authored by Liu Bo's avatar Liu Bo Committed by David Sterba

Btrfs: fix eb memory leak due to readpage failure

eb->io_pages is set in read_extent_buffer_pages().

In case of readpage failure, for pages that have been added to bio,
it calls bio_endio and later readpage_io_failed_hook() does the work.

When one of this eb's pages (necessarily not the 1st page) fails to be added to the
bio due to a failure in merge_bio(), bio_endio is never called for it, so
eb->io_pages is never decremented and the eb is eventually leaked.

This makes __do_readpage propagate errors to its callers, and adds the
matching atomic_dec(&eb->io_pages) in read_extent_buffer_pages.
Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent f4907095
...@@ -2878,6 +2878,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, ...@@ -2878,6 +2878,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
* into the tree that are removed when the IO is done (by the end_io * into the tree that are removed when the IO is done (by the end_io
* handlers) * handlers)
* XXX JDM: This needs looking at to ensure proper page locking * XXX JDM: This needs looking at to ensure proper page locking
* return 0 on success, otherwise return error
*/ */
static int __do_readpage(struct extent_io_tree *tree, static int __do_readpage(struct extent_io_tree *tree,
struct page *page, struct page *page,
...@@ -2899,7 +2900,7 @@ static int __do_readpage(struct extent_io_tree *tree, ...@@ -2899,7 +2900,7 @@ static int __do_readpage(struct extent_io_tree *tree,
sector_t sector; sector_t sector;
struct extent_map *em; struct extent_map *em;
struct block_device *bdev; struct block_device *bdev;
int ret; int ret = 0;
int nr = 0; int nr = 0;
size_t pg_offset = 0; size_t pg_offset = 0;
size_t iosize; size_t iosize;
...@@ -3080,6 +3081,7 @@ static int __do_readpage(struct extent_io_tree *tree, ...@@ -3080,6 +3081,7 @@ static int __do_readpage(struct extent_io_tree *tree,
} else { } else {
SetPageError(page); SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1); unlock_extent(tree, cur, cur + iosize - 1);
goto out;
} }
cur = cur + iosize; cur = cur + iosize;
pg_offset += iosize; pg_offset += iosize;
...@@ -3090,7 +3092,7 @@ static int __do_readpage(struct extent_io_tree *tree, ...@@ -3090,7 +3092,7 @@ static int __do_readpage(struct extent_io_tree *tree,
SetPageUptodate(page); SetPageUptodate(page);
unlock_page(page); unlock_page(page);
} }
return 0; return ret;
} }
static inline void __do_contiguous_readpages(struct extent_io_tree *tree, static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
...@@ -5230,14 +5232,31 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, ...@@ -5230,14 +5232,31 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
atomic_set(&eb->io_pages, num_reads); atomic_set(&eb->io_pages, num_reads);
for (i = start_i; i < num_pages; i++) { for (i = start_i; i < num_pages; i++) {
page = eb->pages[i]; page = eb->pages[i];
if (!PageUptodate(page)) { if (!PageUptodate(page)) {
if (ret) {
atomic_dec(&eb->io_pages);
unlock_page(page);
continue;
}
ClearPageError(page); ClearPageError(page);
err = __extent_read_full_page(tree, page, err = __extent_read_full_page(tree, page,
get_extent, &bio, get_extent, &bio,
mirror_num, &bio_flags, mirror_num, &bio_flags,
READ | REQ_META); READ | REQ_META);
if (err) if (err) {
ret = err; ret = err;
/*
* We use &bio in above __extent_read_full_page,
* so we ensure that if it returns error, the
* current page fails to add itself to bio and
* it's been unlocked.
*
* We must dec io_pages by ourselves.
*/
atomic_dec(&eb->io_pages);
}
} else { } else {
unlock_page(page); unlock_page(page);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment