Commit 42dc7bab authored by Chris Mason

Btrfs: Fix compressed writes on truncated pages

The compression code was using isize to limit the amount of data it
sent through zlib, but it wasn't limiting the looping to just the
pages inside i_size.  The end result was trying to compress too many
pages, including ones that had never been set up and properly locked
down.  This made the compression code oops when it called
find_get_page() on a page that didn't exist.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 17d217fe
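
Before the diff, a minimal userspace sketch of the failure mode and of the fix pattern. All numbers and names below are illustrative, none of this is kernel API: a writeback range can extend past i_size after a truncate, and a loop that advances in fixed chunks has to stop at the end of the data, not at the end of the range.

#include <stdio.h>

int main(void)
{
	unsigned long long start = 0;
	unsigned long long end = 256 * 1024 - 1;	/* inclusive range end */
	unsigned long long isize = 100 * 1024;		/* file truncated to 100K */
	unsigned long long chunk = 128 * 1024;		/* per-pass window */

	/* the patch's clamp: never walk past the end of the data */
	unsigned long long actual_end = isize < end + 1 ? isize : end + 1;

	for (;;) {
		unsigned long long stop =
			start + chunk < actual_end ? start + chunk : actual_end;
		printf("compress [%llu, %llu)\n", start, stop);

		/*
		 * The old test, start + chunk < end, on its own would loop
		 * again and walk pages in [128K, 256K) that were never set
		 * up.  The added start + chunk < actual_end test stops the
		 * walk at the end of the data.
		 */
		if (!(start + chunk < end && start + chunk < actual_end))
			break;
		start += chunk;
	}
	return 0;
}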
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2887,8 +2887,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
 	path->keep_locks = 0;
 	BUG_ON(ret);
 
-	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
 	leaf = path->nodes[0];
+	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
 
 split:
 	item = btrfs_item_nr(leaf, path->slots[0]);
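
One note on the ctree.c hunk above, since it is easy to read past: the btrfs_search_slot() call just before this context can move the path onto a different leaf, so the old order asserted free space against a stale leaf pointer. These are the same two statements as in the hunk, with annotations added (the comments are mine, not from the source):

	leaf = path->nodes[0];	/* refresh first: the search may have moved the path */
	BUG_ON(btrfs_leaf_free_space(root, leaf) <
	       sizeof(struct btrfs_item));	/* now checks the leaf we will split */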
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -332,6 +332,7 @@ static noinline int compress_file_range(struct inode *inode,
 	u64 disk_num_bytes;
 	u64 blocksize = root->sectorsize;
 	u64 actual_end;
+	u64 isize = i_size_read(inode);
 	int ret = 0;
 	struct page **pages = NULL;
 	unsigned long nr_pages;
@@ -345,12 +346,12 @@ static noinline int compress_file_range(struct inode *inode,
 
 	orig_start = start;
 
+	actual_end = min_t(u64, isize, end + 1);
 again:
 	will_compress = 0;
 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
 
-	actual_end = min_t(u64, i_size_read(inode), end + 1);
 	total_compressed = actual_end - start;
 
 	/* we want to make sure that amount of ram required to uncompress
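
The two compress_file_range() hunks above work together: i_size is sampled once into isize at entry, and actual_end is computed from it before the again: label instead of inside the retry path. A sketch of the resulting shape, with my own comments added (assume the surrounding kernel context):

	u64 isize = i_size_read(inode);		/* one size snapshot per call */
	...
	actual_end = min_t(u64, isize, end + 1);	/* bound computed once, before any retry */
again:
	will_compress = 0;
	/* every retry pass now reuses actual_end rather than re-reading
	 * a size that could move between passes */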
@@ -488,7 +489,7 @@ static noinline int compress_file_range(struct inode *inode,
 		add_async_extent(async_cow, start, num_bytes,
 				 total_compressed, pages, nr_pages_ret);
 
-		if (start + num_bytes < end) {
+		if (start + num_bytes < end && start + num_bytes < actual_end) {
 			start += num_bytes;
 			pages = NULL;
 			cond_resched();
@@ -696,6 +697,7 @@ static noinline int cow_file_range(struct inode *inode,
 	u64 cur_alloc_size;
 	u64 blocksize = root->sectorsize;
 	u64 actual_end;
+	u64 isize = i_size_read(inode);
 	struct btrfs_key ins;
 	struct extent_map *em;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -705,7 +707,7 @@ static noinline int cow_file_range(struct inode *inode,
 
 	BUG_ON(!trans);
 	btrfs_set_trans_block_group(trans, inode);
 
-	actual_end = min_t(u64, i_size_read(inode), end + 1);
+	actual_end = min_t(u64, isize, end + 1);
 	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
 	num_bytes = max(blocksize, num_bytes);
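
Finally, the oops itself in sketch form. find_get_page() returns NULL for a page-cache index that was never instantiated, which is what the over-long walk handed it. A userspace mock of that contract (mock_find_get_page and every value here are stand-ins, not kernel code):

#include <stdio.h>

struct page { unsigned long index; };

/* mock of the lookup contract: NULL when the page was never created */
static struct page *mock_find_get_page(unsigned long index,
				       unsigned long nr_valid)
{
	static struct page p;
	if (index >= nr_valid)
		return NULL;
	p.index = index;
	return &p;
}

int main(void)
{
	unsigned long nr_valid = 2;	/* pages that exist below i_size */

	for (unsigned long i = 0; i < 4; i++) {
		struct page *page = mock_find_get_page(i, nr_valid);
		if (!page) {
			printf("index %lu: no page; the unpatched walk "
			       "tripped over exactly this\n", i);
			continue;
		}
		printf("index %lu: ok\n", i);
	}
	return 0;
}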