Commit 4adaa611 authored by Chris Mason's avatar Chris Mason

Btrfs: fix race between mmap writes and compression

Btrfs uses page_mkwrite to ensure stable pages during
crc calculations and mmap workloads.  We call clear_page_dirty_for_io
before we do any crcs, and this forces any application with the file
mapped to wait for the crc to finish before it is allowed to change
the file.

With compression on, the clear_page_dirty_for_io step is happening after
we've compressed the pages.  This means the applications might be
changing the pages while we are compressing them, and some of those
modifications might not hit the disk.

This commit adds the clear_page_dirty_for_io before compression starts
and makes sure to redirty the page if we have to fallback to
uncompressed IO as well.
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Reported-by: Alexandre Oliva <oliva@gnu.org>
cc: stable@vger.kernel.org
parent 1dd05682
...@@ -1257,6 +1257,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) ...@@ -1257,6 +1257,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
GFP_NOFS); GFP_NOFS);
} }
/*
 * Clear the dirty bit (for IO) on every page cache page covering the
 * byte range [start, end] of @inode.  Used before compression so that
 * mmap writers fault and wait (via page_mkwrite) instead of mutating
 * pages while they are being compressed.
 *
 * All pages in the range must already be present in the page cache.
 * Always returns 0.
 */
int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long last = end >> PAGE_CACHE_SHIFT;
	unsigned long pg;
	struct page *p;

	for (pg = start >> PAGE_CACHE_SHIFT; pg <= last; pg++) {
		p = find_get_page(inode->i_mapping, pg);
		/* pages should be in the extent_io_tree */
		BUG_ON(!p);
		clear_page_dirty_for_io(p);
		page_cache_release(p);
	}
	return 0;
}
/*
 * Re-dirty every page cache page covering the byte range [start, end]
 * of @inode.  Counterpart to extent_range_clear_dirty_for_io(): if
 * compression fails and we fall back to uncompressed IO, the pages we
 * cleaned must be made dirty again so their data is not lost.
 *
 * account_page_redirty() rebalances the dirty accounting that
 * clear_page_dirty_for_io() decremented, then the page is marked dirty.
 *
 * All pages in the range must already be present in the page cache.
 * Always returns 0.
 */
int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long last = end >> PAGE_CACHE_SHIFT;
	unsigned long pg;
	struct page *p;

	for (pg = start >> PAGE_CACHE_SHIFT; pg <= last; pg++) {
		p = find_get_page(inode->i_mapping, pg);
		/* pages should be in the extent_io_tree */
		BUG_ON(!p);
		account_page_redirty(p);
		__set_page_dirty_nobuffers(p);
		page_cache_release(p);
	}
	return 0;
}
/* /*
* helper function to set both pages and extents in the tree writeback * helper function to set both pages and extents in the tree writeback
*/ */
......
...@@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, ...@@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
unsigned long *map_len); unsigned long *map_len);
int extent_range_uptodate(struct extent_io_tree *tree, int extent_range_uptodate(struct extent_io_tree *tree,
u64 start, u64 end); u64 start, u64 end);
int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
int extent_clear_unlock_delalloc(struct inode *inode, int extent_clear_unlock_delalloc(struct inode *inode,
struct extent_io_tree *tree, struct extent_io_tree *tree,
u64 start, u64 end, struct page *locked_page, u64 start, u64 end, struct page *locked_page,
......
...@@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode, ...@@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode,
int i; int i;
int will_compress; int will_compress;
int compress_type = root->fs_info->compress_type; int compress_type = root->fs_info->compress_type;
int redirty = 0;
/* if this is a small write inside eof, kick off a defrag */ /* if this is a small write inside eof, kick off a defrag */
if ((end - start + 1) < 16 * 1024 && if ((end - start + 1) < 16 * 1024 &&
...@@ -415,6 +416,17 @@ static noinline int compress_file_range(struct inode *inode, ...@@ -415,6 +416,17 @@ static noinline int compress_file_range(struct inode *inode,
if (BTRFS_I(inode)->force_compress) if (BTRFS_I(inode)->force_compress)
compress_type = BTRFS_I(inode)->force_compress; compress_type = BTRFS_I(inode)->force_compress;
/*
* we need to call clear_page_dirty_for_io on each
* page in the range. Otherwise applications with the file
* mmap'd can wander in and change the page contents while
* we are compressing them.
*
* If the compression fails for any reason, we set the pages
* dirty again later on.
*/
extent_range_clear_dirty_for_io(inode, start, end);
redirty = 1;
ret = btrfs_compress_pages(compress_type, ret = btrfs_compress_pages(compress_type,
inode->i_mapping, start, inode->i_mapping, start,
total_compressed, pages, total_compressed, pages,
...@@ -554,6 +566,8 @@ static noinline int compress_file_range(struct inode *inode, ...@@ -554,6 +566,8 @@ static noinline int compress_file_range(struct inode *inode,
__set_page_dirty_nobuffers(locked_page); __set_page_dirty_nobuffers(locked_page);
/* unlocked later on in the async handlers */ /* unlocked later on in the async handlers */
} }
if (redirty)
extent_range_redirty_for_io(inode, start, end);
add_async_extent(async_cow, start, end - start + 1, add_async_extent(async_cow, start, end - start + 1,
0, NULL, 0, BTRFS_COMPRESS_NONE); 0, NULL, 0, BTRFS_COMPRESS_NONE);
*num_added += 1; *num_added += 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment