Commit e94e54e8 authored by Christoph Hellwig, committed by David Sterba

btrfs: streamline compress_file_range

Reorder compress_file_range so that the main compression flow happens
in a straight line and not in branches.  To do this ensure that pages is
always zeroed before a page allocation happens, which allows the
cleanup_and_bail_uncompressed label to clean up the page allocations
as needed.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 00d31d17
...@@ -839,10 +839,11 @@ static void compress_file_range(struct btrfs_work *work) ...@@ -839,10 +839,11 @@ static void compress_file_range(struct btrfs_work *work)
u64 actual_end; u64 actual_end;
u64 i_size; u64 i_size;
int ret = 0; int ret = 0;
struct page **pages = NULL; struct page **pages;
unsigned long nr_pages; unsigned long nr_pages;
unsigned long total_compressed = 0; unsigned long total_compressed = 0;
unsigned long total_in = 0; unsigned long total_in = 0;
unsigned int poff;
int i; int i;
int will_compress; int will_compress;
int compress_type = fs_info->compress_type; int compress_type = fs_info->compress_type;
...@@ -865,6 +866,7 @@ static void compress_file_range(struct btrfs_work *work) ...@@ -865,6 +866,7 @@ static void compress_file_range(struct btrfs_work *work)
actual_end = min_t(u64, i_size, end + 1); actual_end = min_t(u64, i_size, end + 1);
again: again:
will_compress = 0; will_compress = 0;
pages = NULL;
nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES); nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);
...@@ -908,15 +910,19 @@ static void compress_file_range(struct btrfs_work *work) ...@@ -908,15 +910,19 @@ static void compress_file_range(struct btrfs_work *work)
ret = 0; ret = 0;
/* /*
* we do compression for mount -o compress and when the * We do compression for mount -o compress and when the inode has not
* inode has not been flagged as nocompress. This flag can * been flagged as NOCOMPRESS. This flag can change at any time if we
* change at any time if we discover bad compression ratios. * discover bad compression ratios.
*/ */
if (inode_need_compress(inode, start, end)) { if (!inode_need_compress(inode, start, end))
WARN_ON(pages); goto cont;
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) { if (!pages) {
/* just bail out to the uncompressed code */ /*
* Memory allocation failure is not a fatal error, we can fall
* back to uncompressed code.
*/
nr_pages = 0; nr_pages = 0;
goto cont; goto cont;
} }
...@@ -927,47 +933,41 @@ static void compress_file_range(struct btrfs_work *work) ...@@ -927,47 +933,41 @@ static void compress_file_range(struct btrfs_work *work)
compress_type = inode->prop_compress; compress_type = inode->prop_compress;
/* /*
* we need to call clear_page_dirty_for_io on each * We need to call clear_page_dirty_for_io on each page in the range.
* page in the range. Otherwise applications with the file * Otherwise applications with the file mmap'd can wander in and change
* mmap'd can wander in and change the page contents while * the page contents while we are compressing them.
* we are compressing them.
* *
* If the compression fails for any reason, we set the pages * If the compression fails for any reason, we set the pages dirty again
* dirty again later on. * later on.
* *
* Note that the remaining part is redirtied, the start pointer * Note that the remaining part is redirtied, the start pointer has
* has moved, the end is the original one. * moved, the end is the original one.
*/ */
if (!redirty) { if (!redirty) {
extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end); extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
redirty = 1; redirty = 1;
} }
/* Compression level is applied here and only here */ /* Compression level is applied here. */
ret = btrfs_compress_pages( ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
compress_type | (fs_info->compress_level << 4), mapping, start, pages, &nr_pages, &total_in,
mapping, start,
pages,
&nr_pages,
&total_in,
&total_compressed); &total_compressed);
if (ret)
goto cont;
if (!ret) { /*
unsigned long offset = offset_in_page(total_compressed); * Zero the tail end of the last page, as we might be sending it down
struct page *page = pages[nr_pages - 1]; * to disk.
/* zero the tail end of the last page, we might be
* sending it down to disk
*/ */
if (offset) poff = offset_in_page(total_compressed);
memzero_page(page, offset, PAGE_SIZE - offset); if (poff)
memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);
will_compress = 1; will_compress = 1;
}
}
cont: cont:
/* /*
* Check cow_file_range() for why we don't even try to create inline * Check cow_file_range() for why we don't even try to create inline
* extent for subpage case. * extent for the subpage case.
*/ */
if (start == 0 && fs_info->sectorsize == PAGE_SIZE) { if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
/* lets try to make an inline extent */ /* lets try to make an inline extent */
...@@ -1026,39 +1026,38 @@ static void compress_file_range(struct btrfs_work *work) ...@@ -1026,39 +1026,38 @@ static void compress_file_range(struct btrfs_work *work)
} }
} }
if (will_compress) { if (!will_compress)
goto cleanup_and_bail_uncompressed;
/* /*
* we aren't doing an inline extent round the compressed size * We aren't doing an inline extent. Round the compressed size up to a
* up to a block size boundary so the allocator does sane * block size boundary so the allocator does sane things.
* things
*/ */
total_compressed = ALIGN(total_compressed, blocksize); total_compressed = ALIGN(total_compressed, blocksize);
/* /*
* one last check to make sure the compression is really a * One last check to make sure the compression is really a win, compare
* win, compare the page count read with the blocks on disk, * the page count read with the blocks on disk, compression must free at
* compression must free at least one sector size * least one sector.
*/ */
total_in = round_up(total_in, fs_info->sectorsize); total_in = round_up(total_in, fs_info->sectorsize);
if (total_compressed + blocksize <= total_in) { if (total_compressed + blocksize > total_in)
goto cleanup_and_bail_uncompressed;
/* /*
* The async work queues will take care of doing actual * The async work queues will take care of doing actual allocation on
* allocation on disk for these compressed pages, and * disk for these compressed pages, and will submit the bios.
* will submit them to the elevator.
*/ */
add_async_extent(async_chunk, start, total_in, add_async_extent(async_chunk, start, total_in, total_compressed, pages,
total_compressed, pages, nr_pages, nr_pages, compress_type);
compress_type);
if (start + total_in < end) { if (start + total_in < end) {
start += total_in; start += total_in;
pages = NULL;
cond_resched(); cond_resched();
goto again; goto again;
} }
return; return;
}
} cleanup_and_bail_uncompressed:
if (pages) { if (pages) {
/* /*
* the compression code ran but failed to make things smaller, * the compression code ran but failed to make things smaller,
...@@ -1079,7 +1078,7 @@ static void compress_file_range(struct btrfs_work *work) ...@@ -1079,7 +1078,7 @@ static void compress_file_range(struct btrfs_work *work)
inode->flags |= BTRFS_INODE_NOCOMPRESS; inode->flags |= BTRFS_INODE_NOCOMPRESS;
} }
} }
cleanup_and_bail_uncompressed:
/* /*
* No compression, but we still need to write the pages in the file * No compression, but we still need to write the pages in the file
* we've been given so far. redirty the locked page if it corresponds * we've been given so far. redirty the locked page if it corresponds
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment