Commit 420829d8 authored by Nikolay Borisov's avatar Nikolay Borisov Committed by David Sterba

btrfs: Refactor shrink_delalloc

Add a couple of comments regarding the logic flow in shrink_delalloc.
Then, cease using max_reclaim as a temporary variable when calculating
nr_pages. Finally, give max_reclaim a more fitting name, which
unequivocally shows what this variable really holds. No functional
changes.
Signed-off-by: default avatarNikolay Borisov <nborisov@suse.com>
Reviewed-by: default avatarDavid Sterba <dsterba@suse.com>
Signed-off-by: default avatarDavid Sterba <dsterba@suse.com>
parent 4546d178
...@@ -4741,7 +4741,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim, ...@@ -4741,7 +4741,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
struct btrfs_space_info *space_info; struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
u64 delalloc_bytes; u64 delalloc_bytes;
u64 max_reclaim; u64 async_pages;
u64 items; u64 items;
long time_left; long time_left;
unsigned long nr_pages; unsigned long nr_pages;
...@@ -4766,25 +4766,36 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim, ...@@ -4766,25 +4766,36 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
loops = 0; loops = 0;
while (delalloc_bytes && loops < 3) { while (delalloc_bytes && loops < 3) {
max_reclaim = min(delalloc_bytes, to_reclaim); nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
nr_pages = max_reclaim >> PAGE_SHIFT;
/*
* Triggers inode writeback for up to nr_pages. This will invoke
* ->writepages callback and trigger delalloc filling
* (btrfs_run_delalloc_range()).
*/
btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items); btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
/* /*
* We need to wait for the async pages to actually start before * We need to wait for the compressed pages to start before
* we do anything. * we continue.
*/ */
max_reclaim = atomic_read(&fs_info->async_delalloc_pages); async_pages = atomic_read(&fs_info->async_delalloc_pages);
if (!max_reclaim) if (!async_pages)
goto skip_async; goto skip_async;
if (max_reclaim <= nr_pages) /*
max_reclaim = 0; * Calculate how many compressed pages we want to be written
* before we continue. I.e if there are more async pages than we
* require wait_event will wait until nr_pages are written.
*/
if (async_pages <= nr_pages)
async_pages = 0;
else else
max_reclaim -= nr_pages; async_pages -= nr_pages;
wait_event(fs_info->async_submit_wait, wait_event(fs_info->async_submit_wait,
atomic_read(&fs_info->async_delalloc_pages) <= atomic_read(&fs_info->async_delalloc_pages) <=
(int)max_reclaim); (int)async_pages);
skip_async: skip_async:
spin_lock(&space_info->lock); spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets) && if (list_empty(&space_info->tickets) &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment