Commit a9132667 authored by Liu Bo's avatar Liu Bo Committed by David Sterba

Btrfs: make mapping->writeback_index point to the last written page

If a sequential writer is writing in the middle of a page, it simply redirties
the last written page and continues from it.

In the above case, writeback can end up seeking back to that first redirtied
page after writing all the pages at the end of the file, because btrfs updates
mapping->writeback_index to one past the current page.

For non-cow filesystems, the cost is only an extra seek, while for cow
filesystems such as btrfs, it means unnecessary fragmentation.

To avoid it, we just need to continue writeback from the last written page.

This also updates btrfs to behave like write_cache_pages() does, i.e., bail
out immediately if there is an error in writepage().

<Ref: https://www.spinics.net/lists/linux-btrfs/msg52628.html>
Reported-by: default avatarHolger Hoffstätte <holger.hoffstaette@googlemail.com>
Signed-off-by: default avatarLiu Bo <bo.li.liu@oracle.com>
Signed-off-by: default avatarDavid Sterba <dsterba@suse.com>
parent 4c63c245
...@@ -3200,14 +3200,10 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, ...@@ -3200,14 +3200,10 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
return ret; return ret;
} }
static noinline void update_nr_written(struct page *page, static void update_nr_written(struct page *page, struct writeback_control *wbc,
struct writeback_control *wbc, unsigned long nr_written)
unsigned long nr_written)
{ {
wbc->nr_to_write -= nr_written; wbc->nr_to_write -= nr_written;
if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
page->mapping->writeback_index = page->index + nr_written;
} }
/* /*
...@@ -3926,6 +3922,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, ...@@ -3926,6 +3922,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
int nr_pages; int nr_pages;
pgoff_t index; pgoff_t index;
pgoff_t end; /* Inclusive */ pgoff_t end; /* Inclusive */
pgoff_t done_index;
int range_whole = 0;
int scanned = 0; int scanned = 0;
int tag; int tag;
...@@ -3948,6 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, ...@@ -3948,6 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
} else { } else {
index = wbc->range_start >> PAGE_SHIFT; index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
scanned = 1; scanned = 1;
} }
if (wbc->sync_mode == WB_SYNC_ALL) if (wbc->sync_mode == WB_SYNC_ALL)
...@@ -3957,6 +3957,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, ...@@ -3957,6 +3957,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
retry: retry:
if (wbc->sync_mode == WB_SYNC_ALL) if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end); tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !nr_to_write_done && (index <= end) && while (!done && !nr_to_write_done && (index <= end) &&
(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
...@@ -3966,6 +3967,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, ...@@ -3966,6 +3967,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
for (i = 0; i < nr_pages; i++) { for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i]; struct page *page = pvec.pages[i];
done_index = page->index;
/* /*
* At this point we hold neither mapping->tree_lock nor * At this point we hold neither mapping->tree_lock nor
* lock on the page itself: the page may be truncated or * lock on the page itself: the page may be truncated or
...@@ -4009,6 +4011,20 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, ...@@ -4009,6 +4011,20 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
} }
if (!err && ret < 0) if (!err && ret < 0)
err = ret; err = ret;
if (ret < 0) {
/*
* done_index is set past this page,
* so media errors will not choke
* background writeout for the entire
* file. This has consequences for
* range_cyclic semantics (ie. it may
* not be suitable for data integrity
* writeout).
*/
done_index = page->index + 1;
done = 1;
break;
}
/* /*
* the filesystem may choose to bump up nr_to_write. * the filesystem may choose to bump up nr_to_write.
...@@ -4029,6 +4045,10 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, ...@@ -4029,6 +4045,10 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
index = 0; index = 0;
goto retry; goto retry;
} }
if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
mapping->writeback_index = done_index;
btrfs_add_delayed_iput(inode); btrfs_add_delayed_iput(inode);
return err; return err;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment