Commit 810caa3e authored by David Howells
parent 630f5dda
@@ -490,47 +490,25 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter,
 }
 
 /*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
- *
- * If this page holds new content, then we can include filler zeros in the
- * writeback.
+ * Extend the region to be written back to include subsequent contiguously
+ * dirty pages if possible, but don't sleep while doing so.
  */
-static int afs_write_back_from_locked_page(struct address_space *mapping,
-					   struct writeback_control *wbc,
-					   struct page *primary_page,
-					   pgoff_t final_page)
+static void afs_extend_writeback(struct address_space *mapping,
+				 struct afs_vnode *vnode,
+				 long *_count,
+				 pgoff_t start,
+				 pgoff_t final_page,
+				 unsigned *_offset,
+				 unsigned *_to,
+				 bool new_content)
 {
-	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-	struct iov_iter iter;
 	struct page *pages[8], *page;
-	unsigned long count, priv;
-	unsigned n, offset, to, f, t;
-	pgoff_t start, first, last;
-	loff_t i_size, pos, end;
-	int loop, ret;
-
-	_enter(",%lx", primary_page->index);
-
-	count = 1;
-	if (test_set_page_writeback(primary_page))
-		BUG();
-
-	/* Find all consecutive lockable dirty pages that have contiguous
-	 * written regions, stopping when we find a page that is not
-	 * immediately lockable, is not dirty or is missing, or we reach the
-	 * end of the range.
-	 */
-	start = primary_page->index;
-	priv = page_private(primary_page);
-	offset = afs_page_dirty_from(primary_page, priv);
-	to = afs_page_dirty_to(primary_page, priv);
-	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
-
-	WARN_ON(offset == to);
-	if (offset == to)
-		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
-
-	if (start >= final_page ||
-	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
-		goto no_more;
+	unsigned long count = *_count, priv;
+	unsigned offset = *_offset, to = *_to, n, f, t;
+	int loop;
 
 	start++;
 	do {
@@ -551,8 +529,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 		for (loop = 0; loop < n; loop++) {
 			page = pages[loop];
-			if (to != PAGE_SIZE &&
-			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
+			if (to != PAGE_SIZE && !new_content)
 				break;
 			if (page->index > final_page)
 				break;
@@ -566,8 +543,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 			priv = page_private(page);
 			f = afs_page_dirty_from(page, priv);
 			t = afs_page_dirty_to(page, priv);
-			if (f != 0 &&
-			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
+			if (f != 0 && !new_content) {
 				unlock_page(page);
 				break;
 			}
@@ -593,6 +569,55 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 	} while (start <= final_page && count < 65536);
 
 no_more:
+	*_count = count;
+	*_offset = offset;
+	*_to = to;
+}
+
+/*
+ * Synchronously write back the locked page and any subsequent non-locked dirty
+ * pages.
+ */
+static int afs_write_back_from_locked_page(struct address_space *mapping,
+					   struct writeback_control *wbc,
+					   struct page *primary_page,
+					   pgoff_t final_page)
+{
+	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+	struct iov_iter iter;
+	unsigned long count, priv;
+	unsigned offset, to;
+	pgoff_t start, first, last;
+	loff_t i_size, pos, end;
+	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+	int ret;
+
+	_enter(",%lx", primary_page->index);
+
+	count = 1;
+	if (test_set_page_writeback(primary_page))
+		BUG();
+
+	/* Find all consecutive lockable dirty pages that have contiguous
+	 * written regions, stopping when we find a page that is not
+	 * immediately lockable, is not dirty or is missing, or we reach the
+	 * end of the range.
+	 */
+	start = primary_page->index;
+	priv = page_private(primary_page);
+	offset = afs_page_dirty_from(primary_page, priv);
+	to = afs_page_dirty_to(primary_page, priv);
+	trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page);
+
+	WARN_ON(offset == to);
+	if (offset == to)
+		trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page);
+
+	if (start < final_page &&
+	    (to == PAGE_SIZE || new_content))
+		afs_extend_writeback(mapping, vnode, &count, start, final_page,
+				     &offset, &to, new_content);
+
 	/* We now have a contiguous set of dirty pages, each with writeback
 	 * set; the first page is still locked at this point, but all the rest
 	 * have been unlocked.
......
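Note: what follows is a simplified, self-contained C sketch (not kernel code) of the refactoring pattern this commit applies: the region-extension loop is hoisted out of afs_write_back_from_locked_page() into a helper that hands its results back through out-parameters, as afs_extend_writeback() does with *_count, *_offset and *_to. All names and values below (extend_region, the page range, the sample offsets) are illustrative assumptions, not part of the commit.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Illustrative helper: extend a dirty region over further "pages" and
 * report the new bookkeeping back through pointers, mirroring how
 * afs_extend_writeback() updates *_count, *_offset and *_to. */
static void extend_region(long *_count, unsigned *_offset, unsigned *_to,
                          unsigned long start, unsigned long final_page,
                          bool new_content)
{
	long count = *_count;
	unsigned offset = *_offset, to = *_to;

	while (start <= final_page && count < 65536) {
		/* The real helper stops at the first page that is not
		 * contiguously dirty; here every page qualifies. */
		if (to != PAGE_SIZE && !new_content)
			break;
		offset = 0;		/* subsequent pages dirty from 0 ... */
		to = PAGE_SIZE;		/* ... to the end of the page */
		count++;
		start++;
	}

	*_count = count;
	*_offset = offset;
	*_to = to;
}

int main(void)
{
	long count = 1;
	unsigned offset = 512, to = PAGE_SIZE;
	bool new_content = false;

	/* Caller mirrors the commit's gate: only try to extend when the
	 * first page is dirty to its end or the file holds new content. */
	if (to == PAGE_SIZE || new_content)
		extend_region(&count, &offset, &to, 0, 7, new_content);

	printf("count=%ld offset=%u to=%u\n", count, offset, to);
	return 0;
}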