Commit 61e930a9 authored by Trond Myklebust

NFS: Fix a writeback race...

This patch fixes a regression that was introduced by commit 44dd151d.

We cannot zero the user page in nfs_mark_uptodate() any more, since

  a) We'd be modifying the page without holding the page lock
  b) We can race with other updates of the page, most notably
     because of the call to nfs_wb_page() in nfs_writepage_setup().

Instead, we do the zeroing in nfs_update_request() when we see that we
are creating a request whose page might later be marked as up to date.
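To make the new rule concrete, the following is a minimal userspace sketch of
the zeroing condition, not kernel code: the kernel types and helpers
(PAGE_CACHE_SIZE, struct nfs_page, PageUptodate(), zero_user_page()) are
replaced by plain C stand-ins, and PAGE_SIZE_SIM, struct fake_req and
maybe_zero_tail() are hypothetical names used only for illustration.

/* Minimal sketch simulating the zero_page logic added in
 * nfs_update_request().  PAGE_SIZE_SIM, struct fake_req and
 * maybe_zero_tail() are illustrative names only. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_SIM 4096		/* stands in for PAGE_CACHE_SIZE */

struct fake_req {
	unsigned int pgbase;		/* offset of the write within the page */
	unsigned int bytes;		/* length of the write */
	unsigned char *page;		/* backing "page" data */
	int page_uptodate;		/* stands in for PageUptodate() */
};

/* If the request starts at the beginning of the page but does not cover
 * it, and the page is not already up to date, zero the uninitialised
 * tail so the page may later be marked up to date. */
static void maybe_zero_tail(struct fake_req *req)
{
	if (req->pgbase == 0 && req->bytes != PAGE_SIZE_SIM && !req->page_uptodate)
		memset(req->page + req->bytes, 0, PAGE_SIZE_SIM - req->bytes);
}

int main(void)
{
	unsigned char page[PAGE_SIZE_SIM];
	struct fake_req req = { .pgbase = 0, .bytes = 200,
				.page = page, .page_uptodate = 0 };

	memset(page, 0xAA, sizeof(page));	/* stale data beyond the write */
	memset(page, 'x', req.bytes);		/* the user's 200-byte write */

	maybe_zero_tail(&req);
	printf("byte 199 = 0x%02x, byte 200 = 0x%02x\n", page[199], page[200]);
	return 0;
}

A 200-byte write at offset 0 of a page that is not up to date leaves bytes
200..4095 uninitialised; zeroing them is what makes it safe to call
SetPageUptodate() on the whole page afterwards.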

Thanks to Olivier Paquet for reporting the bug and providing a test-case.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 4fa4d23f
@@ -174,8 +174,6 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
 		return;
 	if (count != nfs_page_length(page))
 		return;
-	if (count != PAGE_CACHE_SIZE)
-		zero_user_page(page, count, PAGE_CACHE_SIZE - count, KM_USER0);
 	SetPageUptodate(page);
 }
@@ -627,7 +625,8 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 			return ERR_PTR(error);
 		}
 		spin_unlock(&inode->i_lock);
-		return new;
+		req = new;
+		goto zero_page;
 	}
 	spin_unlock(&inode->i_lock);
@@ -655,13 +654,23 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 	if (offset < req->wb_offset) {
 		req->wb_offset = offset;
 		req->wb_pgbase = offset;
-		req->wb_bytes = rqend - req->wb_offset;
+		req->wb_bytes = max(end, rqend) - req->wb_offset;
+		goto zero_page;
 	}
 	if (end > rqend)
 		req->wb_bytes = end - req->wb_offset;
 	return req;
+zero_page:
+	/* If this page might potentially be marked as up to date,
+	 * then we need to zero any uninitalised data. */
+	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
+			&& !PageUptodate(req->wb_page))
+		zero_user_page(req->wb_page, req->wb_bytes,
+				PAGE_CACHE_SIZE - req->wb_bytes,
+				KM_USER0);
+	return req;
 }
 int nfs_flush_incompatible(struct file *file, struct page *page)