Commit a9ab5e84 authored by Christoph Hellwig, committed by Trond Myklebust

nfs: page cache invalidation for dio

Make sure to properly invalidate the pagecache before performing direct I/O,
so that no stale pages are left around.  This matches what the generic
direct I/O code does.  Also take the i_mutex over the direct write submission
to avoid the livelock against truncate waiting for i_dio_count to decrease,
and to avoid having the pagecache easily repopulated while direct I/O is in
progress.  Again, this matches the generic direct I/O code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent d0b9875d
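For orientation, here is a condensed sketch of the write path once the hunks below are applied: sync and invalidate the affected pagecache range, submit the direct write while holding i_mutex, invalidate the range again after submission, then drop the lock and wait for completion. It is a simplified illustration assembled from the diff hunks, not the verbatim kernel function: the name nfs_direct_write_sketch is made up for the sketch, and the lock-context setup, statistics, size/offset checks, and the i_size update after a successful wait are all omitted.

/*
 * Simplified sketch of nfs_file_direct_write() after this patch.
 * Only the locking/invalidation flow added by the hunks below is shown.
 */
static ssize_t nfs_direct_write_sketch(struct kiocb *iocb, const struct iovec *iov,
				       unsigned long nr_segs, loff_t pos, bool uio)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	size_t count = iov_length(iov, nr_segs);
	/* index of the last page touched by this write, used for invalidation */
	loff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	ssize_t result;

	/* held across submission: blocks truncate and keeps the cache from repopulating */
	mutex_lock(&inode->i_mutex);

	/* flush dirty pages, then drop them so no stale cache survives the DIO */
	result = nfs_sync_mapping(mapping);
	if (result)
		goto out_unlock;
	if (mapping->nrpages) {
		result = invalidate_inode_pages2_range(mapping,
				pos >> PAGE_CACHE_SHIFT, end);
		if (result)
			goto out_unlock;
	}

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out_unlock;
	dreq->inode = inode;
	dreq->bytes_left = count;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	/* submit the direct write while still holding i_mutex */
	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);

	/* drop any pages the cache grew back while the I/O was in flight */
	if (mapping->nrpages)
		invalidate_inode_pages2_range(mapping,
				pos >> PAGE_CACHE_SHIFT, end);

	mutex_unlock(&inode->i_mutex);

	/* wait for completion (and pick up the byte count) outside the lock */
	if (!result)
		result = nfs_direct_wait(dreq);
	nfs_direct_req_release(dreq);
	return result;

out_unlock:
	mutex_unlock(&inode->i_mutex);
	return result;
}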
@@ -939,9 +939,12 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	struct inode *inode = mapping->host;
 	struct nfs_direct_req *dreq;
 	struct nfs_lock_context *l_ctx;
+	loff_t end;
 	size_t count;
 
 	count = iov_length(iov, nr_segs);
+	end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+
 	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 
 	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
@@ -958,16 +961,25 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	if (!count)
 		goto out;
 
+	mutex_lock(&inode->i_mutex);
+
 	result = nfs_sync_mapping(mapping);
 	if (result)
-		goto out;
+		goto out_unlock;
+
+	if (mapping->nrpages) {
+		result = invalidate_inode_pages2_range(mapping,
+					pos >> PAGE_CACHE_SHIFT, end);
+		if (result)
+			goto out_unlock;
+	}
 
 	task_io_account_write(count);
 
 	result = -ENOMEM;
 	dreq = nfs_direct_req_alloc();
 	if (!dreq)
-		goto out;
+		goto out_unlock;
 
 	dreq->inode = inode;
 	dreq->bytes_left = count;
@@ -982,6 +994,14 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 		dreq->iocb = iocb;
 
 	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+
+	if (mapping->nrpages) {
+		invalidate_inode_pages2_range(mapping,
+					      pos >> PAGE_CACHE_SHIFT, end);
+	}
+
+	mutex_unlock(&inode->i_mutex);
+
 	if (!result) {
 		result = nfs_direct_wait(dreq);
 		if (result > 0) {
@@ -994,8 +1014,13 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 			spin_unlock(&inode->i_lock);
 		}
 	}
+	nfs_direct_req_release(dreq);
+	return result;
+
 out_release:
 	nfs_direct_req_release(dreq);
+out_unlock:
+	mutex_unlock(&inode->i_mutex);
 out:
 	return result;
 }