Commit 075a924d authored by Dave Chinner, committed by Dave Chinner

xfs: use i_mmaplock on write faults

Take the i_mmaplock over write page faults. These come through the
->page_mkwrite callout, so we need to wrap that call with the
i_mmaplock.

This gives us a lock order of mmap_sem -> i_mmaplock -> page_lock
-> i_lock.

Also, move the page_mkwrite wrapper to the same region of xfs_file.c
as the read fault wrappers and add a tracepoint.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent de0e8c20
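
The locking described above can be seen in the wrapper this commit adds; here is
an annotated copy of it (the code is taken from the diff below, while the
lock-ordering comments are editorial and not part of the commit):

	STATIC int
	xfs_filemap_page_mkwrite(
		struct vm_area_struct	*vma,	/* mmap_sem is already held by the fault path */
		struct vm_fault		*vmf)
	{
		struct xfs_inode	*ip = XFS_I(vma->vm_file->f_mapping->host);
		int			error;

		trace_xfs_filemap_page_mkwrite(ip);

		/* i_mmaplock nests inside mmap_sem... */
		xfs_ilock(ip, XFS_MMAPLOCK_SHARED);

		/*
		 * ...and block_page_mkwrite() locks the page before calling
		 * xfs_get_blocks(), which may take the inode i_lock, giving
		 * the order mmap_sem -> i_mmaplock -> page_lock -> i_lock.
		 */
		error = block_page_mkwrite(vma, vmf, xfs_get_blocks);
		xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);

		return error;
	}
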
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -990,20 +990,6 @@ xfs_file_mmap(
 	return 0;
 }
 
-/*
- * mmap()d file has taken write protection fault and is being made
- * writable. We can set the page state up correctly for a writable
- * page, which means we can do correct delalloc accounting (ENOSPC
- * checking!) and unwritten extent mapping.
- */
-STATIC int
-xfs_vm_page_mkwrite(
-	struct vm_area_struct	*vma,
-	struct vm_fault		*vmf)
-{
-	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
-}
-
 /*
  * This type is designed to indicate the type of offset we would like
  * to search from page cache for xfs_seek_hole_data().
@@ -1405,6 +1391,29 @@ xfs_filemap_fault(
 	return error;
 }
 
+/*
+ * mmap()d file has taken write protection fault and is being made writable. We
+ * can set the page state up correctly for a writable page, which means we can
+ * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
+ * mapping.
+ */
+STATIC int
+xfs_filemap_page_mkwrite(
+	struct vm_area_struct	*vma,
+	struct vm_fault		*vmf)
+{
+	struct xfs_inode	*ip = XFS_I(vma->vm_file->f_mapping->host);
+	int			error;
+
+	trace_xfs_filemap_page_mkwrite(ip);
+
+	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+	error = block_page_mkwrite(vma, vmf, xfs_get_blocks);
+	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+
+	return error;
+}
+
 const struct file_operations xfs_file_operations = {
 	.llseek		= xfs_file_llseek,
 	.read		= new_sync_read,
@@ -1439,5 +1448,5 @@ const struct file_operations xfs_dir_file_operations = {
 static const struct vm_operations_struct xfs_file_vm_ops = {
 	.fault		= xfs_filemap_fault,
 	.map_pages	= filemap_map_pages,
-	.page_mkwrite	= xfs_vm_page_mkwrite,
+	.page_mkwrite	= xfs_filemap_page_mkwrite,
 };
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -686,6 +686,7 @@ DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag);
 DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid);
 DEFINE_INODE_EVENT(xfs_filemap_fault);
+DEFINE_INODE_EVENT(xfs_filemap_page_mkwrite);
 
 DECLARE_EVENT_CLASS(xfs_iref_class,
 	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
...