Commit 9608703e authored by Jan Kara

mm: Fix comments mentioning i_mutex

inode->i_mutex was replaced with inode->i_rwsem long ago. Fix the
comments that still mention i_mutex.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Jan Kara <jack@suse.cz>
parent e73f0f0e
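
For context: inode->i_rwsem is the read-write semaphore embedded in struct inode that took over i_mutex's role, so exclusive ("write") locking now goes through the inode_lock()/inode_unlock() helpers, with shared variants for paths that only need to exclude writers. A simplified sketch of those helpers, modeled on their definitions in include/linux/fs.h:

	static inline void inode_lock(struct inode *inode)
	{
		/* exclusive: writes, truncate, fallocate, setattr, ... */
		down_write(&inode->i_rwsem);
	}

	static inline void inode_unlock(struct inode *inode)
	{
		up_write(&inode->i_rwsem);
	}

	static inline void inode_lock_shared(struct inode *inode)
	{
		/* shared: paths that only need to keep writers out */
		down_read(&inode->i_rwsem);
	}

	static inline void inode_unlock_shared(struct inode *inode)
	{
		up_read(&inode->i_rwsem);
	}
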
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -76,7 +76,7 @@
  *      ->swap_lock (exclusive_swap_page, others)
  *        ->i_pages lock
  *
- *  ->i_mutex
+ *  ->i_rwsem
  *    ->i_mmap_rwsem (truncate->unmap_mapping_range)
  *
  *  ->mmap_lock
@@ -87,7 +87,7 @@
  *  ->mmap_lock
  *    ->lock_page (access_process_vm)
  *
- *  ->i_mutex (generic_perform_write)
+ *  ->i_rwsem (generic_perform_write)
  *    ->mmap_lock (fault_in_pages_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
@@ -3704,12 +3704,12 @@ EXPORT_SYMBOL(generic_perform_write);
  * modification times and calls proper subroutines depending on whether we
  * do direct IO or a standard buffered write.
  *
- * It expects i_mutex to be grabbed unless we work on a block device or similar
+ * It expects i_rwsem to be grabbed unless we work on a block device or similar
  * object which does not need locking at all.
  *
  * This function does *not* take care of syncing data in case of O_SYNC write.
  * A caller has to handle it. This is mainly due to the fact that we want to
- * avoid syncing under i_mutex.
+ * avoid syncing under i_rwsem.
  *
  * Return:
  * * number of bytes written, even for truncated writes
@@ -3797,7 +3797,7 @@ EXPORT_SYMBOL(__generic_file_write_iter);
  *
  * This is a wrapper around __generic_file_write_iter() to be used by most
  * filesystems. It takes care of syncing the file in case of O_SYNC file
- * and acquires i_mutex as needed.
+ * and acquires i_rwsem as needed.
  * Return:
  * * negative error code if no data has been written at all of
  *   vfs_fsync_range() failed for a synchronous write
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -910,7 +910,7 @@ static long madvise_remove(struct vm_area_struct *vma,
 			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
 	/*
-	 * Filesystem's fallocate may need to take i_mutex. We need to
+	 * Filesystem's fallocate may need to take i_rwsem. We need to
 	 * explicitly grab a reference because the vma (and hence the
 	 * vma's reference to the file) can go away as soon as we drop
 	 * mmap_lock.
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -866,7 +866,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
 	/*
 	 * Truncation is a bit tricky. Enable it per file system for now.
 	 *
-	 * Open: to take i_mutex or not for this? Right now we don't.
+	 * Open: to take i_rwsem or not for this? Right now we don't.
 	 */
 	ret = truncate_error_page(p, pfn, mapping);
 out:
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -20,9 +20,9 @@
 /*
  * Lock ordering in mm:
  *
- * inode->i_mutex (while writing or truncating, not reading or faulting)
+ * inode->i_rwsem (while writing or truncating, not reading or faulting)
  *   mm->mmap_lock
- *     page->flags PG_locked (lock_page)   * (see huegtlbfs below)
+ *     page->flags PG_locked (lock_page)   * (see hugetlbfs below)
  *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  *         mapping->i_mmap_rwsem
  *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
@@ -41,7 +41,7 @@
  *               in arch-dependent flush_dcache_mmap_lock,
  *               within bdi.wb->list_lock in __sync_single_inode)
  *
- * anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon)
+ * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
  *   ->tasklist_lock
  *     pte map lock
  *
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -96,7 +96,7 @@ static struct vfsmount *shm_mnt;
 /*
  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
- * inode->i_private (with i_mutex making sure that it has only one user at
+ * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
@@ -774,7 +774,7 @@ static int shmem_free_swap(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given offsets are swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
@@ -806,7 +806,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given vma is swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
@@ -1069,7 +1069,7 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
 		loff_t oldsize = inode->i_size;
 		loff_t newsize = attr->ia_size;
 
-		/* protected by i_mutex */
+		/* protected by i_rwsem */
 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
 			return -EPERM;
@@ -2071,7 +2071,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	/*
 	 * Trinity finds that probing a hole which tmpfs is punching can
 	 * prevent the hole-punch from ever completing: which in turn
-	 * locks writers out with its hold on i_mutex. So refrain from
+	 * locks writers out with its hold on i_rwsem. So refrain from
 	 * faulting pages into the hole while it's being punched. Although
 	 * shmem_undo_range() does remove the additions, it may be unable to
 	 * keep up, as each new page needs its own unmap_mapping_range() call,
@@ -2082,7 +2082,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	 * we just need to make racing faults a rare case.
 	 *
 	 * The implementation below would be much simpler if we just used a
-	 * standard mutex or completion: but we cannot take i_mutex in fault,
+	 * standard mutex or completion: but we cannot take i_rwsem in fault,
 	 * and bloating every shmem inode for this unlikely case would be sad.
 	 */
 	if (unlikely(inode->i_private)) {
@@ -2482,7 +2482,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t index = pos >> PAGE_SHIFT;
 
-	/* i_mutex is held by caller */
+	/* i_rwsem is held by caller */
 	if (unlikely(info->seals & (F_SEAL_GROW |
 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
@@ -2582,7 +2582,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		/*
 		 * We must evaluate after, since reads (unlike writes)
-		 * are called without i_mutex protection against truncate
+		 * are called without i_rwsem protection against truncate
 		 */
 		nr = PAGE_SIZE;
 		i_size = i_size_read(inode);
@@ -2652,7 +2652,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 		return -ENXIO;
 
 	inode_lock(inode);
-	/* We're holding i_mutex so we can access i_size directly */
+	/* We're holding i_rwsem so we can access i_size directly */
 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
 	if (offset >= 0)
 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
@@ -2681,7 +2681,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
-		/* protected by i_mutex */
+		/* protected by i_rwsem */
 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
 			error = -EPERM;
 			goto out;
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -412,7 +412,7 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate
  *
- * Called under (and serialised by) inode->i_mutex.
+ * Called under (and serialised by) inode->i_rwsem.
  *
  * Note: When this function returns, there can be a page in the process of
  * deletion (inside __delete_from_page_cache()) in the specified range. Thus
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(truncate_inode_pages);
  * truncate_inode_pages_final - truncate *all* pages before inode dies
  * @mapping: mapping to truncate
  *
- * Called under (and serialized by) inode->i_mutex.
+ * Called under (and serialized by) inode->i_rwsem.
  *
  * Filesystems have to use this in the .evict_inode path to inform the
  * VM that this is the final truncate and the inode is going away.
@@ -748,7 +748,7 @@ EXPORT_SYMBOL(truncate_pagecache);
  * setattr function when ATTR_SIZE is passed in.
  *
  * Must be called with a lock serializing truncates and writes (generally
- * i_mutex but e.g. xfs uses a different lock) and before all filesystem
+ * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
  * specific block truncation has been performed.
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
@@ -777,7 +777,7 @@ EXPORT_SYMBOL(truncate_setsize);
  *
  * The function must be called after i_size is updated so that page fault
  * coming after we unlock the page will already see the new i_size.
- * The function must be called while we still hold i_mutex - this not only
+ * The function must be called while we still hold i_rwsem - this not only
  * makes sure i_size is stable but also that userspace cannot observe new
  * i_size value before we are prepared to store mmap writes at new inode size.
  */