Commit f0a9ad1d authored by Hugh Dickins, committed by Andrew Morton

shmem: factor shmem_falloc_wait() out of shmem_fault()

That Trinity livelock shmem_falloc avoidance block is unlikely, and a
distraction from the proper business of shmem_fault(): separate it out. 
(This used to help compilers save stack on the fault path too, but both
gcc and clang nowadays seem to make better choices anyway.)
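
To make the shape of the change concrete, here is a minimal user-space sketch of the same pattern (illustrative only: the names and the stack-size effect are assumptions, not code from mm/shmem.c). The unlikely branch is factored into its own function, so its locals stay out of the hot function's frame and the hot path reads straight through:

	/* Illustrative sketch only -- not kernel code; all names invented. */
	#include <stdio.h>

	/*
	 * The rare case lives in its own function: its locals (the large
	 * buffer here) are charged to a frame only when the call happens.
	 */
	static int handle_rare_case(int value)
	{
		char buf[256];	/* stack cost paid only on the cold path */

		snprintf(buf, sizeof(buf), "rare case hit: %d", value);
		puts(buf);
		return -1;
	}

	/* The hot path stays short and does only its proper business. */
	static int hot_path(int value, int rare)
	{
		if (rare)
			return handle_rare_case(value);
		return value * 2;
	}

	int main(void)
	{
		printf("%d\n", hot_path(21, 0));	/* fast path: 42 */
		hot_path(7, 1);				/* cold path message */
		return 0;
	}

In the patch below the cold path is shmem_falloc_wait(), and shmem_fault() keeps only the unlikely i_private check.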

Link: https://lkml.kernel.org/r/6fe379a4-6176-9225-9263-fe60d2633c0@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Carlos Maiolino <cem@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Tim Chen <tim.c.chen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e3e1a506
mm/shmem.c
@@ -2148,23 +2148,15 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
  * entry unconditionally - even if something else had already woken the
  * target.
  */
-static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+static int synchronous_wake_function(wait_queue_entry_t *wait,
+			unsigned int mode, int sync, void *key)
 {
 	int ret = default_wake_function(wait, mode, sync, key);
 	list_del_init(&wait->entry);
 	return ret;
 }
 
-static vm_fault_t shmem_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct inode *inode = file_inode(vma->vm_file);
-	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
-	struct folio *folio = NULL;
-	int err;
-	vm_fault_t ret = VM_FAULT_LOCKED;
-
 /*
  * Trinity finds that probing a hole which tmpfs is punching can
  * prevent the hole-punch from ever completing: which in turn
  * locks writers out with its hold on i_rwsem.  So refrain from
@@ -2181,8 +2173,11 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
  * standard mutex or completion: but we cannot take i_rwsem in fault,
  * and bloating every shmem inode for this unlikely case would be sad.
  */
-	if (unlikely(inode->i_private)) {
+static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
+{
 	struct shmem_falloc *shmem_falloc;
+	struct file *fpin = NULL;
+	vm_fault_t ret = 0;
 
 	spin_lock(&inode->i_lock);
 	shmem_falloc = inode->i_private;
@@ -2190,15 +2185,11 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	    shmem_falloc->waitq &&
 	    vmf->pgoff >= shmem_falloc->start &&
 	    vmf->pgoff < shmem_falloc->next) {
-			struct file *fpin;
 		wait_queue_head_t *shmem_falloc_waitq;
 		DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
 
 		ret = VM_FAULT_NOPAGE;
 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
-		if (fpin)
-			ret = VM_FAULT_RETRY;
-
 		shmem_falloc_waitq = shmem_falloc->waitq;
 		prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
 				TASK_UNINTERRUPTIBLE);
@@ -2214,21 +2205,42 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	 */
 	spin_lock(&inode->i_lock);
 	finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+	}
 	spin_unlock(&inode->i_lock);
 
-			if (fpin)
+	if (fpin) {
 		fput(fpin);
-			return ret;
+		ret = VM_FAULT_RETRY;
 	}
-		spin_unlock(&inode->i_lock);
+	return ret;
+}
+
+static vm_fault_t shmem_fault(struct vm_fault *vmf)
+{
+	struct inode *inode = file_inode(vmf->vma->vm_file);
+	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
+	struct folio *folio = NULL;
+	vm_fault_t ret = 0;
+	int err;
+
+	/*
+	 * Trinity finds that probing a hole which tmpfs is punching can
+	 * prevent the hole-punch from ever completing: noted in i_private.
+	 */
+	if (unlikely(inode->i_private)) {
+		ret = shmem_falloc_wait(vmf, inode);
+		if (ret)
+			return ret;
 	}
 
 	WARN_ON_ONCE(vmf->page != NULL);
 	err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
 				  gfp, vmf, &ret);
 	if (err)
 		return vmf_error(err);
-	if (folio)
+	if (folio) {
 		vmf->page = folio_file_page(folio, vmf->pgoff);
+		ret |= VM_FAULT_LOCKED;
+	}
 	return ret;
 }