Commit 388f7934 authored by Vegard Nossum, committed by Linus Torvalds

mm: use mmget_not_zero() helper

We already have the helper; we can convert the rest of the kernel
mechanically using:

  git grep -l 'atomic_inc_not_zero.*mm_users' | xargs sed -i 's/atomic_inc_not_zero(&\(.*\)->mm_users)/mmget_not_zero\(\1\)/'

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.
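
For reference, mmget_not_zero() is a thin wrapper around the open-coded test, so the substitution above is one-to-one. Below is a minimal sketch of the helper (illustrative only; the kernel's own definition lives in the scheduler headers):

  /*
   * Sketch of mmget_not_zero(): take a reference on the mm only if
   * mm_users has not already dropped to zero (i.e. the mm is not defunct).
   */
  static inline bool mmget_not_zero(struct mm_struct *mm)
  {
  	return atomic_inc_not_zero(&mm->mm_users);
  }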

Link: http://lkml.kernel.org/r/20161218123229.22952-3-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3fce371b
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 			flags |= FOLL_WRITE;

 		ret = -EFAULT;
-		if (atomic_inc_not_zero(&mm->mm_users)) {
+		if (mmget_not_zero(mm)) {
 			down_read(&mm->mmap_sem);
 			while (pinned < npages) {
 				ret = get_user_pages_remote
...
@@ -579,7 +579,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (!svm->mm)
 			goto bad_req;
 		/* If the mm is already defunct, don't handle faults. */
-		if (!atomic_inc_not_zero(&svm->mm->mm_users))
+		if (!mmget_not_zero(svm->mm))
 			goto bad_req;
 		down_read(&svm->mm->mmap_sem);
 		vma = find_extend_vma(svm->mm, address);
...
@@ -813,7 +813,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 		return -ENOMEM;

 	copied = 0;
-	if (!atomic_inc_not_zero(&mm->mm_users))
+	if (!mmget_not_zero(mm))
 		goto free;

 	/* Maybe we should limit FOLL_FORCE to actual ptrace users? */
@@ -921,7 +921,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
 		return -ENOMEM;

 	ret = 0;
-	if (!atomic_inc_not_zero(&mm->mm_users))
+	if (!mmget_not_zero(mm))
 		goto free;

 	down_read(&mm->mmap_sem);
...
@@ -167,7 +167,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 		return ERR_PTR(-ESRCH);

 	mm = priv->mm;
-	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+	if (!mm || !mmget_not_zero(mm))
 		return NULL;

 	down_read(&mm->mmap_sem);
@@ -1352,7 +1352,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	unsigned long end_vaddr;
 	int ret = 0, copied = 0;

-	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+	if (!mm || !mmget_not_zero(mm))
 		goto out;

 	ret = -EINVAL;
...
@@ -219,7 +219,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 		return ERR_PTR(-ESRCH);

 	mm = priv->mm;
-	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+	if (!mm || !mmget_not_zero(mm))
 		return NULL;

 	down_read(&mm->mmap_sem);
...
@@ -747,7 +747,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 			continue;
 		}

-		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
+		if (!mmget_not_zero(vma->vm_mm))
			continue;

 		info = prev;
...
@@ -1763,7 +1763,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 		while (swap_count(*swap_map) && !retval &&
 				(p = p->next) != &start_mm->mmlist) {
 			mm = list_entry(p, struct mm_struct, mmlist);
-			if (!atomic_inc_not_zero(&mm->mm_users))
+			if (!mmget_not_zero(mm))
 				continue;
 			spin_unlock(&mmlist_lock);
 			mmput(prev_mm);
...
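
Every converted call site follows the same reference-counting pattern: pin the address space with mmget_not_zero(), do the work (typically under mmap_sem), then drop the reference with mmput(). A minimal sketch of that pattern with a hypothetical caller (walk_mm_example() and its error value are illustrative, not part of this patch):

  /*
   * Illustrative caller pattern (not from this patch): pin the mm, walk it
   * under mmap_sem, then release the reference.
   */
  static int walk_mm_example(struct mm_struct *mm)
  {
  	if (!mm || !mmget_not_zero(mm))
  		return -ESRCH;		/* mm already defunct */

  	down_read(&mm->mmap_sem);
  	/* ... look up VMAs, call get_user_pages_remote(), etc. ... */
  	up_read(&mm->mmap_sem);

  	mmput(mm);			/* drop the reference taken above */
  	return 0;
  }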