Commit 3fce371b authored by Vegard Nossum, committed by Linus Torvalds

mm: add new mmget() helper

Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_users);/mmget\(\1\);/'
  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_users);/mmget\(\&\1\);/'
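
The first substitution covers pointer references to an mm, the second covers
direct struct references (within this patch that is only init_mm, in the
swapfile hunks below). Illustratively, the conversion turns:

  atomic_inc(&mm->mm_users);        into  mmget(mm);
  atomic_inc(&init_mm.mm_users);    into  mmget(&init_mm);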

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.
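
As a reminder of the intended pairing (a minimal sketch, not part of this
patch; pin_and_use() is a made-up example function), a caller pins a task's
address space via get_task_mm(), which now takes the mm_users reference
through mmget(), and drops it again with mmput():

  static int pin_and_use(struct task_struct *task)
  {
          struct mm_struct *mm = get_task_mm(task); /* takes an mm_users reference */

          if (!mm)
                  return -EINVAL;

          /* ... operate on the task's address space ... */

          mmput(mm);                                /* drop the reference */
          return 0;
  }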

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/20161218123229.22952-2-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f1f10076
@@ -139,7 +139,7 @@ void start_kernel_secondary(void)
 	/* MMU, Caches, Vector Table, Interrupts etc */
 	setup_processor();
 
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -307,7 +307,7 @@ void secondary_start_kernel(void)
 	local_irq_disable();
 
 	/* Attach the new idle task to the global mm. */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
@@ -188,7 +188,7 @@ int cxn_pin_by_pid(pid_t pid)
 	task_lock(tsk);
 	if (tsk->mm) {
 		mm = tsk->mm;
-		atomic_inc(&mm->mm_users);
+		mmget(mm);
 		ret = 0;
 	}
 	task_unlock(tsk);
@@ -344,7 +344,7 @@ asmlinkage void secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -179,7 +179,7 @@ asmlinkage void start_secondary(void)
 	enable_mmu();
 	mmgrab(mm);
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	current->active_mm = mm;
 #ifdef CONFIG_MMU
 	enter_lazy_tlb(mm, current);
@@ -135,7 +135,7 @@ void secondary_start_kernel(void)
 	/* All kernel threads share the same mm context. */
-	atomic_inc(&mm->mm_users);
+	mmget(mm);
 	mmgrab(mm);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -2948,6 +2948,27 @@ static inline void mmdrop_async(struct mm_struct *mm)
 	}
 }
 
+/**
+ * mmget() - Pin the address space associated with a &struct mm_struct.
+ * @mm: The address space to pin.
+ *
+ * Make sure that the address space of the given &struct mm_struct doesn't
+ * go away. This does not protect against parts of the address space being
+ * modified or freed, however.
+ *
+ * Never use this function to pin this address space for an
+ * unbounded/indefinite amount of time.
+ *
+ * Use mmput() to release the reference acquired by mmget().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmget(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_users);
+}
+
 static inline bool mmget_not_zero(struct mm_struct *mm)
 {
 	return atomic_inc_not_zero(&mm->mm_users);
@@ -1000,7 +1000,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 		if (task->flags & PF_KTHREAD)
 			mm = NULL;
 		else
-			atomic_inc(&mm->mm_users);
+			mmget(mm);
 	}
 	task_unlock(task);
 	return mm;
@@ -1188,7 +1188,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	vmacache_flush(tsk);
 
 	if (clone_flags & CLONE_VM) {
-		atomic_inc(&oldmm->mm_users);
+		mmget(oldmm);
 		mm = oldmm;
 		goto good_mm;
 	}
@@ -1671,7 +1671,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 	 * that.
 	 */
 	start_mm = &init_mm;
-	atomic_inc(&init_mm.mm_users);
+	mmget(&init_mm);
 
 	/*
 	 * Keep on scanning until all entries have gone. Usually,
@@ -1720,7 +1720,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 		if (atomic_read(&start_mm->mm_users) == 1) {
 			mmput(start_mm);
 			start_mm = &init_mm;
-			atomic_inc(&init_mm.mm_users);
+			mmget(&init_mm);
 		}
 
 		/*
@@ -1757,8 +1757,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
 			struct mm_struct *prev_mm = start_mm;
 			struct mm_struct *mm;
 
-			atomic_inc(&new_start_mm->mm_users);
-			atomic_inc(&prev_mm->mm_users);
+			mmget(new_start_mm);
+			mmget(prev_mm);
 			spin_lock(&mmlist_lock);
 			while (swap_count(*swap_map) && !retval &&
 					(p = p->next) != &start_mm->mmlist) {
@@ -1781,7 +1781,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 				if (set_start_mm && *swap_map < swcount) {
 					mmput(new_start_mm);
-					atomic_inc(&mm->mm_users);
+					mmget(mm);
 					new_start_mm = mm;
 					set_start_mm = 0;
 				}
@@ -204,7 +204,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
 	work->addr = hva;
 	work->arch = *arch;
 	work->mm = current->mm;
-	atomic_inc(&work->mm->mm_users);
+	mmget(work->mm);
 	kvm_get_kvm(work->vcpu->kvm);
 
 	/* this can't really happen otherwise gfn_to_pfn_async