Commit 1b36ba81 authored by Namhyung Kim, committed by Linus Torvalds

mm: wrap follow_pte() using __cond_lock()

follow_pte() conditionally grabs *@ptlp when it returns 0.  Renaming it
to __follow_pte() and wrapping it with a __cond_lock()-annotated
follow_pte() removes the following sparse warnings:

 mm/memory.c:2337:9: warning: context imbalance in 'do_wp_page' - unexpected unlock
 mm/memory.c:3142:19: warning: context imbalance in 'handle_mm_fault' - different lock contexts for basic block
Signed-off-by: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e6219ec8
@@ -3351,7 +3351,7 @@ int in_gate_area_no_task(unsigned long addr)
 #endif	/* __HAVE_ARCH_GATE_AREA */
 
-static int follow_pte(struct mm_struct *mm, unsigned long address,
+static int __follow_pte(struct mm_struct *mm, unsigned long address,
 		pte_t **ptepp, spinlock_t **ptlp)
 {
 	pgd_t *pgd;
@@ -3388,6 +3388,17 @@ static int follow_pte(struct mm_struct *mm, unsigned long address,
 	return -EINVAL;
 }
 
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+			     pte_t **ptepp, spinlock_t **ptlp)
+{
+	int res;
+
+	/* (void) is needed to make gcc happy */
+	(void) __cond_lock(*ptlp,
+			   !(res = __follow_pte(mm, address, ptepp, ptlp)));
+	return res;
+}
+
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
...
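
For reference, __cond_lock() is a sparse-only annotation; in the kernel tree it
lives in include/linux/compiler.h and is defined roughly as follows (a sketch of
the upstream definition, not part of this patch):

	#ifdef __CHECKER__
	/* tell sparse the lock x is acquired whenever the condition c is true */
	# define __cond_lock(x, c)	((c) ? ({ __acquire(x); 1; }) : 0)
	#else
	/* for gcc the annotation compiles away to the bare condition */
	# define __cond_lock(x, c)	(c)
	#endif

When the condition is true, sparse records an acquisition of the lock x; gcc sees
only the plain expression.  Since __follow_pte() returns 0 on success (with *ptlp
held), the wrapper negates the return value so that the success path is the branch
on which sparse marks *ptlp as taken, and the (void) cast keeps gcc from warning
about the unused result.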