Commit 69b04754 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: arm ready for split ptlock

Prepare arm for the split page_table_lock: three issues.

Signal handling's preserve and restore of iwmmxt context currently involves
reading and writing that context to and from user space, while holding
page_table_lock to secure the user page(s) against kswapd.  If we split the
lock, then the structure might span two pages, secured by different locks.
That would be manageable; and it seems safer to read into and write from a
kernel stack buffer, copying that out and in without locking (the structure
is 160 bytes in size, and here we're near the top of the kernel stack).  Or
would the overhead be noticeable?
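
The alignment trick used in the new preserve/restore paths can be shown in
isolation (a minimal standalone sketch, not kernel code; struct ctx and its
size merely stand in for struct iwmmxt_sigframe):

	#include <stdio.h>
	#include <stdint.h>

	struct ctx { unsigned long storage[0x98/4]; };	/* stand-in frame */

	int main(void)
	{
		/* Over-allocate by 8 so an 8-byte-aligned pointer fits inside. */
		char kbuf[sizeof(struct ctx) + 8];
		struct ctx *kframe;

		/* Round kbuf + 8 down to a multiple of 8; the result always
		   lies within kbuf and leaves room for the whole struct. */
		kframe = (struct ctx *)(((uintptr_t)kbuf + 8) & ~(uintptr_t)7);
		printf("aligned: %d\n", (int)(((uintptr_t)kframe & 7) == 0));
		return 0;
	}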

arm_syscall's cmpxchg emulation now uses pte_offset_map_lock, instead of
pte_offset_map and the mm-wide page_table_lock; and strictly, it should now
also take mmap_sem before descending to the pmd, to guard against another
thread munmapping and the page table being pulled out beneath this thread.
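
The resulting pattern, pulled out of the emulation below for clarity (a
sketch against the page table API of this era; pte_writable is a
hypothetical helper, not part of the patch):

	/* Returns 1 if addr is mapped writable in mm, taking only the
	 * per-table pte lock after walking down under mmap_sem. */
	static int pte_writable(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
		spinlock_t *ptl;
		int ret = 0;

		down_read(&mm->mmap_sem);	/* no munmap can free the table */
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;
		pmd = pmd_offset(pgd, addr);
		if (!pmd_present(*pmd))
			goto out;
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		ret = pte_present(*pte) && pte_write(*pte);
		pte_unmap_unlock(pte, ptl);
	out:
		up_read(&mm->mmap_sem);
		return ret;
	}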

Updated two comments in fault-armv.c.  adjust_pte is interesting, since its
modification of a pte in one part of the mm depends on the lock held when
calling update_mmu_cache for a pte in some other part of that mm.  This
can't be done with a split page_table_lock (and we've already taken the
lowest lock in the hierarchy here): so we'll have to disable split on arm,
unless CONFIG_CPU_CACHE_VIPT ensures that adjust_pte is never used.
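
The constraint can be sketched like so (illustrative pseudo-C only;
ptlock_for() is a made-up name for whichever spinlock guards a given pte
under the split scheme):

	spin_lock(ptlock_for(addr));		/* fault path: lowest lock held */
	update_mmu_cache(vma, addr, pte);
		/* on arm this may reach adjust_pte() for another address: */
		spin_lock(ptlock_for(other_addr));	/* second pte lock: no safe
							   ordering at this level */

With a single mm-wide page_table_lock both ptes were covered by the one lock
already held; with split locks the two may differ, and there is no level
left in the hierarchy to order them by.
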
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 60ec5585
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -139,93 +139,33 @@ struct iwmmxt_sigframe {
 	unsigned long	storage[0x98/4];
 };
 
-static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
-{
-	unsigned long addr = (unsigned long)uptr;
-	pgd_t *pgd = pgd_offset(mm, addr);
-	if (pgd_present(*pgd)) {
-		pmd_t *pmd = pmd_offset(pgd, addr);
-		if (pmd_present(*pmd)) {
-			pte_t *pte = pte_offset_map(pmd, addr);
-			return (pte_present(*pte) && (!wr || pte_write(*pte)));
-		}
-	}
-	return 0;
-}
-
-static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
-		       void (*copyfn)(void *, void __user *))
-{
-	unsigned char v, __user *userptr = uptr;
-	int err = 0;
-
-	do {
-		struct mm_struct *mm;
-
-		if (write) {
-			__put_user_error(0, userptr, err);
-			__put_user_error(0, userptr + size - 1, err);
-		} else {
-			__get_user_error(v, userptr, err);
-			__get_user_error(v, userptr + size - 1, err);
-		}
-
-		if (err)
-			break;
-
-		mm = current->mm;
-		spin_lock(&mm->page_table_lock);
-		if (page_present(mm, userptr, write) &&
-		    page_present(mm, userptr + size - 1, write)) {
-			copyfn(kptr, uptr);
-		} else
-			err = 1;
-		spin_unlock(&mm->page_table_lock);
-	} while (err);
-
-	return err;
-}
-
 static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
 {
-	int err = 0;
+	char kbuf[sizeof(*frame) + 8];
+	struct iwmmxt_sigframe *kframe;
 
 	/* the iWMMXt context must be 64 bit aligned */
-	WARN_ON((unsigned long)frame & 7);
-
-	__put_user_error(IWMMXT_MAGIC0, &frame->magic0, err);
-	__put_user_error(IWMMXT_MAGIC1, &frame->magic1, err);
-
-	/*
-	 * iwmmxt_task_copy() doesn't check user permissions.
-	 * Let's do a dummy write on the upper boundary to ensure
-	 * access to user mem is OK all way up.
-	 */
-	err |= copy_locked(&frame->storage, current_thread_info(),
-			   sizeof(frame->storage), 1, iwmmxt_task_copy);
-	return err;
+	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	kframe->magic0 = IWMMXT_MAGIC0;
+	kframe->magic1 = IWMMXT_MAGIC1;
+	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+	return __copy_to_user(frame, kframe, sizeof(*frame));
 }
 
 static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 {
-	unsigned long magic0, magic1;
-	int err = 0;
+	char kbuf[sizeof(*frame) + 8];
+	struct iwmmxt_sigframe *kframe;
 
-	/* the iWMMXt context is 64 bit aligned */
-	WARN_ON((unsigned long)frame & 7);
-
-	/*
-	 * Validate iWMMXt context signature.
-	 * Also, iwmmxt_task_restore() doesn't check user permissions.
-	 * Let's do a dummy write on the upper boundary to ensure
-	 * access to user mem is OK all way up.
-	 */
-	__get_user_error(magic0, &frame->magic0, err);
-	__get_user_error(magic1, &frame->magic1, err);
-	if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
-		err = copy_locked(&frame->storage, current_thread_info(),
-				  sizeof(frame->storage), 0, iwmmxt_task_restore);
-	return err;
+	/* the iWMMXt context must be 64 bit aligned */
+	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	if (__copy_from_user(kframe, frame, sizeof(*frame)))
+		return -1;
+	if (kframe->magic0 != IWMMXT_MAGIC0 ||
+	    kframe->magic1 != IWMMXT_MAGIC1)
+		return -1;
+	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+	return 0;
 }
 
 #endif
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -483,29 +483,33 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		unsigned long addr = regs->ARM_r2;
 		struct mm_struct *mm = current->mm;
 		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+		spinlock_t *ptl;
 
 		regs->ARM_cpsr &= ~PSR_C_BIT;
-		spin_lock(&mm->page_table_lock);
+		down_read(&mm->mmap_sem);
 		pgd = pgd_offset(mm, addr);
 		if (!pgd_present(*pgd))
 			goto bad_access;
 		pmd = pmd_offset(pgd, addr);
 		if (!pmd_present(*pmd))
 			goto bad_access;
-		pte = pte_offset_map(pmd, addr);
-		if (!pte_present(*pte) || !pte_write(*pte))
+		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+		if (!pte_present(*pte) || !pte_write(*pte)) {
+			pte_unmap_unlock(pte, ptl);
 			goto bad_access;
+		}
 		val = *(unsigned long *)addr;
 		val -= regs->ARM_r0;
 		if (val == 0) {
 			*(unsigned long *)addr = regs->ARM_r1;
 			regs->ARM_cpsr |= PSR_C_BIT;
 		}
-		spin_unlock(&mm->page_table_lock);
+		pte_unmap_unlock(pte, ptl);
+		up_read(&mm->mmap_sem);
 		return val;
 
 	bad_access:
-		spin_unlock(&mm->page_table_lock);
+		up_read(&mm->mmap_sem);
 		/* simulate a write access fault */
 		do_DataAbort(addr, 15 + (1 << 11), regs);
 		return -1;
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -26,6 +26,11 @@ static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable.  However, we leave the write buffer on.
+ *
+ * Note that the pte lock held when calling update_mmu_cache must also
+ * guard the pte (somewhere else in the same mm) that we modify here.
+ * Therefore those configurations which might call adjust_pte (those
+ * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 {
@@ -127,7 +132,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
  *  2. If we have multiple shared mappings of the same space in
  *     an object, we need to deal with the cache aliasing issues.
  *
- * Note that the page_table_lock will be held.
+ * Note that the pte lock will be held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {