Commit b462705a authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: arches skip ptlock

Convert the few architectures that call pud_alloc, pmd_alloc or
pte_alloc_map on a user mm so that they no longer take the page_table_lock
beforehand, nor drop it afterwards.  Each of these callers can continue to
use pte_alloc_map; there is no need to change over to pte_alloc_map_lock,
since they are neither racy nor swappable.
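
For illustration only, here is a minimal sketch of the calling pattern
being removed, against the one left behind.  map_user_pte_locked and
map_user_pte are hypothetical helpers, not code from this patch; the
sketch assumes the allocators now take page_table_lock internally, as
arranged by the parent commit.

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Old pattern: bracket the allocators with the per-mm lock, held only
 * to satisfy pud_alloc/pmd_alloc/pte_alloc_map's old locking rules. */
static pte_t *map_user_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	spin_lock(&mm->page_table_lock);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto out;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(mm, pmd, addr);
out:
	spin_unlock(&mm->page_table_lock);
	return pte;
}

/* New pattern: call the allocators with no lock held; they lock
 * internally when they must.  pte_alloc_map still suffices, with no
 * need for pte_alloc_map_lock, because these setup paths are
 * single-threaded: neither racy nor swappable. */
static pte_t *map_user_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc(mm, pgd, addr);
	pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;

	return pmd ? pte_alloc_map(mm, pmd, addr) : NULL;
}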

In the sparc64 io_remap_pfn_range, flush_tlb_range now falls outside the
page_table_lock: that is okay, since on sparc64 it behaves like
flush_tlb_mm, which has always been called from outside the
page_table_lock in dup_mmap.
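
A hedged skeleton of the resulting sparc64 flow, consolidating the two
sparc64 hunks below into one function for readability (sketch_io_remap
is a made-up name and the inner mapping step is elided, so this is a
restatement of the hunks, not the actual file contents):

#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static int sketch_io_remap(struct vm_area_struct *vma,
			   unsigned long beg, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long from = beg;
	pgd_t *dir = pgd_offset(mm, from);
	int error = 0;

	flush_cache_range(vma, beg, end);
	while (from < end) {
		/* No page_table_lock taken around pud_alloc any more. */
		pud_t *pud = pud_alloc(mm, dir, from);

		error = -ENOMEM;
		if (!pud)
			break;
		/* ... fill in the ptes under this pud (elided) ... */
		error = 0;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	/* Runs with no lock held: on sparc64 this flush behaves like
	 * flush_tlb_mm, which dup_mmap has always called unlocked. */
	flush_tlb_range(vma, beg, end);
	return error;
}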
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c74df32c
@@ -179,11 +179,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
 	if (!vectors_high()) {
-		/*
-		 * This lock is here just to satisfy pmd_alloc and pte_lock
-		 */
-		spin_lock(&mm->page_table_lock);
-
 		/*
 		 * On ARM, first page must always be allocated since it
 		 * contains the machine vectors.
@@ -201,23 +196,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		set_pte(new_pte, *init_pte);
 		pte_unmap_nested(init_pte);
 		pte_unmap(new_pte);
-
-		spin_unlock(&mm->page_table_lock);
 	}
 
 	return new_pgd;
 
 no_pte:
-	spin_unlock(&mm->page_table_lock);
 	pmd_free(new_pmd);
-	free_pages((unsigned long)new_pgd, 2);
-	return NULL;
-
 no_pmd:
-	spin_unlock(&mm->page_table_lock);
 	free_pages((unsigned long)new_pgd, 2);
-	return NULL;
-
 no_pgd:
 	return NULL;
 }
...
@@ -78,12 +78,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	if (!new_pgd)
 		goto no_pgd;
 
-	/*
-	 * This lock is here just to satisfy pmd_alloc and pte_lock
-	 * FIXME: I bet we could avoid taking it pretty much altogether
-	 */
-	spin_lock(&mm->page_table_lock);
-
 	/*
 	 * On ARM, first page must always be allocated since it contains
 	 * the machine vectors.
@@ -113,23 +107,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
 	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
 
-	spin_unlock(&mm->page_table_lock);
-
 	/* update MEMC tables */
 	cpu_memc_update_all(new_pgd);
 	return new_pgd;
 
 no_pte:
-	spin_unlock(&mm->page_table_lock);
 	pmd_free(new_pmd);
-	free_pgd_slow(new_pgd);
-	return NULL;
-
 no_pmd:
-	spin_unlock(&mm->page_table_lock);
 	free_pgd_slow(new_pgd);
-	return NULL;
-
 no_pgd:
 	return NULL;
 }
...
@@ -81,9 +81,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	dir = pgd_offset(mm, from);
 	flush_cache_range(vma, beg, end);
 
-	spin_lock(&mm->page_table_lock);
 	while (from < end) {
-		pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+		pmd_t *pmd = pmd_alloc(mm, dir, from);
 		error = -ENOMEM;
 		if (!pmd)
 			break;
@@ -93,7 +92,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 		from = (from + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	}
-	spin_unlock(&mm->page_table_lock);
 
 	flush_tlb_range(vma, beg, end);
 	return error;
...
@@ -135,9 +135,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	dir = pgd_offset(mm, from);
 	flush_cache_range(vma, beg, end);
 
-	spin_lock(&mm->page_table_lock);
 	while (from < end) {
-		pud_t *pud = pud_alloc(current->mm, dir, from);
+		pud_t *pud = pud_alloc(mm, dir, from);
 		error = -ENOMEM;
 		if (!pud)
 			break;
@@ -147,8 +146,7 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 		from = (from + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	}
-	flush_tlb_range(vma, beg, end);
-	spin_unlock(&mm->page_table_lock);
+	flush_tlb_range(vma, beg, end);
 
 	return error;
 }
@@ -28,7 +28,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	pmd_t *pmd;
 	pte_t *pte;
 
-	spin_lock(&mm->page_table_lock);
 	pgd = pgd_offset(mm, proc);
 	pud = pud_alloc(mm, pgd, proc);
 	if (!pud)
@@ -63,7 +62,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkexec(*pte);
 	*pte = pte_wrprotect(*pte);
-	spin_unlock(&mm->page_table_lock);
 	return(0);
 
 out_pmd:
@@ -71,7 +69,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 
 out_pte:
 	pmd_free(pmd);
 out:
-	spin_unlock(&mm->page_table_lock);
 	return(-ENOMEM);
 }
...