Commit 0f1a394b authored by Paul Mundt

sh: lockless UTLB miss fast-path.

With the refactored update_mmu_cache() introduced in older kernels,
there's no longer any need to take the page_table_lock in this path,
so simply drop it completely.

Without this, performance degradation is seen on SMP on heavily
threaded workloads that don't use the split ptlock, and ultimately
we have no reason to contend for the lock in the first place.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 1c6b2ca5
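
Why the lock hurt: pte_offset_map_lock() takes the split ptlock when it is configured, but on configurations without it the helper falls back to the single per-mm page_table_lock, so every thread of a process faulting through the UTLB miss path serializes on one spinlock. A rough sketch of the helper being dropped, simplified from the generic headers of that era (treat the exact expansion as approximate, not authoritative):

	/*
	 * Approximate shape of pte_offset_map_lock().  Without the split
	 * ptlock, pte_lockptr() degenerates to the single
	 * &mm->page_table_lock, which is where the contention on heavily
	 * threaded workloads comes from.
	 */
	#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
	({							\
		spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
		pte_t *__pte = pte_offset_map(pmd, address);	\
		*(ptlp) = __ptl;				\
		spin_lock(__ptl);				\
		__pte;						\
	})

The fast path below only snapshots the pte into a local (entry = *pte) and validates the copy; a racing update at worst produces another fault, so there is nothing for the lock to protect.
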
@@ -258,9 +258,6 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl = NULL;
-	int ret = 1;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -274,12 +271,11 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	 */
 	if (address >= P3SEG && address < P3_ADDR_MAX) {
 		pgd = pgd_offset_k(address);
-		mm = NULL;
 	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
+		if (unlikely(address >= TASK_SIZE || !current->mm))
 			return 1;
 
-		pgd = pgd_offset(mm, address);
+		pgd = pgd_offset(current->mm, address);
 	}
 
 	pud = pud_offset(pgd, address);
@@ -289,16 +285,12 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	if (pmd_none_or_clear_bad(pmd))
 		return 1;
 
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
+	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
+		return 1;
 	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
+		return 1;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
@@ -306,9 +298,6 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
+
+	return 0;
 }
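
For readability, roughly how the fast path reads with this patch applied, stitched together from the hunks above. Lines the diff does not show (the pud/pmd walk between hunks, the SH-4 ITLB flush, the full argument list) are elided or assumed:

	/*
	 * Post-patch fast path, reconstructed from the hunks above; the
	 * elided sections are marked and the argument list is abbreviated.
	 */
	asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, ...)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;
		pte_t entry;

		if (address >= P3SEG && address < P3_ADDR_MAX) {
			/* Kernel P3 area: walk the init page tables. */
			pgd = pgd_offset_k(address);
		} else {
			if (unlikely(address >= TASK_SIZE || !current->mm))
				return 1;

			pgd = pgd_offset(current->mm, address);
		}

		pud = pud_offset(pgd, address);
		/* ... pmd lookup elided ... */
		if (pmd_none_or_clear_bad(pmd))
			return 1;

		/* Lockless: snapshot the pte, validate the local copy. */
		pte = pte_offset_kernel(pmd, address);
		entry = *pte;
		if (unlikely(pte_none(entry) || pte_not_present(entry)))
			return 1;
		if (unlikely(writeaccess && !pte_write(entry)))
			return 1;

		if (writeaccess)
			entry = pte_mkdirty(entry);

		/* ... SH-4 ITLB flush elided ... */
		set_pte(pte, entry);
		update_mmu_cache(NULL, address, entry);

		return 0;
	}

The only write is set_pte() on the already-validated entry, and update_mmu_cache() (post-refactor, per the message above) reloads the TLB without needing the page_table_lock.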