Commit bda079d3 authored by John David Anglin, committed by Helge Deller

parisc: use spin_lock_irqsave/spin_unlock_irqrestore for PTE updates

User applications running on SMP kernels have long suffered from instability
and random segmentation faults.  This patch improves the situation although
there is more work to be done.

One of the problems is that the various routines in pgtable.h that update page
table entries use different locking mechanisms, or no lock at all (set_pte_at).
This change modifies the routines so that they all use the same lock, pa_dbit_lock,
the lock already used for dirty bit updates in the interruption code. The patch
also purges the TLB entries associated with the PTE to ensure that inconsistent
values are not used after the page table entry is updated.  The UP and SMP code
are now identical.
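
In effect, every one of these helpers now follows the same locked sequence,
condensed here from the ptep_set_wrprotect() hunk below: take pa_dbit_lock with
interrupts disabled, write the PTE, purge the matching TLB entry, then release
the lock.

	static inline void ptep_set_wrprotect(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
	{
		unsigned long flags;

		/* Serialize with the interruption-code dirty-bit updates. */
		spin_lock_irqsave(&pa_dbit_lock, flags);
		set_pte(ptep, pte_wrprotect(*ptep));
		/* Purge the stale translation while still holding the lock. */
		purge_tlb_entries(mm, addr);
		spin_unlock_irqrestore(&pa_dbit_lock, flags);
	}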

The change also includes a minor update to the purge_tlb_entries function in
cache.c to improve its efficiency.
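
Concretely, mtsp() now loads %sr1 inside the purge_tlb_start()/purge_tlb_end()
critical section, so the separate preempt_disable()/preempt_enable() pair around
it goes away. A rough sketch of the resulting function, assembled from the
cache.c hunk below (the flags declaration is taken from the surrounding context):

	void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
	{
		unsigned long flags;

		purge_tlb_start(flags);
		/* %sr1 is set up inside the critical section, so no extra
		   preemption disabling is needed around it. */
		mtsp(mm->context, 1);
		pdtlb(addr);
		pitlb(addr);
		purge_tlb_end(flags);
	}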
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: Helge Deller <deller@gmx.de>
Signed-off-by: Helge Deller <deller@gmx.de>
parent cf71130d
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,6 +16,8 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
+extern spinlock_t pa_dbit_lock;
+
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 
 #define set_pte_at(mm, addr, ptep, pteval)                      \
         do {                                                    \
+                unsigned long flags;                            \
+                spin_lock_irqsave(&pa_dbit_lock, flags);        \
                 set_pte(ptep, pteval);                          \
                 purge_tlb_entries(mm, addr);                    \
+                spin_unlock_irqrestore(&pa_dbit_lock, flags);   \
         } while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_SMP
+	pte_t pte;
+	unsigned long flags;
+
 	if (!pte_young(*ptep))
 		return 0;
-	return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
-#else
-	pte_t pte = *ptep;
-	if (!pte_young(pte))
+
+	spin_lock_irqsave(&pa_dbit_lock, flags);
+	pte = *ptep;
+	if (!pte_young(pte)) {
+		spin_unlock_irqrestore(&pa_dbit_lock, flags);
 		return 0;
-	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+	}
+	set_pte(ptep, pte_mkold(pte));
+	purge_tlb_entries(vma->vm_mm, addr);
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 	return 1;
-#endif
 }
 
-extern spinlock_t pa_dbit_lock;
-
 struct mm_struct;
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t old_pte;
+	unsigned long flags;
 
-	spin_lock(&pa_dbit_lock);
+	spin_lock_irqsave(&pa_dbit_lock, flags);
 	old_pte = *ptep;
 	pte_clear(mm,addr,ptep);
-	spin_unlock(&pa_dbit_lock);
+	purge_tlb_entries(mm, addr);
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 
 	return old_pte;
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-#ifdef CONFIG_SMP
-	unsigned long new, old;
-
-	do {
-		old = pte_val(*ptep);
-		new = pte_val(pte_wrprotect(__pte (old)));
-	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pa_dbit_lock, flags);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
-#else
-	pte_t old_pte = *ptep;
-
-	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
+	spin_unlock_irqrestore(&pa_dbit_lock, flags);
 }
 
 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 	/* Note: purge_tlb_entries can be called at startup with
 	   no context.  */
 
-	/* Disable preemption while we play with %sr1.  */
-	preempt_disable();
-	mtsp(mm->context, 1);
 	purge_tlb_start(flags);
+	mtsp(mm->context, 1);
 	pdtlb(addr);
 	pitlb(addr);
 	purge_tlb_end(flags);
-	preempt_enable();
 }
 EXPORT_SYMBOL(purge_tlb_entries);