Commit 7ee175f7 authored by Tony Luck

Pull mm-context-fix into release branch

parents dcf82962 badea125
--- a/include/asm-ia64/mmu.h
+++ b/include/asm-ia64/mmu.h
@@ -2,10 +2,12 @@
 #define __MMU_H

 /*
- * Type for a context number.  We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number.  We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;

+typedef unsigned long nv_mm_context_t;
+
 #endif
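The typedef split above is the heart of the fix: the shared mm->context stays volatile so cross-CPU updates are always re-read, while locals and parameters use the plain nv_mm_context_t so each function works on one stable snapshot instead of re-reading a value that may change under it. A minimal user-space sketch of the idea; aside from the two typedefs taken from the diff, every name here is hypothetical:

#include <stdio.h>

/* Same typedefs as in mmu.h above. */
typedef volatile unsigned long mm_context_t;
typedef unsigned long nv_mm_context_t;

static mm_context_t shared_context;     /* stands in for mm->context */

static nv_mm_context_t
snapshot_context (void)
{
        /*
         * Exactly one volatile read: every later use of "context" sees
         * the same value, even if shared_context changes concurrently.
         */
        nv_mm_context_t context = shared_context;
        return context;
}

int
main (void)
{
        shared_context = 42;
        printf("context = %lu\n", (unsigned long) snapshot_context());
        return 0;
}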
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -55,34 +55,46 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;

 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
-		local_flush_tlb_all();
-		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+				local_flush_tlb_all();
+				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }

-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;

-	if (context)
-		return context;
-
-	spin_lock_irqsave(&ia64_ctx.lock, flags);
-	{
-		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
+	nv_mm_context_t context = mm->context;
+
+	if (unlikely(!context)) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			/* re-check, now that we've got the lock: */
+			context = mm->context;
+			if (context == 0) {
+				cpus_clear(mm->cpu_vm_mask);
+				if (ia64_ctx.next >= ia64_ctx.limit)
+					wrap_mmu_context(mm);
+				mm->context = context = ia64_ctx.next++;
+			}
 		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
-	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();

 	return context;
 }
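Both rewritten functions now follow the same check / lock / re-check discipline: test the shared state without the lock for the common fast path, then take ia64_ctx.lock and test again before acting, so a concurrent wrap_mmu_context() can neither be missed nor have its need-flush flag clobbered by an unlocked store. A user-space analogue of the pattern using POSIX threads; all names here are hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long context;           /* 0 means "not yet allocated" */
static unsigned long next_context = 1;  /* stands in for ia64_ctx.next */

static unsigned long
get_context (void)
{
        unsigned long ctx = context;    /* unlocked fast path */

        if (ctx == 0) {
                pthread_mutex_lock(&ctx_lock);
                /*
                 * Re-check, now that we've got the lock: another thread
                 * may have allocated a context in the meantime.
                 */
                ctx = context;
                if (ctx == 0)
                        context = ctx = next_context++;
                pthread_mutex_unlock(&ctx_lock);
        }
        return ctx;
}

int
main (void)
{
        printf("context = %lu\n", get_context());
        printf("context = %lu\n", get_context()); /* fast path this time */
        return 0;
}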
@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
 }

 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;

 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.
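The last hunk is the other half of the relocation: delayed_tlb_flush() leaves activate_mm() and runs inside get_mmu_context() instead, after a context has been (re)allocated, so the flush is ordered against wrap_mmu_context() by the same lock. In the kernel source, activate_context() then retries until the snapshot it installed still matches the volatile mm->context (the loop's closing condition sits outside this diff's context lines). A hypothetical user-space analogue of that retry loop:

#include <stdio.h>

typedef volatile unsigned long mm_context_t;
typedef unsigned long nv_mm_context_t;

static mm_context_t shared_context = 7; /* stands in for mm->context */
static unsigned long current_rid;       /* stands in for the installed region ID */

static void
activate (void)
{
        nv_mm_context_t context;

        do {
                context = shared_context;       /* get_mmu_context() snapshot */
                current_rid = context;          /* reload_context(context) */
                /*
                 * If another thread wrapped and handed out a new context
                 * while we were installing this one, redo the load.  The
                 * loop condition re-reads the volatile shared_context.
                 */
        } while (context != shared_context);
}

int
main (void)
{
        activate();
        printf("installed region id for context %lu\n", current_rid);
        return 0;
}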