Commit 94934195 authored by Anton Blanchard

ppc64: Disable irqs in init_new_context, destroy_context

parent 53405a4e
...@@ -72,19 +72,19 @@ mmu_context_underflow(void) ...@@ -72,19 +72,19 @@ mmu_context_underflow(void)
panic("mmu_context_underflow"); panic("mmu_context_underflow");
} }
/* /*
* Set up the context for a new address space. * Set up the context for a new address space.
*/ */
static inline int static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm) init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{ {
long head, size; long head;
unsigned long flags;
spin_lock( &mmu_context_queue.lock ); spin_lock_irqsave(&mmu_context_queue.lock, flags);
if ( (size = mmu_context_queue.size) <= 0 ) { if (mmu_context_queue.size <= 0) {
spin_unlock( &mmu_context_queue.lock ); spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
return -ENOMEM; return -ENOMEM;
} }
...@@ -93,9 +93,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) ...@@ -93,9 +93,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0; head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
mmu_context_queue.head = head; mmu_context_queue.head = head;
mmu_context_queue.size = size-1; mmu_context_queue.size--;
spin_unlock( &mmu_context_queue.lock ); spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
return 0; return 0;
} }
...@@ -106,12 +106,13 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) ...@@ -106,12 +106,13 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static inline void static inline void
destroy_context(struct mm_struct *mm) destroy_context(struct mm_struct *mm)
{ {
long index, size = mmu_context_queue.size; long index;
unsigned long flags;
spin_lock( &mmu_context_queue.lock ); spin_lock_irqsave(&mmu_context_queue.lock, flags);
if ( (size = mmu_context_queue.size) >= NUM_USER_CONTEXT ) { if (mmu_context_queue.size >= NUM_USER_CONTEXT) {
spin_unlock( &mmu_context_queue.lock ); spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
mmu_context_underflow(); mmu_context_underflow();
} }
...@@ -125,10 +126,10 @@ destroy_context(struct mm_struct *mm) ...@@ -125,10 +126,10 @@ destroy_context(struct mm_struct *mm)
mmu_context_queue.tail = index; mmu_context_queue.tail = index;
#endif #endif
mmu_context_queue.size = size+1; mmu_context_queue.size++;
mmu_context_queue.elements[index] = mm->context; mmu_context_queue.elements[index] = mm->context;
spin_unlock( &mmu_context_queue.lock ); spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
} }
extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm); extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment