Commit 96eacb6b authored by Paul Mackerras

[PATCH] PPC64 Segment table code cleanup - replace flush_stab() with switch_stab()

preload_stab() is only ever called (once) from flush_stab(), and
flush_stab() is only ever called from switch_mm().  So, combine both
functions into the more accurately named switch_stab(), called from
switch_mm().
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 8fe68cca
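
The shape of the change, as a simplified sketch of the call structure before and after (illustrative pseudo-C, not the kernel source itself):

	/* Before: preload_stab()'s only caller is flush_stab(),
	 * whose only caller is switch_mm(). */
	static void preload_stab(struct task_struct *tsk, struct mm_struct *mm);

	void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
	{
		/* ... flush all user entries from the segment table ... */
		preload_stab(tsk, mm);		/* the single call site */
	}

	/* After: one function, named for the context-switch job it does. */
	void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
	{
		/* ... flush all user entries from the segment table ... */
		/* ... then preload entries for the new task ... */
	}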
@@ -152,42 +152,18 @@ int ste_allocate(unsigned long ea)
 }
 
 /*
- * preload some userspace segments into the segment table.
+ * Do the segment table work for a context switch: flush all user
+ * entries from the table, then preload some probably useful entries
+ * for the new task
  */
-static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
-{
-	unsigned long pc = KSTK_EIP(tsk);
-	unsigned long stack = KSTK_ESP(tsk);
-	unsigned long unmapped_base;
-
-	if (test_tsk_thread_flag(tsk, TIF_32BIT))
-		unmapped_base = TASK_UNMAPPED_BASE_USER32;
-	else
-		unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
-	__ste_allocate(pc, mm);
-
-	if (GET_ESID(pc) == GET_ESID(stack))
-		return;
-
-	__ste_allocate(stack, mm);
-
-	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
-	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
-		return;
-
-	__ste_allocate(unmapped_base, mm);
-
-	/* Order update */
-	asm volatile("sync" : : : "memory");
-}
-
-/* Flush all user entries from the segment table of the current processor. */
-void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
+void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
 	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
 	struct stab_entry *ste;
 	unsigned long offset = __get_cpu_var(stab_cache_ptr);
+	unsigned long pc = KSTK_EIP(tsk);
+	unsigned long stack = KSTK_ESP(tsk);
+	unsigned long unmapped_base;
 
 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");
@@ -222,7 +198,27 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 	__get_cpu_var(stab_cache_ptr) = 0;
 
-	preload_stab(tsk, mm);
+	/* Now preload some entries for the new task */
+	if (test_tsk_thread_flag(tsk, TIF_32BIT))
+		unmapped_base = TASK_UNMAPPED_BASE_USER32;
+	else
+		unmapped_base = TASK_UNMAPPED_BASE_USER64;
+
+	__ste_allocate(pc, mm);
+
+	if (GET_ESID(pc) == GET_ESID(stack))
+		return;
+
+	__ste_allocate(stack, mm);
+
+	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
+	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
+		return;
+
+	__ste_allocate(unmapped_base, mm);
+
+	/* Order update */
+	asm volatile("sync" : : : "memory");
 }
 
 extern void slb_initialize(void);
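
A note on the preload heuristic above: each segment-table entry maps one 256MB segment, and GET_ESID() reduces an effective address to its segment index, so the GET_ESID() comparisons skip redundant __ste_allocate() calls when two of the preloaded addresses fall in the same segment. A rough sketch of the macro, from memory of the ppc64 headers of this era (the real definition also masks the shifted value):

	#define SID_SHIFT	28			/* 2^28 = 256MB per segment */
	#define GET_ESID(ea)	((ea) >> SID_SHIFT)	/* effective segment ID */

	/* e.g. a pc of 0x10002345 and a stack near 0xffffe000 sit in
	 * different segments, so both get an entry; a pc and stack in
	 * the same 256MB segment would share one. */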
@@ -135,7 +135,7 @@ destroy_context(struct mm_struct *mm)
 	spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
 }
 
-extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 
 /*
@@ -163,7 +163,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
 		switch_slb(tsk, next);
 	else
-		flush_stab(tsk, next);
+		switch_stab(tsk, next);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)