Commit e423c0c3 authored by Russell King


Merge branch 'intr-ctxsw' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux into devel-stable
parents 69964ea4 b9d4d42a
@@ -34,11 +34,4 @@ typedef struct {
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
 #endif
@@ -43,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
+
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
 
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
 
-static inline void check_context(struct mm_struct *mm)
-{
+#else	/* !CONFIG_CPU_HAS_ASID */
+
 #ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
+}
+
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
 }
 
+#endif	/* CONFIG_MMU */
+
 #define init_new_context(tsk,mm)	0
 
-#endif
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
@@ -119,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
-		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
-		*crt_mm = next;
-#endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
...
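The comments in the mmu_context.h hunks above describe the deferral scheme: when check_and_switch_context() runs with interrupts disabled it only sets the TIF_SWITCH_MM thread flag, and finish_arch_post_lock_switch() performs the actual switch once the context-switch critical region is over. Below is a minimal user-space sketch of that pattern; the names (irqs_are_disabled, pending_switch, perform_switch) are illustrative stand-ins for the kernel primitives, not real APIs.

#include <stdbool.h>
#include <stdio.h>

static bool irqs_are_disabled;	/* stand-in for irqs_disabled() */
static bool pending_switch;	/* stand-in for the TIF_SWITCH_MM flag */
static int current_ctx = -1;	/* stand-in for the currently live mm */

static void perform_switch(int next)	/* stand-in for cpu_switch_mm() */
{
	current_ctx = next;
	printf("switched to context %d\n", current_ctx);
}

static void check_and_switch(int next)
{
	if (irqs_are_disabled)
		pending_switch = true;	/* defer, like set_ti_thread_flag() */
	else
		perform_switch(next);
}

/* Runs after the critical region, like finish_arch_post_lock_switch(). */
static void finish_post_lock_switch(int next)
{
	if (pending_switch) {
		pending_switch = false;	/* like test_and_clear_thread_flag() */
		perform_switch(next);
	}
}

int main(void)
{
	irqs_are_disabled = true;	/* inside the scheduler's critical region */
	check_and_switch(2);		/* only the flag is set here */
	irqs_are_disabled = false;	/* critical region finished */
	finish_post_lock_switch(2);	/* the deferred switch happens now */
	return 0;
}

The self-referential "#define finish_arch_post_lock_switch finish_arch_post_lock_switch" in the hunk presumably lets generic scheduler code detect the hook with #ifndef and fall back to a no-op on architectures that do not provide it.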
@@ -146,6 +146,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
+#define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
...
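The thread_info.h hunk above only adds the TIF_SWITCH_MM bit number; unlike _TIF_SIGPENDING, no shifted mask is defined, since the patch manipulates the flag solely through set_ti_thread_flag() and test_and_clear_thread_flag(), which take the bit number. A tiny stand-alone illustration of the bit-number versus mask convention, using a local flags word rather than the kernel's thread_info (the _TIF_SWITCH_MM name below is hypothetical):

#include <stdio.h>

#define TIF_SWITCH_MM	22			/* bit number, as in the hunk */
#define _TIF_SWITCH_MM	(1UL << TIF_SWITCH_MM)	/* hypothetical mask form */

int main(void)
{
	unsigned long flags = 0;

	flags |= _TIF_SWITCH_MM;	/* roughly what set_ti_thread_flag() does */
	printf("deferred switch pending: %d\n", !!(flags & _TIF_SWITCH_MM));

	flags &= ~_TIF_SWITCH_MM;	/* roughly test_and_clear_thread_flag() */
	printf("deferred switch pending: %d\n", !!(flags & _TIF_SWITCH_MM));
	return 0;
}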
@@ -18,30 +18,39 @@
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 #ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) {						\
-	unsigned long ttbl, ttbh;					\
-	asm volatile(							\
-	"	mrrc	p15, 0, %0, %1, c2		@ read TTBR0\n"	\
-	"	mov	%1, %2, lsl #(48 - 32)		@ set ASID\n"	\
-	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"	\
-	: "=&r" (ttbl), "=&r" (ttbh)					\
-	: "r" (asid & ~ASID_MASK));					\
+void cpu_set_reserved_ttbr0(void)
+{
+	unsigned long ttbl = __pa(swapper_pg_dir);
+	unsigned long ttbh = 0;
+
+	/*
+	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+	 * ASID is set to 0.
+	 */
+	asm volatile(
+	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
+	:
+	: "r" (ttbl), "r" (ttbh));
+	isb();
 }
 #else
-#define cpu_set_asid(asid) \
-	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+void cpu_set_reserved_ttbr0(void)
+{
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile(
+	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
+	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
+	: "=r" (ttb));
+	isb();
+}
 #endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in. We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -51,9 +60,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	cpu_set_asid(0);
-	isb();
+	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
 		__flush_icache_all();
@@ -98,14 +105,7 @@ static void reset_context(void *info)
 {
 	unsigned int asid;
 	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm = per_cpu(current_mm, cpu);
-
-	/*
-	 * Check if a current_mm was set on this CPU as it might still
-	 * be in the early booting stages and using the reserved ASID.
-	 */
-	if (!mm)
-		return;
+	struct mm_struct *mm = current->active_mm;
 
 	smp_rmb();
 	asid = cpu_last_asid + cpu + 1;
@@ -114,8 +114,7 @@ static void reset_context(void *info)
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	cpu_set_asid(mm->context.id);
-	isb();
+	cpu_switch_mm(mm->pgd, mm);
 }
 
 #else
...
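The context.c hunks above keep the ASID allocator's generation scheme: cpu_last_asid starts at ASID_FIRST_VERSION, i.e. 1 << ASID_BITS, so the generation (version) lives above the low ASID_BITS of context.id, and the test (mm->context.id ^ cpu_last_asid) >> ASID_BITS used in check_and_switch_context() is non-zero exactly when an mm still carries an ASID from an older generation. A small stand-alone illustration follows, assuming the usual 8-bit ASID field and a simplified rollover; the variables are local stand-ins, not kernel state.

#include <stdio.h>

#define ASID_BITS		8	/* assumed width of the hardware ASID field */
#define ASID_FIRST_VERSION	(1 << ASID_BITS)

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/* Mirrors the (id ^ cpu_last_asid) >> ASID_BITS test from the header. */
static int asid_is_stale(unsigned int id)
{
	return (id ^ cpu_last_asid) >> ASID_BITS;
}

int main(void)
{
	unsigned int id = cpu_last_asid + 5;	/* ASID handed out in this generation */

	printf("stale before rollover: %d\n", asid_is_stale(id));	/* prints 0 */

	cpu_last_asid += ASID_FIRST_VERSION;	/* simplified generation rollover */
	printf("stale after rollover:  %d\n", asid_is_stale(id));	/* non-zero */
	return 0;
}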
@@ -46,18 +46,13 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
-	isb
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
...