Commit 9f400745 authored by Will Deacon, committed by Greg Kroah-Hartman

ARM: 7659/1: mm: make mm->context.id an atomic64_t variable

commit 8a4e3a9e upstream.

mm->context.id is updated under asid_lock when a new ASID is allocated
to an mm_struct. However, it is also read without the lock when a task
is being scheduled and checking whether or not the current ASID
generation is up-to-date.
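
For illustration, a minimal user-space sketch of the underlying hazard (hypothetical names, not kernel code): on ARMv6/v7 a plain 64-bit access is compiled as two 32-bit accesses, so a lockless reader can observe a torn value.

  #include <stdint.h>

  /* Hypothetical stand-in for mm->context.id: generation in the
   * upper bits, ASID in the bottom bits. */
  static volatile uint64_t context_id;

  /* Writer: in the kernel this runs under the asid lock. The plain
   * 64-bit store is emitted as two 32-bit stores. */
  void update_id(uint64_t new_id)
  {
          context_id = new_id;
  }

  /* Lockless reader: two 32-bit loads, which can straddle a
   * concurrent update_id() and return half-old, half-new bits. */
  uint64_t read_id(void)
  {
          return context_id;
  }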

If two threads of the same process are being scheduled in parallel and
the bottom bits of the generation in their mm->context.id match the
current generation (that is, the mm_struct has not been used for ~2^24
rollovers) then the non-atomic, lockless access to mm->context.id may
yield the incorrect ASID.
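
The ~2^24 figure follows from the layout: in a torn read only the low 32-bit word is seen consistently, and with ASID_BITS = 8 that word holds the ASID plus the bottom 24 bits of the generation. A standalone sketch of the arithmetic (constants taken from the hunks below):

  #include <stdint.h>
  #include <stdio.h>

  #define ASID_BITS 8
  #define GEN_BITS_IN_LOW_WORD (32 - ASID_BITS)  /* 24 bits */

  int main(void)
  {
          /* Two generations 2^24 apart collide in the low 32-bit
           * word of context.id even though the full 64-bit values
           * differ. */
          uint64_t gen_a = 1;
          uint64_t gen_b = gen_a + (1ULL << GEN_BITS_IN_LOW_WORD);
          uint64_t id_a = gen_a << ASID_BITS;
          uint64_t id_b = gen_b << ASID_BITS;

          printf("low words equal: %d\n",
                 (uint32_t)id_a == (uint32_t)id_b);  /* prints 1 */
          return 0;
  }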

This patch fixes the issue by making mm->context.id an atomic64_t,
ensuring that the generation is always read consistently. For code that
only requires access to the ASID bits (e.g. TLB flushing by mm), the
value is accessed directly, which GCC converts to an ldrb.
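
Condensed from the hunks below, the two access patterns this establishes:

  /* Full 64-bit value (generation + ASID): must go through the
   * atomic accessor, since ARMv6/v7 has no single-copy atomic plain
   * 64-bit load. */
  u64 id = atomic64_read(&mm->context.id);

  /* ASID-only users (e.g. TLB flushing by mm): the bottom ASID_BITS
   * fit in one byte, so a direct read of the counter field is safe
   * and GCC can emit a single ldrb. */
  #define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)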
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 810f6379
@@ -5,15 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	u64 id;
+	atomic64_t id;
 #endif
 	unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS 8
 #define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id & ~ASID_MASK)
+#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
 #else
 #define ASID(mm) (0)
 #endif
@@ -26,7 +26,7 @@ typedef struct {
  * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
  */
 typedef struct {
 	unsigned long end_brk;
 } mm_context_t;
 
 #endif
...
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm) ({ mm->context.id = 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
 
 #else /* !CONFIG_CPU_HAS_ASID */
...
@@ -107,7 +107,7 @@ int main(void)
   BLANK();
 #endif
 #ifdef CONFIG_CPU_HAS_ASID
-  DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
   BLANK();
 #endif
   DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
...
@@ -149,9 +149,9 @@ static int is_reserved_asid(u64 asid)
 	return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-	u64 asid = mm->context.id;
+	u64 asid = atomic64_read(&mm->context.id);
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0 && is_reserved_asid(asid)) {
@@ -178,13 +178,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
 		cpumask_clear(mm_cpumask(mm));
 	}
 
-	mm->context.id = asid;
+	return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
+	u64 asid;
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
@@ -195,19 +196,23 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	 */
 	cpu_set_reserved_ttbr0();
 
-	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+	asid = atomic64_read(&mm->context.id);
+	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
 		goto switch_mm_fastpath;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
-	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-		new_context(mm, cpu);
+	asid = atomic64_read(&mm->context.id);
+	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+		asid = new_context(mm, cpu);
+		atomic64_set(&mm->context.id, asid);
+	}
 
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
 		local_flush_tlb_all();
 
-	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
+	atomic64_set(&per_cpu(active_asids, cpu), asid);
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
...