Commit 53e857f3 authored by Martin Schwidefsky

s390/mm,tlb: race of lazy TLB flush vs. recreation of TLB entries

Git commit 050eef36 "[S390] fix tlb flushing vs. concurrent
/proc accesses" introduced the attach counter to avoid using the
mm_users value to decide between IPTE for every PTE and lazy TLB
flushing with IDTE. That fixed the problem with mm_users, but it
introduced another subtle race, fortunately one that is very hard
to hit.
The background is the requirement of the architecture that a valid
PTE may not be changed while it can be used concurrently by another
cpu. The decision between IPTE and lazy TLB flushing needs to be
done while the PTE is still valid. Now if the virtual cpu is
temporarily stopped after the decision to use lazy TLB flushing but
before the invalid bit of the PTE has been set, another cpu can attach
the mm, find that flush_mm is set, do the IDTE, return to user space,
and recreate a TLB entry that uses the PTE in question. When the first,
stopped cpu continues, it will change the PTE while the mm is still
attached on another cpu. The first cpu will do another IDTE shortly
after the modification of the PTE, which makes the race window quite
short.
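
The sequence of events as an illustrative timeline (an editor's sketch,
not part of the patch; the cpu numbers are arbitrary):

/*
 * CPU 0: lazy flush of a PTE in mm     CPU 1: attaches the mm
 * --------------------------------     ----------------------
 * reads attach_count, decides on
 * lazy flushing, sets
 * mm->context.flush_mm = 1
 *   (virtual cpu stopped here)         switch_mm(): increments
 *                                      attach_count, sees flush_mm
 *                                      set, performs the IDTE,
 *                                      returns to user space and
 *                                      recreates a TLB entry from
 *                                      the still-valid PTE
 * sets _PAGE_INVALID in the PTE  <--   modifies a valid PTE that
 *                                      CPU 1 can use concurrently
 * issues another IDTE soon after, so the window is short
 */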

To fix this race, the CPU that wants to attach the address space of a
user space thread needs to wait for the end of the PTE modification.
The number of concurrent TLB flushers for an mm is tracked in the
upper 16 bits of the attach_count, and finish_arch_post_lock_switch
is used to wait for the end of the flush operation if required.
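
As a rough sketch of how this counting works (a minimal user-space
model with hypothetical names; the actual patch below operates on the
atomic attach_count in mm->context):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Split counter: the low 16 bits count cpus that have the mm attached,
 * the high 16 bits count cpus currently modifying a PTE of the mm.
 */
static atomic_int attach_count;

#define FLUSHER 0x10000                 /* one unit in the upper half */

/* Flusher side, cf. ptep_flush_lazy(): announce the PTE modification,
 * then check that no other cpu has the mm attached. */
static bool lazy_flush_allowed(int active)
{
        int count = atomic_fetch_add(&attach_count, FLUSHER) + FLUSHER;
        return (count & 0xffff) <= active;
}

static void lazy_flush_done(void)
{
        atomic_fetch_sub(&attach_count, FLUSHER);
}

/* Attacher side, cf. switch_mm(): a non-zero upper half means a PTE
 * modification is in flight, so the attach must be delayed... */
static bool attach_must_wait(void)
{
        return (atomic_fetch_add(&attach_count, 1) + 1) >> 16 != 0;
}

/* ...until finish_arch_post_lock_switch() sees the last flusher leave. */
static void wait_for_flushers(void)
{
        while (atomic_load(&attach_count) >> 16)
                ;       /* cpu_relax() in the kernel */
}

Either the flusher finds another attached cpu in the lower half and
falls back to a direct IPTE, or the attacher finds the upper half
non-zero and spins until the PTE update has completed; the two sides
can no longer miss each other.
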
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent a53efe5f
arch/s390/include/asm/mmu_context.h
@@ -48,13 +48,42 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk)
 {
-        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-        update_mm(next, tsk);
+        int cpu = smp_processor_id();
+
+        if (prev == next)
+                return;
+        if (atomic_inc_return(&next->context.attach_count) >> 16) {
+                /* Delay update_mm until all TLB flushes are done. */
+                set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+        } else {
+                cpumask_set_cpu(cpu, mm_cpumask(next));
+                update_mm(next, tsk);
+                if (next->context.flush_mm)
+                        /* Flush pending TLBs */
+                        __tlb_flush_mm(next);
+        }
         atomic_dec(&prev->context.attach_count);
         WARN_ON(atomic_read(&prev->context.attach_count) < 0);
-        atomic_inc(&next->context.attach_count);
-        /* Check for TLBs not flushed yet */
-        __tlb_flush_mm_lazy(next);
+}
+
+#define finish_arch_post_lock_switch finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+        struct task_struct *tsk = current;
+        struct mm_struct *mm = tsk->mm;
+
+        if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
+                return;
+        preempt_disable();
+        clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+        while (atomic_read(&mm->context.attach_count) >> 16)
+                cpu_relax();
+
+        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+        update_mm(mm, tsk);
+        if (mm->context.flush_mm)
+                __tlb_flush_mm(mm);
+        preempt_enable();
 }
 
 #define enter_lazy_tlb(mm,tsk)  do { } while (0)
arch/s390/include/asm/pgtable.h
@@ -1034,30 +1034,41 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
-        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+        unsigned long pto = (unsigned long) ptep;
+
 #ifndef CONFIG_64BIT
-                /* pto must point to the start of the segment table */
-                pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-#else
-                /* ipte in zarch mode can do the math */
-                pte_t *pto = ptep;
+        /* pto in ESA mode must point to the start of the segment table */
+        pto &= 0x7ffffc00;
 #endif
-                asm volatile(
-                        "       ipte    %2,%3"
-                        : "=m" (*ptep) : "m" (*ptep),
-                          "a" (pto), "a" (address));
-        }
+        /* Invalidation + global TLB flush for the pte */
+        asm volatile(
+                "       ipte    %2,%3"
+                : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
+static inline void ptep_flush_direct(struct mm_struct *mm,
+                                     unsigned long address, pte_t *ptep)
+{
+        if (pte_val(*ptep) & _PAGE_INVALID)
+                return;
+        __ptep_ipte(address, ptep);
 }
 
 static inline void ptep_flush_lazy(struct mm_struct *mm,
                                    unsigned long address, pte_t *ptep)
 {
-        int active = (mm == current->active_mm) ? 1 : 0;
+        int active, count;
 
-        if (atomic_read(&mm->context.attach_count) > active)
-                __ptep_ipte(address, ptep);
-        else
+        if (pte_val(*ptep) & _PAGE_INVALID)
+                return;
+        active = (mm == current->active_mm) ? 1 : 0;
+        count = atomic_add_return(0x10000, &mm->context.attach_count);
+        if ((count & 0xffff) <= active) {
+                pte_val(*ptep) |= _PAGE_INVALID;
                 mm->context.flush_mm = 1;
+        } else
+                __ptep_ipte(address, ptep);
+        atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -1074,7 +1085,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
         }
 
         pte = *ptep;
-        __ptep_ipte(addr, ptep);
+        ptep_flush_direct(vma->vm_mm, addr, ptep);
         young = pte_young(pte);
         pte = pte_mkold(pte);
@@ -1145,7 +1156,6 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 
         pte = *ptep;
         ptep_flush_lazy(mm, address, ptep);
-        pte_val(*ptep) |= _PAGE_INVALID;
 
         if (mm_has_pgste(mm)) {
                 pgste = pgste_update_all(&pte, pgste);
@@ -1182,7 +1192,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
         }
 
         pte = *ptep;
-        __ptep_ipte(address, ptep);
+        ptep_flush_direct(vma->vm_mm, address, ptep);
         pte_val(*ptep) = _PAGE_INVALID;
 
         if (mm_has_pgste(vma->vm_mm)) {
@@ -1263,7 +1273,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
         }
 
-        __ptep_ipte(address, ptep);
+        ptep_flush_direct(vma->vm_mm, address, ptep);
 
         if (mm_has_pgste(vma->vm_mm)) {
                 pgste_set_pte(ptep, entry);
@@ -1447,12 +1457,16 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
 static inline void pmdp_flush_lazy(struct mm_struct *mm,
                                    unsigned long address, pmd_t *pmdp)
 {
-        int active = (mm == current->active_mm) ? 1 : 0;
+        int active, count;
 
-        if ((atomic_read(&mm->context.attach_count) & 0xffff) > active)
-                __pmd_idte(address, pmdp);
-        else
+        active = (mm == current->active_mm) ? 1 : 0;
+        count = atomic_add_return(0x10000, &mm->context.attach_count);
+        if ((count & 0xffff) <= active) {
+                pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                 mm->context.flush_mm = 1;
+        } else
+                __pmd_idte(address, pmdp);
+        atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
arch/s390/include/asm/thread_info.h
@@ -81,6 +81,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME       1       /* callback before returning to user */
 #define TIF_SIGPENDING          2       /* signal pending */
 #define TIF_NEED_RESCHED        3       /* rescheduling necessary */
+#define TIF_TLB_WAIT            4       /* wait for TLB flush completion */
 #define TIF_PER_TRAP            6       /* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING        7       /* machine check handling is pending */
 #define TIF_SYSCALL_TRACE       8       /* syscall trace active */
@@ -96,6 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NOTIFY_RESUME      (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING         (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED       (1<<TIF_NEED_RESCHED)
+#define _TIF_TLB_WAIT           (1<<TIF_TLB_WAIT)
 #define _TIF_PER_TRAP           (1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING       (1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE      (1<<TIF_SYSCALL_TRACE)
arch/s390/kernel/entry.S
@@ -43,6 +43,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                  _TIF_MCCK_PENDING)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
                  _TIF_SYSCALL_TRACEPOINT)
+_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
@@ -159,10 +160,12 @@ ENTRY(__switch_to)
         lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
         mvc     __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
         l       %r15,__THREAD_ksp(%r3)          # load kernel stack of next
-        tm      __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
+        lhi     %r6,_TIF_TRANSFER               # transfer TIF bits
+        n       %r6,__TI_flags(%r4)             # isolate TIF bits
         jz      0f
-        ni      __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-        oi      __TI_flags+3(%r5),_TIF_MCCK_PENDING     # set it in next
+        o       %r6,__TI_flags(%r5)             # set TIF bits of next
+        st      %r6,__TI_flags(%r5)
+        ni      __TI_flags+3(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
 0:      lm      %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
         br      %r14
arch/s390/kernel/entry64.S
@@ -48,6 +48,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                  _TIF_MCCK_PENDING)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
                  _TIF_SYSCALL_TRACEPOINT)
+_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
 
 #define BASED(name) name-system_call(%r13)
@@ -189,10 +190,12 @@ ENTRY(__switch_to)
         lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
         mvc     __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
         lg      %r15,__THREAD_ksp(%r3)          # load kernel stack of next
-        tm      __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
+        llill   %r6,_TIF_TRANSFER               # transfer TIF bits
+        ng      %r6,__TI_flags(%r4)             # isolate TIF bits
         jz      0f
-        ni      __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-        oi      __TI_flags+7(%r5),_TIF_MCCK_PENDING     # set it in next
+        og      %r6,__TI_flags(%r5)             # set TIF bits of next
+        stg     %r6,__TI_flags(%r5)
+        ni      __TI_flags+7(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
 0:      lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
         br      %r14