Commit 6b88a32c authored by Catalin Marinas

arm64: kpti: Fix the interaction between ASID switching and software PAN

With ARM64_SW_TTBR0_PAN enabled, the exception entry code checks the
active ASID to decide whether user access was enabled (non-zero ASID)
when the exception was taken. On return from exception, if user access
was previously disabled, it re-instates TTBR0_EL1 from the per-thread
saved value (updated in switch_mm() or efi_set_pgd()).

Commit 7655abb9 ("arm64: mm: Move ASID from TTBR0 to TTBR1") makes the
TTBR0_EL1 + ASID switching non-atomic. Subsequently, commit 27a921e7
("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN") changes the
__uaccess_ttbr0_disable() function and asm macro to first write the
reserved TTBR0_EL1 followed by the ASID=0 update in TTBR1_EL1. If an
exception occurs between these two, the exception return code will
re-instate a valid TTBR0_EL1. A similar scenario can happen in
cpu_switch_mm() between setting the reserved TTBR0_EL1 and the ASID
update in cpu_do_switch_mm().
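
To make the window concrete, here is a minimal user-space C model of the
sequence above; the register variables, constants and helper names are
stand-ins invented for illustration, not kernel code. Between the two writes
the old ASID-based check still reports user access as enabled, whereas the
TTBR0_EL1-based check that this patch restores does not.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the system registers; the values are arbitrary. */
static uint64_t ttbr0_el1, ttbr1_el1;

#define TTBR_ASID_MASK	(0xffffULL << 48)
#define RESERVED_TTBR0	0x40000000ULL
#define USER_PGD	0x41000000ULL
#define SWAPPER_PGD	0x42000000ULL

/* Old entry.S check: user access enabled iff the active ASID is non-zero. */
static int uaccess_enabled_old(void) { return (ttbr1_el1 & TTBR_ASID_MASK) != 0; }
/* Check restored by this patch: test the ASID field of TTBR0_EL1 instead. */
static int uaccess_enabled_new(void) { return (ttbr0_el1 & TTBR_ASID_MASK) != 0; }

int main(void)
{
	/* User access currently enabled: valid TTBR0_EL1 plus non-zero ASID. */
	ttbr0_el1 = USER_PGD | (1ULL << 48);
	ttbr1_el1 = SWAPPER_PGD | (1ULL << 48);

	/* __uaccess_ttbr0_disable(), step 1: install the reserved TTBR0_EL1. */
	ttbr0_el1 = RESERVED_TTBR0;

	/*
	 * An exception taken here still sees a non-zero ASID in TTBR1_EL1,
	 * so the old check wrongly reports "enabled" and the return path
	 * would re-instate a valid TTBR0_EL1.
	 */
	printf("mid-sequence: old check=%d, new check=%d\n",
	       uaccess_enabled_old(), uaccess_enabled_new());

	/* Step 2: clear the ASID in TTBR1_EL1. */
	ttbr1_el1 &= ~TTBR_ASID_MASK;
	printf("after step 2: old check=%d, new check=%d\n",
	       uaccess_enabled_old(), uaccess_enabled_new());
	return 0;
}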

This patch reverts the entry.S check from ASID == 0 back to checking
TTBR0_EL1 and disables interrupts around the TTBR0_EL1 and ASID switching code in
__uaccess_ttbr0_disable(). It also ensures that, when returning from the
EFI runtime services, efi_set_pgd() doesn't leave a non-zero ASID in
TTBR1_EL1 by using uaccess_ttbr0_{enable,disable}.

The accesses to current_thread_info()->ttbr0 are updated to use
READ_ONCE/WRITE_ONCE.

As a safety measure, __uaccess_ttbr0_enable() always masks out any
existing non-zero ASID in TTBR1_EL1 before writing in the new ASID.
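
The corresponding change is the "ttbr1 &= ~TTBR_ASID_MASK;  /* safety measure */"
line in the uaccess.h hunk below. As a stand-alone illustration of why the
clear matters before OR-ing in the new ASID (the values are made up, not
kernel code):

#include <assert.h>
#include <stdint.h>

#define TTBR_ASID_MASK	(0xffffULL << 48)

int main(void)
{
	uint64_t ttbr1 = 0x42000000ULL | (0x0030ULL << 48);	/* stale ASID 0x30 */
	uint64_t ttbr0 = 0x41000000ULL | (0x0041ULL << 48);	/* new ASID 0x41 */

	/* Without the safety mask, stale and new ASID bits get OR-ed together. */
	uint64_t merged = ttbr1 | (ttbr0 & TTBR_ASID_MASK);
	assert((merged & TTBR_ASID_MASK) == (0x0071ULL << 48));	/* 0x30 | 0x41 */

	/* With the mask (as in the patch), only the new ASID remains. */
	ttbr1 &= ~TTBR_ASID_MASK;			/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	assert((ttbr1 & TTBR_ASID_MASK) == (0x0041ULL << 48));
	return 0;
}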

Fixes: 27a921e7 ("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN")
Acked-by: Will Deacon <will.deacon@arm.com>
Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: James Morse <james.morse@arm.com>
Tested-by: James Morse <james.morse@arm.com>
Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 558daf69
@@ -14,11 +14,11 @@
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	.macro	__uaccess_ttbr0_disable, tmp1
 	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
+	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
 	sub	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE	// reserved_ttbr0 just before swapper_pg_dir
 	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
 	isb
 	add	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE
-	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
 	msr	ttbr1_el1, \tmp1		// set reserved ASID
 	isb
 	.endm
@@ -35,9 +35,11 @@
 	isb
 	.endm
-	.macro	uaccess_ttbr0_disable, tmp1
+	.macro	uaccess_ttbr0_disable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
+	save_and_disable_irq \tmp2		// avoid preemption
 	__uaccess_ttbr0_disable \tmp1
+	restore_irq \tmp2
alternative_else_nop_endif
 	.endm
@@ -49,7 +51,7 @@ alternative_if_not ARM64_HAS_PAN
alternative_else_nop_endif
 	.endm
 #else
-	.macro	uaccess_ttbr0_disable, tmp1
+	.macro	uaccess_ttbr0_disable, tmp1, tmp2
 	.endm
 	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
@@ -59,8 +61,8 @@ alternative_else_nop_endif
 /*
  * These macros are no-ops when UAO is present.
  */
-	.macro	uaccess_disable_not_uao, tmp1
-	uaccess_ttbr0_disable \tmp1
+	.macro	uaccess_disable_not_uao, tmp1, tmp2
+	uaccess_ttbr0_disable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
 	SET_PSTATE_PAN(1)
alternative_else_nop_endif
@@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm_struct *mm)
 	if (mm != current->active_mm) {
 		/*
 		 * Update the current thread's saved ttbr0 since it is
-		 * restored as part of a return from exception. Set
-		 * the hardware TTBR0_EL1 using cpu_switch_mm()
-		 * directly to enable potential errata workarounds.
+		 * restored as part of a return from exception. Enable
+		 * access to the valid TTBR0_EL1 and invoke the errata
+		 * workaround directly since there is no return from
+		 * exception when invoking the EFI run-time services.
 		 */
 		update_saved_ttbr0(current, mm);
-		cpu_switch_mm(mm->pgd, mm);
+		uaccess_ttbr0_enable();
+		post_ttbr_update_workaround();
 	} else {
 		/*
 		 * Defer the switch to the current thread's TTBR0_EL1
 		 * until uaccess_enable(). Restore the current
 		 * thread's saved ttbr0 corresponding to its active_mm
 		 */
-		cpu_set_reserved_ttbr0();
+		uaccess_ttbr0_disable();
 		update_saved_ttbr0(current, current->active_mm);
 	}
 }
@@ -184,7 +184,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 	else
 		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
-	task_thread_info(tsk)->ttbr0 = ttbr;
+	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -239,6 +239,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define activate_mm(prev,next)	switch_mm(prev, next, current)
 void verify_cpu_asid_bits(void);
+void post_ttbr_update_workaround(void);
 #endif /* !__ASSEMBLY__ */
@@ -105,16 +105,18 @@ static inline void set_fs(mm_segment_t fs)
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void __uaccess_ttbr0_disable(void)
 {
-	unsigned long ttbr;
+	unsigned long flags, ttbr;
+	local_irq_save(flags);
 	ttbr = read_sysreg(ttbr1_el1);
+	ttbr &= ~TTBR_ASID_MASK;
 	/* reserved_ttbr0 placed before swapper_pg_dir */
 	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
 	isb();
 	/* Set reserved ASID */
-	ttbr &= ~TTBR_ASID_MASK;
 	write_sysreg(ttbr, ttbr1_el1);
 	isb();
+	local_irq_restore(flags);
 }
 static inline void __uaccess_ttbr0_enable(void)
@@ -127,10 +129,11 @@ static inline void __uaccess_ttbr0_enable(void)
 	 * roll-over and an update of 'ttbr0'.
 	 */
 	local_irq_save(flags);
-	ttbr0 = current_thread_info()->ttbr0;
+	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
 	/* Restore active ASID */
 	ttbr1 = read_sysreg(ttbr1_el1);
+	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
 	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
 	write_sysreg(ttbr1, ttbr1_el1);
 	isb();
@@ -204,7 +204,7 @@ alternative_if ARM64_HAS_PAN
alternative_else_nop_endif
 	.if	\el != 0
-	mrs	x21, ttbr1_el1
+	mrs	x21, ttbr0_el1
 	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
@@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 	b.mi	5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5:	mov	x0, #0
-	uaccess_disable_not_uao x2
+	uaccess_disable_not_uao x2, x3
 	ret
ENDPROC(__clear_user)
@@ -67,7 +67,7 @@ ENTRY(__arch_copy_from_user)
 	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
#include "copy_template.S"
-	uaccess_disable_not_uao x3
+	uaccess_disable_not_uao x3, x4
 	mov	x0, #0				// Nothing to copy
 	ret
ENDPROC(__arch_copy_from_user)
@@ -68,7 +68,7 @@ ENTRY(raw_copy_in_user)
 	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
#include "copy_template.S"
-	uaccess_disable_not_uao x3
+	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
ENDPROC(raw_copy_in_user)
@@ -66,7 +66,7 @@ ENTRY(__arch_copy_to_user)
 	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
#include "copy_template.S"
-	uaccess_disable_not_uao x3
+	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
ENDPROC(__arch_copy_to_user)
@@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 )			// invalidate I line PoU
 	isb
 	mov	x0, #0
1:
-	uaccess_ttbr0_disable x1
+	uaccess_ttbr0_disable x1, x2
 	ret
9:
 	mov	x0, #-EFAULT
@@ -153,6 +153,9 @@ ENDPROC(cpu_do_resume)
ENTRY(cpu_do_switch_mm)
 	mrs	x2, ttbr1_el1
 	mmid	x1, x1				// get mm->context.id
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	bfi	x0, x1, #48, #16		// set the ASID field in TTBR0
+#endif
 	bfi	x2, x1, #48, #16		// set the ASID
 	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
 	isb
@@ -107,6 +107,6 @@ ENTRY(privcmd_call)
 	/*
 	 * Disable userspace access from kernel once the hyp call completed.
 	 */
-	uaccess_ttbr0_disable x6
+	uaccess_ttbr0_disable x6, x7
 	ret
ENDPROC(privcmd_call);