Commit bd4fb6d2 authored by Will Deacon

arm64: Add support for SB barrier and patch in over DSB; ISB sequences

We currently use a DSB; ISB sequence to inhibit speculation in set_fs().
Whilst this works for current CPUs, future CPUs may implement a new SB
barrier instruction which acts as an architected speculation barrier.

On CPUs that support it, patch in an SB; NOP sequence over the DSB; ISB
sequence and advertise the presence of the new instruction to userspace.
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 0b587c84
arch/arm64/include/asm/assembler.h
@@ -122,6 +122,19 @@
 	hint	#20
 	.endm
 
+/*
+ * Speculation barrier
+ */
+	.macro	sb
+alternative_if_not ARM64_HAS_SB
+	dsb	nsh
+	isb
+alternative_else
+	SB_BARRIER_INSN
+	nop
+alternative_endif
+	.endm
+
 /*
  * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
  * of bounds.

arch/arm64/include/asm/barrier.h
@@ -34,6 +34,10 @@
 #define psb_csync()	asm volatile("hint #17" : : : "memory")
 #define csdb()		asm volatile("hint #20" : : : "memory")
 
+#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",	\
+						 SB_BARRIER_INSN"nop\n",	\
+						 ARM64_HAS_SB))
+
 #define mb()		dsb(sy)
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)
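
For illustration, a minimal sketch of how kernel code would use the new macro (the wrapper function below is hypothetical; spec_bar() itself is the macro added in the hunk above). On cores without the capability the alternatives framework leaves the default dsb nsh; isb pair in place; once ARM64_HAS_SB is detected at boot, each site is rewritten to sb; nop.

#include <asm/barrier.h>

/*
 * Hypothetical caller, for illustration only: code that previously
 * open-coded dsb(nsh); isb(); to stop speculative execution can now
 * rely on the boot-time patched spec_bar() instead.
 */
static inline void example_speculation_fence(void)
{
	/* "dsb nsh; isb" on older cores, "sb; nop" once ARM64_HAS_SB is set */
	spec_bar();
}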

arch/arm64/include/asm/cpucaps.h
@@ -54,7 +54,8 @@
 #define ARM64_HAS_CRC32				33
 #define ARM64_SSBS				34
 #define ARM64_WORKAROUND_1188873		35
+#define ARM64_HAS_SB				36
 
-#define ARM64_NCAPS				36
+#define ARM64_NCAPS				37
 
 #endif /* __ASM_CPUCAPS_H */

arch/arm64/include/asm/sysreg.h
@@ -104,6 +104,11 @@
 #define SET_PSTATE_UAO(x)		__emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
 #define SET_PSTATE_SSBS(x)		__emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
 
+#define __SYS_BARRIER_INSN(CRm, op2, Rt)	\
+	__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+
+#define SB_BARRIER_INSN			__SYS_BARRIER_INSN(0, 7, 31)
+
 #define SYS_DC_ISW			sys_insn(1, 0, 7, 6, 2)
 #define SYS_DC_CSW			sys_insn(1, 0, 7, 10, 2)
 #define SYS_DC_CISW			sys_insn(1, 0, 7, 14, 2)

@@ -528,6 +533,7 @@
 #define ID_AA64ISAR0_AES_SHIFT		4
 
 /* id_aa64isar1 */
+#define ID_AA64ISAR1_SB_SHIFT		36
 #define ID_AA64ISAR1_LRCPC_SHIFT	20
 #define ID_AA64ISAR1_FCMA_SHIFT		16
 #define ID_AA64ISAR1_JSCVT_SHIFT	12
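
As a rough sanity check on the encoding above, the stand-alone sketch below redoes the arithmetic by hand; the helper name is made up, and the 19/16/12/8/5 shifts are assumed to match the Op0/Op1/CRn/CRm/Op2 packing used by sys_insn(). With CRm=0, op2=7 and Rt=31 it comes out as 0xd50330ff, i.e. the SB instruction with Rt = XZR.

#include <stdio.h>
#include <stdint.h>

/* Hand-rolled stand-in for the kernel's sys_insn() packing (illustrative
 * only): op0/op1/CRn/CRm/op2 land at bits 19/16/12/8/5 respectively. */
static uint32_t example_sys_barrier_insn(uint32_t crm, uint32_t op2, uint32_t rt)
{
	uint32_t sys = (0u << 19) | (3u << 16) | (3u << 12) |
		       (crm << 8) | (op2 << 5);

	return 0xd5000000u | sys | (rt & 0x1f);
}

int main(void)
{
	/* SB_BARRIER_INSN == __SYS_BARRIER_INSN(0, 7, 31) */
	printf("0x%08x\n", example_sys_barrier_insn(0, 7, 31)); /* prints 0xd50330ff */
	return 0;
}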

arch/arm64/include/asm/uaccess.h
@@ -45,8 +45,7 @@ static inline void set_fs(mm_segment_t fs)
 	 * Prevent a mispredicted conditional call to set_fs from forwarding
 	 * the wrong address limit to access_ok under speculation.
 	 */
-	dsb(nsh);
-	isb();
+	spec_bar();
 
 	/* On user-mode return, check fs is correct */
 	set_thread_flag(TIF_FSCHECK);

arch/arm64/include/uapi/asm/hwcap.h
@@ -49,5 +49,6 @@
 #define HWCAP_ILRCPC		(1 << 26)
 #define HWCAP_FLAGM		(1 << 27)
 #define HWCAP_SSBS		(1 << 28)
+#define HWCAP_SB		(1 << 29)
 
 #endif /* _UAPI__ASM_HWCAP_H */

arch/arm64/kernel/cpufeature.c
@@ -142,6 +142,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),

@@ -1398,6 +1399,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_cnp,
 	},
 #endif
+	{
+		.desc = "Speculation barrier (SB)",
+		.capability = ARM64_HAS_SB,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR1_EL1,
+		.field_pos = ID_AA64ISAR1_SB_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
 	{},
 };
 

@@ -1439,6 +1450,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),

arch/arm64/kernel/cpuinfo.c
@@ -82,6 +82,7 @@ static const char *const hwcap_str[] = {
 	"ilrcpc",
 	"flagm",
 	"ssbs",
+	"sb",
 	NULL
 };
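
The hwcap_str[] entry also makes the feature show up as "sb" in the Features line of /proc/cpuinfo. A rough user-space check of that path is sketched below; the getauxval() probe shown earlier is the more robust interface.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/cpuinfo", "r");
	int found = 0;

	if (!f)
		return 1;

	/* Scan the "Features" lines and look for the "sb" token. */
	while (!found && fgets(line, sizeof(line), f)) {
		char *tok;

		if (strncmp(line, "Features", 8))
			continue;
		for (tok = strtok(line, " \t:\n"); tok; tok = strtok(NULL, " \t:\n"))
			if (!strcmp(tok, "sb"))
				found = 1;
	}
	fclose(f);

	puts(found ? "sb listed in /proc/cpuinfo" : "sb not listed");
	return 0;
}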