Commit 9cce9c6c authored by Ard Biesheuvel, committed by Catalin Marinas

arm64: mm: Handle LVA support as a CPU feature

Currently, we detect CPU support for 52-bit virtual addressing (LVA)
extremely early, before creating the kernel page tables or enabling the
MMU. We cannot override the feature this early, and so large virtual
addressing is always enabled on CPUs that implement support for it if
the software support for it was enabled at build time. It also means we
rely on non-trivial code in asm to deal with this feature.

Given that both the ID map and the TTBR1 mapping of the kernel image are
guaranteed to be 48-bit addressable, it is not actually necessary to
enable support this early, and instead, we can model it as a CPU
feature. That way, we can rely on code patching to get the correct
TCR.T1SZ values programmed on secondary boot and resume from suspend.
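
The detection itself becomes an ordinary entry in the arm64_features[] capability table (condensed here from the arch/arm64/kernel/cpufeature.c hunk further down); because it is a boot CPU feature, alternatives keyed on ARM64_HAS_VA52 are patched before any secondary CPU is brought up or a resume path runs:

#ifdef CONFIG_ARM64_VA_BITS_52
	{
		/* decided once on the boot CPU, then used as a code patching key */
		.desc = "52-bit Virtual Addressing (LVA)",
		.capability = ARM64_HAS_VA52,
		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_width = 4,
		.field_pos = ID_AA64MMFR2_EL1_VARange_SHIFT,
		.matches = has_cpuid_feature,
		.min_field_value = ID_AA64MMFR2_EL1_VARange_52,
	},
#endif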

On the primary boot path, we simply enable the MMU with 48-bit virtual
addressing initially, and update TCR.T1SZ if LVA is supported from C
code, right before creating the kernel mapping. Given that TTBR1 still
points to reserved_pg_dir at this point, updating TCR.T1SZ should be
safe without the need for explicit TLB maintenance.
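
In code, that update is a single masked write of TCR_EL1 from the early mapping code (excerpted from the pi/map_kernel.c hunk below):

	/* TTBR1 still points to reserved_pg_dir here, so no TLB maintenance is needed */
	if (VA_BITS > VA_BITS_MIN && cpu_has_lva())
		sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(VA_BITS));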

Since this gets rid of all accesses to the vabits_actual variable from
asm code that occurred before TCR.T1SZ had been programmed, we no longer
have a need for this variable, and we can replace it with a C expression
that produces the correct value directly, based on the value of TCR.T1SZ.
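
Concretely, since TCR_EL1.T1SZ (bits [21:16]) holds 64 minus the VA width, the variable is replaced by the helper and macro this patch adds to <asm/memory.h> (excerpted from the hunk below):

static inline u64 __pure read_tcr(void)
{
	u64 tcr;

	// read_sysreg() uses asm volatile, so avoid it here
	asm("mrs %0, tcr_el1" : "=r"(tcr));
	return tcr;
}

#if VA_BITS > 48
// For reasons of #include hell, we can't use TCR_T1SZ_OFFSET/TCR_T1SZ_MASK here
#define vabits_actual	(64 - ((read_tcr() >> 16) & 63))
#else
#define vabits_actual	((u64)VA_BITS)
#endif
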
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20240214122845.2033971-70-ardb+git@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent e0f92f0d
arch/arm64/include/asm/cpufeature.h
@@ -995,6 +995,15 @@ static inline bool cpu_has_pac(void)
 				      &id_aa64isar2_override);
 }
 
+static inline bool cpu_has_lva(void)
+{
+	u64 mmfr2;
+
+	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+	return cpuid_feature_extract_unsigned_field(mmfr2,
+						    ID_AA64MMFR2_EL1_VARange_SHIFT);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
arch/arm64/include/asm/memory.h
@@ -209,9 +209,20 @@
 #include <asm/boot.h>
 #include <asm/bug.h>
 #include <asm/sections.h>
+#include <asm/sysreg.h>
+
+static inline u64 __pure read_tcr(void)
+{
+	u64 tcr;
+
+	// read_sysreg() uses asm volatile, so avoid it here
+	asm("mrs %0, tcr_el1" : "=r"(tcr));
+	return tcr;
+}
 
 #if VA_BITS > 48
-extern u64		vabits_actual;
+// For reasons of #include hell, we can't use TCR_T1SZ_OFFSET/TCR_T1SZ_MASK here
+#define vabits_actual	(64 - ((read_tcr() >> 16) & 63))
 #else
 #define vabits_actual	((u64)VA_BITS)
 #endif
arch/arm64/kernel/cpufeature.c
@@ -2692,6 +2692,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_lpa2,
 	},
+#ifdef CONFIG_ARM64_VA_BITS_52
+	{
+		.desc = "52-bit Virtual Addressing (LVA)",
+		.capability = ARM64_HAS_VA52,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_width = 4,
+		.field_pos = ID_AA64MMFR2_EL1_VARange_SHIFT,
+		.matches = has_cpuid_feature,
+		.min_field_value = ID_AA64MMFR2_EL1_VARange_52,
+	},
+#endif
 	{},
 };
arch/arm64/kernel/head.S
@@ -80,7 +80,6 @@
  *  x19	primary_entry() .. start_kernel()	whether we entered with the MMU on
  *  x20	primary_entry() .. __primary_switch()	CPU boot mode
  *  x21	primary_entry() .. start_kernel()	FDT pointer passed at boot in x0
- *  x25	primary_entry() .. start_kernel()	supported VA size
  */
 SYM_CODE_START(primary_entry)
 	bl	record_mmu_state
@@ -125,14 +124,6 @@ SYM_CODE_START(primary_entry)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
-#if VA_BITS > 48
-	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
-	tst	x0, ID_AA64MMFR2_EL1_VARange_MASK
-	mov	x0, #VA_BITS
-	mov	x25, #VA_BITS_MIN
-	csel	x25, x25, x0, eq
-	mov	x0, x25
-#endif
 	bl	__cpu_setup			// initialise processor
 	b	__primary_switch
 SYM_CODE_END(primary_entry)
@@ -242,11 +233,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	mov	x0, x20
 	bl	set_cpu_boot_mode_flag
 
-#if VA_BITS > 48
-	adr_l	x8, vabits_actual		// Set this early so KASAN early init
-	str	x25, [x8]			// ... observes the correct value
-	dc	civac, x8			// Make visible to booting secondaries
-#endif
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	bl	kasan_early_init
 #endif
@@ -376,10 +362,13 @@ SYM_FUNC_START_LOCAL(secondary_startup)
 	 * Common entry point for secondary CPUs.
 	 */
 	mov	x20, x0				// preserve boot mode
+
+#ifdef CONFIG_ARM64_VA_BITS_52
+alternative_if ARM64_HAS_VA52
 	bl	__cpu_secondary_check52bitva
-#if VA_BITS > 48
-	ldr_l	x0, vabits_actual
+alternative_else_nop_endif
 #endif
+
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
 	adrp	x2, idmap_pg_dir
@@ -482,12 +471,8 @@ SYM_FUNC_START(__enable_mmu)
 	ret
 SYM_FUNC_END(__enable_mmu)
 
+#ifdef CONFIG_ARM64_VA_BITS_52
 SYM_FUNC_START(__cpu_secondary_check52bitva)
-#if VA_BITS > 48
-	ldr_l	x0, vabits_actual
-	cmp	x0, #52
-	b.ne	2f
-
 	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
 	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
 	cbnz	x0, 2f
@@ -498,9 +483,9 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
 	wfi
 	b	1b
 
-#endif
 2:	ret
 SYM_FUNC_END(__cpu_secondary_check52bitva)
+#endif
 
 SYM_FUNC_START_LOCAL(__no_granule_support)
 	/* Indicate that this CPU can't boot and is stuck in the kernel */
arch/arm64/kernel/image-vars.h
@@ -36,7 +36,6 @@ PROVIDE(__pi___memcpy = __pi_memcpy);
 PROVIDE(__pi___memmove = __pi_memmove);
 PROVIDE(__pi___memset = __pi_memset);
 
-PROVIDE(__pi_vabits_actual = vabits_actual);
 PROVIDE(__pi_id_aa64isar1_override = id_aa64isar1_override);
 PROVIDE(__pi_id_aa64isar2_override = id_aa64isar2_override);
 PROVIDE(__pi_id_aa64mmfr1_override = id_aa64mmfr1_override);
arch/arm64/kernel/pi/map_kernel.c
@@ -165,6 +165,9 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
 	chosen = fdt_path_offset(fdt, chosen_str);
 	init_feature_override(boot_status, fdt, chosen);
 
+	if (VA_BITS > VA_BITS_MIN && cpu_has_lva())
+		sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(VA_BITS));
+
 	/*
 	 * The virtual KASLR displacement modulo 2MiB is decided by the
 	 * physical placement of the image, as otherwise, we might not be able
arch/arm64/kernel/sleep.S
@@ -102,9 +102,6 @@ SYM_CODE_START(cpu_resume)
 	mov	x0, xzr
 	bl	init_kernel_el
 	mov	x19, x0			// preserve boot mode
-#if VA_BITS > 48
-	ldr_l	x0, vabits_actual
-#endif
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
arch/arm64/mm/mmu.c
@@ -45,11 +45,6 @@
 #define NO_CONT_MAPPINGS	BIT(1)
 #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
 
-#if VA_BITS > 48
-u64 vabits_actual __ro_after_init = VA_BITS_MIN;
-EXPORT_SYMBOL(vabits_actual);
-#endif
-
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
arch/arm64/mm/proc.S
@@ -397,8 +397,6 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
  *
  *	Initialise the processor for turning the MMU on.
  *
- * Input:
- *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
  * Output:
  *	Return in x0 the value of the SCTLR_EL1 register.
  */
@@ -422,16 +420,17 @@ SYM_FUNC_START(__cpu_setup)
 	mair	.req	x17
 	tcr	.req	x16
 	mov_q	mair, MAIR_EL1_SET
-	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS) | TCR_CACHE_FLAGS | \
+	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
 			TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
 			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
 
 	tcr_clear_errata_bits tcr, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
-	sub		x9, xzr, x0
-	add		x9, x9, #64
+	mov		x9, #64 - VA_BITS
+alternative_if ARM64_HAS_VA52
 	tcr_set_t1sz	tcr, x9
+alternative_else_nop_endif
 #endif
 
 	/*
arch/arm64/tools/cpucaps
@@ -50,6 +50,7 @@ HAS_STAGE2_FWB
 HAS_TCR2
 HAS_TIDCP1
 HAS_TLB_RANGE
+HAS_VA52
 HAS_VIRT_HOST_EXTN
 HAS_WFXT
 HW_DBM