Commit 2c624fe6 authored by Steve Capper, committed by Will Deacon

arm64: mm: Remove vabits_user

Previous patches have enabled 52-bit kernel + user VAs and there is no
longer any scenario where user VA != kernel VA size.

This patch removes the now-redundant vabits_user variable and replaces
its usage with vabits_actual where appropriate.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent b6d00d47
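For context, the consolidation is easy to see in one place: after this patch, both the user task size and the pointer-authentication mask are derived from the single vabits_actual variable. Below is a minimal, stand-alone C sketch of that relationship (simplified stand-ins for the kernel definitions, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's GENMASK(). */
#define GENMASK(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint64_t vabits_actual = 52;	/* single source of truth after this patch */

int main(void)
{
	uint64_t task_size_64 = 1ULL << vabits_actual;	/* cf. TASK_SIZE_64 */
	uint64_t pac_mask = GENMASK(54, vabits_actual);	/* cf. ptrauth_user_pac_mask() */

	printf("TASK_SIZE_64 = %#llx\n", (unsigned long long)task_size_64);
	printf("PAC mask     = %#llx\n", (unsigned long long)pac_mask);
	return 0;
}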
arch/arm64/include/asm/memory.h
@@ -194,9 +194,6 @@ static inline unsigned long kaslr_offset(void)
 	return kimage_vaddr - KIMAGE_VADDR;
 }
 
-/* the actual size of a user virtual address */
-extern u64 vabits_user;
-
 /*
  * Allow all memory at the discovery stage. We will clip it later.
  */
arch/arm64/include/asm/pointer_auth.h
@@ -69,7 +69,7 @@ extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
  * The EL0 pointer bits used by a pointer authentication code.
  * This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
  */
-#define ptrauth_user_pac_mask()	GENMASK(54, vabits_user)
+#define ptrauth_user_pac_mask()	GENMASK(54, vabits_actual)
 
 /* Only valid for EL0 TTBR0 instruction pointers */
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
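The mask feeds ptrauth_strip_insn_pac(), which clears the PAC bits from an EL0 instruction pointer. A short illustrative sketch, assuming vabits_actual == 48 so the PAC occupies bits 54:48 (plain C, not the kernel helper):

#include <stdint.h>

#define PAC_MASK_48	((~0ULL << 48) & (~0ULL >> (63 - 54)))	/* bits 54:48 */

/* Clear the PAC bits, recovering the canonical user pointer. */
static inline uint64_t strip_insn_pac(uint64_t ptr)
{
	return ptr & ~PAC_MASK_48;
}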
arch/arm64/include/asm/processor.h
@@ -43,7 +43,7 @@
  */
 
 #define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
-#define TASK_SIZE_64		(UL(1) << vabits_user)
+#define TASK_SIZE_64		(UL(1) << vabits_actual)
 
 #ifdef CONFIG_COMPAT
 #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
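As a quick worked example of the new definition, TASK_SIZE_64 = UL(1) << vabits_actual evaluates as follows for the two VA sizes this series supports:

#include <stdio.h>

int main(void)
{
	printf("48-bit VAs: %#llx (256 TiB)\n", 1ULL << 48);
	printf("52-bit VAs: %#llx (4 PiB)\n", 1ULL << 52);
	return 0;
}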
arch/arm64/kernel/head.S
@@ -316,11 +316,6 @@ __create_page_tables:
 #endif
 	mov	x5, #VA_BITS_MIN
 1:
-	adr_l	x6, vabits_user
-	str	x5, [x6]
-	dmb	sy
-	dc	ivac, x6		// Invalidate potentially stale cache line
-
 	adr_l	x6, vabits_actual
 	str	x5, [x6]
 	dmb	sy
@@ -795,7 +790,7 @@ ENDPROC(__enable_mmu)
 ENTRY(__cpu_secondary_check52bitva)
 #ifdef CONFIG_ARM64_VA_BITS_52
-	ldr_l	x0, vabits_user
+	ldr_l	x0, vabits_actual
 	cmp	x0, #52
 	b.ne	2f
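Two notes on the head.S changes. First, vabits_actual is written very early with the MMU off (it lives in .mmuoff.data.write, per the mm/mmu.c hunk below), which is why the retained str/dmb/dc ivac sequence keeps the same cache maintenance that previously guarded vabits_user. Second, __cpu_secondary_check52bitva now compares vabits_actual against 52 before probing whether the booting CPU itself supports 52-bit VAs. A rough C rendering of that check, with hypothetical helper names (the real code probes ID_AA64MMFR2_EL1):

#include <stdbool.h>
#include <stdint.h>

static uint64_t vabits_actual = 52;

/* Hypothetical stand-in for the ID_AA64MMFR2_EL1.VARange probe. */
static bool this_cpu_supports_52bit_va(void)
{
	return true;
}

/* A secondary CPU that cannot do 52-bit VAs must not join a 52-bit
 * kernel; the real routine parks it rather than returning an error. */
static int cpu_secondary_check52bitva(void)
{
	if (vabits_actual != 52)
		return 0;	/* 48-bit configuration: nothing to verify */
	return this_cpu_supports_52bit_va() ? 0 : -1;
}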
arch/arm64/mm/fault.c
@@ -140,8 +140,7 @@ static void show_pte(unsigned long addr)
 	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 mm == &init_mm ? vabits_actual : (int)vabits_user,
-		 (unsigned long)virt_to_phys(mm->pgd));
+		 vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
 
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
 	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
arch/arm64/mm/mmu.c
@@ -40,8 +40,6 @@
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
-u64 vabits_user __ro_after_init;
-EXPORT_SYMBOL(vabits_user);
 
 u64 __section(".mmuoff.data.write") vabits_actual;
 EXPORT_SYMBOL(vabits_actual);
arch/arm64/mm/proc.S
@@ -439,7 +439,7 @@ ENTRY(__cpu_setup)
 	tcr_clear_errata_bits x10, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
-	ldr_l		x9, vabits_user
+	ldr_l		x9, vabits_actual
 	sub		x9, xzr, x9
 	add		x9, x9, #64
 	tcr_set_t1sz	x10, x9
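The retained proc.S arithmetic is the TCR_EL1.T1SZ computation: sub x9, xzr, x9 followed by add x9, x9, #64 is just x9 = 64 - vabits_actual. A one-function C equivalent (the function name is mine):

#include <stdint.h>

/* TCR_EL1.T1SZ sizes the kernel (TTBR1) VA range: T1SZ = 64 - VA bits. */
static inline uint64_t tcr_t1sz(uint64_t vabits_actual)
{
	return 64 - vabits_actual;	/* 52-bit VAs -> T1SZ = 12 */
}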