Commit 806dc825 authored by Catalin Marinas

Merge branch 'for-next/asm-cleanups' into for-next/core

* for-next/asm-cleanups:
  : Various asm clean-ups (alignment, mov_q vs ldr, .idmap)
  arm64: move kimage_vaddr to .rodata
  arm64: use mov_q instead of literal ldr
parents 0829a076 6cf9a2dc
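The recurring change in the hunks below is swapping ldr Xn, =<const> (a literal-pool load) for the kernel's mov_q macro, which builds the constant out of move-immediate instructions instead. A rough sketch of the difference, not taken from this merge and using an arbitrary example value:

	// 1) Literal-pool load: the assembler parks the 64-bit constant in a
	//    nearby literal pool and emits a PC-relative load to fetch it;
	//    a .ltorg directive may be needed so the pool lands within reach.
	ldr	x0, =0x30d0199f

	// 2) Immediate moves: build the value directly in the register.
	//    This is roughly what mov_q expands to - one movz plus up to
	//    three movk, depending on how many 16-bit chunks are non-zero.
	movz	x0, #0x199f
	movk	x0, #0x30d0, lsl #16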
@@ -32,7 +32,7 @@
 ENTRY(__cpu_soft_restart)
 	/* Clear sctlr_el1 flags. */
 	mrs	x12, sctlr_el1
-	ldr	x13, =SCTLR_ELx_FLAGS
+	mov_q	x13, SCTLR_ELx_FLAGS
 	bic	x12, x12, x13
 	pre_disable_mmu_workaround
 	msr	sctlr_el1, x12
...
@@ -457,17 +457,19 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	b	start_kernel
 SYM_FUNC_END(__primary_switched)
+	.pushsection ".rodata", "a"
+SYM_DATA_START(kimage_vaddr)
+	.quad		_text - TEXT_OFFSET
+SYM_DATA_END(kimage_vaddr)
+EXPORT_SYMBOL(kimage_vaddr)
+	.popsection
 /*
  * end early head section, begin head code that is also used for
  * hotplug and needs to have the same protections as the text region
  */
 	.section ".idmap.text","awx"
-SYM_DATA_START(kimage_vaddr)
-	.quad		_text - TEXT_OFFSET
-SYM_DATA_END(kimage_vaddr)
-EXPORT_SYMBOL(kimage_vaddr)
 /*
  * If we're fortunate enough to boot at EL2, ensure that the world is
  * sane before dropping to EL1.
...
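The hunk above moves the kimage_vaddr definition out of .idmap.text (whose "awx" flags make the section writable and executable) into .rodata. A minimal sketch of the general pattern, with a hypothetical symbol and value, for emitting a read-only link-time constant from an .S file without disturbing the section the surrounding code is assembled into:

	// Minimal sketch with a hypothetical symbol: .pushsection switches
	// to .rodata ("a" = allocatable, neither writable nor executable),
	// .popsection returns to whatever section was active before.
	.pushsection ".rodata", "a"
	.align	3			// keep the 64-bit value naturally aligned
example_vaddr:
	.quad	0xffff800010000000	// hypothetical link-time constant
	.popsection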
@@ -63,7 +63,7 @@ el1_sync:
 	beq	9f				// Nothing to reset!
 	/* Someone called kvm_call_hyp() against the hyp-stub... */
-	ldr	x0, =HVC_STUB_ERR
+	mov_q	x0, HVC_STUB_ERR
 	eret
 9:	mov	x0, xzr
...
@@ -41,7 +41,7 @@ ENTRY(arm64_relocate_new_kernel)
 	cmp	x0, #CurrentEL_EL2
 	b.ne	1f
 	mrs	x0, sctlr_el2
-	ldr	x1, =SCTLR_ELx_FLAGS
+	mov_q	x1, SCTLR_ELx_FLAGS
 	bic	x0, x0, x1
 	pre_disable_mmu_workaround
 	msr	sctlr_el2, x0
@@ -113,8 +113,6 @@ ENTRY(arm64_relocate_new_kernel)
 ENDPROC(arm64_relocate_new_kernel)
-.ltorg
 .align 3	/* To keep the 64-bit values below naturally aligned. */
 .Lcopy_end:
...
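The .ltorg dropped above exists only to flush the assembler's literal pool; once the last ldr Xn, =<const> in the file is gone, there is no pool left to emit. A rough sketch of what the directive does, not from this merge and with an arbitrary constant:

	// Rough sketch: the "=" form asks the assembler to keep the
	// constant in a literal pool; .ltorg dumps that pool here, after
	// the code, so the PC-relative load can reach it. With no such
	// loads remaining, the directive emits nothing and can be removed.
	ldr	x2, =0x123456789abcdef0
	ret
	.ltorg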
@@ -60,7 +60,7 @@ alternative_else_nop_endif
 	msr	ttbr0_el2, x4
 	mrs	x4, tcr_el1
-	ldr	x5, =TCR_EL2_MASK
+	mov_q	x5, TCR_EL2_MASK
 	and	x4, x4, x5
 	mov	x5, #TCR_EL2_RES1
 	orr	x4, x4, x5
@@ -102,7 +102,7 @@ alternative_else_nop_endif
 	 * as well as the EE bit on BE. Drop the A flag since the compiler
 	 * is allowed to generate unaligned accesses.
 	 */
-	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+	mov_q	x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
 CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
@@ -142,7 +142,7 @@ reset:
 	 * case we coming via HVC_SOFT_RESTART.
 	 */
 	mrs	x5, sctlr_el2
-	ldr	x6, =SCTLR_ELx_FLAGS
+	mov_q	x6, SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTL_M and etc
 	pre_disable_mmu_workaround
 	msr	sctlr_el2, x5
@@ -155,11 +155,9 @@ reset:
 	eret
 1:	/* Bad stub call */
-	ldr	x0, =HVC_STUB_ERR
+	mov_q	x0, HVC_STUB_ERR
 	eret
 SYM_CODE_END(__kvm_handle_stub_hvc)
-	.ltorg
 	.popsection
@@ -411,7 +411,7 @@ SYM_FUNC_START(__cpu_setup)
 	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
 	 * both user and kernel.
 	 */
-	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
 			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
 	tcr_clear_errata_bits x10, x9, x5
...