Commit b23ec74c authored by Catalin Marinas's avatar Catalin Marinas

Merge branches 'for-next/doc', 'for-next/sve', 'for-next/sysreg',...

Merge branches 'for-next/doc', 'for-next/sve', 'for-next/sysreg', 'for-next/gettimeofday', 'for-next/stacktrace', 'for-next/atomics', 'for-next/el1-exceptions', 'for-next/a510-erratum-2658417', 'for-next/defconfig', 'for-next/tpidr2_el0' and 'for-next/ftrace', remote-tracking branch 'arm64/for-next/perf' into for-next/core

* arm64/for-next/perf:
  arm64: asm/perf_regs.h: Avoid C++-style comment in UAPI header
  arm64/sve: Add Perf extensions documentation
  perf: arm64: Add SVE vector granule register to user regs
  MAINTAINERS: add maintainers for Alibaba' T-Head PMU driver
  drivers/perf: add DDR Sub-System Driveway PMU driver for Yitian 710 SoC
  docs: perf: Add description for Alibaba's T-Head PMU driver

* for-next/doc:
  : Documentation/arm64 updates
  arm64/sve: Document our actual ABI for clearing registers on syscall

* for-next/sve:
  : SVE updates
  arm64/sysreg: Add hwcap for SVE EBF16

* for-next/sysreg: (35 commits)
  : arm64 system registers generation (more conversions)
  arm64/sysreg: Fix a few missed conversions
  arm64/sysreg: Convert ID_AA64AFRn_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64DFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64FDR0_EL1 to automatic generation
  arm64/sysreg: Use feature numbering for PMU and SPE revisions
  arm64/sysreg: Add _EL1 into ID_AA64DFR0_EL1 definition names
  arm64/sysreg: Align field names in ID_AA64DFR0_EL1 with architecture
  arm64/sysreg: Add defintion for ALLINT
  arm64/sysreg: Convert SCXTNUM_EL1 to automatic generation
  arm64/sysreg: Convert TIPDR_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64PFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64PFR0_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR2_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR1_EL1 to automatic generation
  arm64/sysreg: Convert ID_AA64MMFR0_EL1 to automatic generation
  arm64/sysreg: Convert HCRX_EL2 to automatic generation
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 SME enumeration
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 BTI enumeration
  arm64/sysreg: Standardise naming of ID_AA64PFR1_EL1 fractional version fields
  arm64/sysreg: Standardise naming for MTE feature enumeration
  ...

* for-next/gettimeofday:
  : Use self-synchronising counter access in gettimeofday() (if FEAT_ECV)
  arm64: vdso: use SYS_CNTVCTSS_EL0 for gettimeofday
  arm64: alternative: patch alternatives in the vDSO
  arm64: module: move find_section to header

* for-next/stacktrace:
  : arm64 stacktrace cleanups and improvements
  arm64: stacktrace: track hyp stacks in unwinder's address space
  arm64: stacktrace: track all stack boundaries explicitly
  arm64: stacktrace: remove stack type from fp translator
  arm64: stacktrace: rework stack boundary discovery
  arm64: stacktrace: add stackinfo_on_stack() helper
  arm64: stacktrace: move SDEI stack helpers to stacktrace code
  arm64: stacktrace: rename unwind_next_common() -> unwind_next_frame_record()
  arm64: stacktrace: simplify unwind_next_common()
  arm64: stacktrace: fix kerneldoc comments

* for-next/atomics:
  : arm64 atomics improvements
  arm64: atomic: always inline the assembly
  arm64: atomics: remove LL/SC trampolines

* for-next/el1-exceptions:
  : Improve the reporting of EL1 exceptions
  arm64: rework BTI exception handling
  arm64: rework FPAC exception handling
  arm64: consistently pass ESR_ELx to die()
  arm64: die(): pass 'err' as long
  arm64: report EL1 UNDEFs better

* for-next/a510-erratum-2658417:
  : Cortex-A510: 2658417: remove BF16 support due to incorrect result
  arm64: errata: remove BF16 HWCAP due to incorrect result on Cortex-A510
  arm64: cpufeature: Expose get_arm64_ftr_reg() outside cpufeature.c
  arm64: cpufeature: Force HWCAP to be based on the sysreg visible to user-space

* for-next/defconfig:
  : arm64 defconfig updates
  arm64: defconfig: Add Coresight as module
  arm64: Enable docker support in defconfig
  arm64: defconfig: Enable memory hotplug and hotremove config
  arm64: configs: Enable all PMUs provided by Arm

* for-next/tpidr2_el0:
  : arm64 ptrace() support for TPIDR2_EL0
  kselftest/arm64: Add coverage of TPIDR2_EL0 ptrace interface
  arm64/ptrace: Support access to TPIDR2_EL0
  arm64/ptrace: Document extension of NT_ARM_TLS to cover TPIDR2_EL0
  kselftest/arm64: Add test coverage for NT_ARM_TLS

* for-next/ftrace:
  : arm64 ftraces updates/fixes
  arm64: ftrace: fix module PLTs with mcount
  arm64: module: Remove unused plt_entry_is_initialized()
  arm64: module: Make plt_equals_entry() static
...@@ -272,6 +272,9 @@ HWCAP2_WFXT ...@@ -272,6 +272,9 @@ HWCAP2_WFXT
HWCAP2_EBF16 HWCAP2_EBF16
Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010. Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
HWCAP2_SVE_EBF16
Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0010.
4. Unused AT_HWCAP bits 4. Unused AT_HWCAP bits
----------------------- -----------------------
......
...@@ -110,6 +110,8 @@ stable kernels. ...@@ -110,6 +110,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2441009 | ARM64_ERRATUM_2441009 | | ARM | Cortex-A510 | #2441009 | ARM64_ERRATUM_2441009 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 | | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 | | ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
......
...@@ -331,6 +331,9 @@ The regset data starts with struct user_za_header, containing: ...@@ -331,6 +331,9 @@ The regset data starts with struct user_za_header, containing:
been read if a PTRACE_GETREGSET of NT_ARM_ZA were executed for each thread been read if a PTRACE_GETREGSET of NT_ARM_ZA were executed for each thread
when the coredump was generated. when the coredump was generated.
* The NT_ARM_TLS note will be extended to two registers, the second register
will contain TPIDR2_EL0 on systems that support SME and will be read as
zero with writes ignored otherwise.
9. System runtime configuration 9. System runtime configuration
-------------------------------- --------------------------------
......
...@@ -111,7 +111,7 @@ the SVE instruction set architecture. ...@@ -111,7 +111,7 @@ the SVE instruction set architecture.
* On syscall, V0..V31 are preserved (as without SVE). Thus, bits [127:0] of * On syscall, V0..V31 are preserved (as without SVE). Thus, bits [127:0] of
Z0..Z31 are preserved. All other bits of Z0..Z31, and all of P0..P15 and FFR Z0..Z31 are preserved. All other bits of Z0..Z31, and all of P0..P15 and FFR
become unspecified on return from a syscall. become zero on return from a syscall.
* The SVE registers are not used to pass arguments to or receive results from * The SVE registers are not used to pass arguments to or receive results from
any syscall. any syscall.
......
...@@ -733,6 +733,19 @@ config ARM64_ERRATUM_2077057 ...@@ -733,6 +733,19 @@ config ARM64_ERRATUM_2077057
If unsure, say Y. If unsure, say Y.
config ARM64_ERRATUM_2658417
bool "Cortex-A510: 2658417: remove BF16 support due to incorrect result"
default y
help
This option adds the workaround for ARM Cortex-A510 erratum 2658417.
Affected Cortex-A510 (r0p0 to r1p1) may produce the wrong result for
BFMMLA or VMMLA instructions in rare circumstances when a pair of
A510 CPUs are using shared neon hardware. As the sharing is not
discoverable by the kernel, hide the BF16 HWCAP to indicate that
user-space should not be using these instructions.
If unsure, say Y.
config ARM64_ERRATUM_2119858 config ARM64_ERRATUM_2119858
bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode" bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
default y default y
......
...@@ -18,6 +18,7 @@ CONFIG_NUMA_BALANCING=y ...@@ -18,6 +18,7 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_DEVICE=y
...@@ -102,6 +103,8 @@ CONFIG_ARM_SCMI_CPUFREQ=y ...@@ -102,6 +103,8 @@ CONFIG_ARM_SCMI_CPUFREQ=y
CONFIG_ARM_TEGRA186_CPUFREQ=y CONFIG_ARM_TEGRA186_CPUFREQ=y
CONFIG_QORIQ_CPUFREQ=y CONFIG_QORIQ_CPUFREQ=y
CONFIG_ACPI=y CONFIG_ACPI=y
CONFIG_ACPI_HOTPLUG_MEMORY=y
CONFIG_ACPI_HMAT=y
CONFIG_ACPI_APEI=y CONFIG_ACPI_APEI=y
CONFIG_ACPI_APEI_GHES=y CONFIG_ACPI_APEI_GHES=y
CONFIG_ACPI_APEI_PCIEAER=y CONFIG_ACPI_APEI_PCIEAER=y
...@@ -126,6 +129,8 @@ CONFIG_MODULES=y ...@@ -126,6 +129,8 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_UNLOAD=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPAT_BRK is not set # CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y CONFIG_KSM=y
CONFIG_MEMORY_FAILURE=y CONFIG_MEMORY_FAILURE=y
CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE=y
...@@ -139,12 +144,16 @@ CONFIG_IP_PNP_DHCP=y ...@@ -139,12 +144,16 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_BOOTP=y
CONFIG_IPV6=m CONFIG_IPV6=m
CONFIG_NETFILTER=y CONFIG_NETFILTER=y
CONFIG_BRIDGE_NETFILTER=m
CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NETFILTER_XT_MARK=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_IP_VS=m
CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_TARGET_REJECT=m
...@@ -1229,8 +1238,14 @@ CONFIG_PHY_UNIPHIER_USB3=y ...@@ -1229,8 +1238,14 @@ CONFIG_PHY_UNIPHIER_USB3=y
CONFIG_PHY_TEGRA_XUSB=y CONFIG_PHY_TEGRA_XUSB=y
CONFIG_PHY_AM654_SERDES=m CONFIG_PHY_AM654_SERDES=m
CONFIG_PHY_J721E_WIZ=m CONFIG_PHY_J721E_WIZ=m
CONFIG_ARM_CCI_PMU=m
CONFIG_ARM_CCN=m
CONFIG_ARM_CMN=m
CONFIG_ARM_SMMU_V3_PMU=m CONFIG_ARM_SMMU_V3_PMU=m
CONFIG_ARM_DSU_PMU=m
CONFIG_FSL_IMX8_DDR_PMU=m CONFIG_FSL_IMX8_DDR_PMU=m
CONFIG_ARM_SPE_PMU=m
CONFIG_ARM_DMC620_PMU=m
CONFIG_QCOM_L2_PMU=y CONFIG_QCOM_L2_PMU=y
CONFIG_QCOM_L3_PMU=y CONFIG_QCOM_L3_PMU=y
CONFIG_HISI_PMU=y CONFIG_HISI_PMU=y
...@@ -1325,4 +1340,12 @@ CONFIG_DEBUG_FS=y ...@@ -1325,4 +1340,12 @@ CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set # CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set # CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set # CONFIG_FTRACE is not set
CONFIG_CORESIGHT=m
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m
CONFIG_CORESIGHT_CATU=m
CONFIG_CORESIGHT_SINK_TPIU=m
CONFIG_CORESIGHT_SINK_ETBV10=m
CONFIG_CORESIGHT_STM=m
CONFIG_CORESIGHT_CPU_DEBUG=m
CONFIG_CORESIGHT_CTI=m
CONFIG_MEMTEST=y CONFIG_MEMTEST=y
...@@ -384,8 +384,8 @@ alternative_cb_end ...@@ -384,8 +384,8 @@ alternative_cb_end
.macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1 .macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
mrs \tmp0, ID_AA64MMFR0_EL1 mrs \tmp0, ID_AA64MMFR0_EL1
// Narrow PARange to fit the PS field in TCR_ELx // Narrow PARange to fit the PS field in TCR_ELx
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3 ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
cmp \tmp0, \tmp1 cmp \tmp0, \tmp1
csel \tmp0, \tmp1, \tmp0, hi csel \tmp0, \tmp1, \tmp0, hi
bfi \tcr, \tmp0, \pos, #3 bfi \tcr, \tmp0, \pos, #3
...@@ -512,7 +512,7 @@ alternative_endif ...@@ -512,7 +512,7 @@ alternative_endif
*/ */
.macro reset_pmuserenr_el0, tmpreg .macro reset_pmuserenr_el0, tmpreg
mrs \tmpreg, id_aa64dfr0_el1 mrs \tmpreg, id_aa64dfr0_el1
sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4 sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
cmp \tmpreg, #1 // Skip if no PMU present cmp \tmpreg, #1 // Skip if no PMU present
b.lt 9000f b.lt 9000f
msr pmuserenr_el0, xzr // Disable PMU access from EL0 msr pmuserenr_el0, xzr // Disable PMU access from EL0
...@@ -524,7 +524,7 @@ alternative_endif ...@@ -524,7 +524,7 @@ alternative_endif
*/ */
.macro reset_amuserenr_el0, tmpreg .macro reset_amuserenr_el0, tmpreg
mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1 mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1
ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4 ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
cbz \tmpreg, .Lskip_\@ // Skip if no AMU present cbz \tmpreg, .Lskip_\@ // Skip if no AMU present
msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0 msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0
.Lskip_\@: .Lskip_\@:
...@@ -612,7 +612,7 @@ alternative_endif ...@@ -612,7 +612,7 @@ alternative_endif
.macro offset_ttbr1, ttbr, tmp .macro offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52 #ifdef CONFIG_ARM64_VA_BITS_52
mrs_s \tmp, SYS_ID_AA64MMFR2_EL1 mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT) and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
cbnz \tmp, .Lskipoffs_\@ cbnz \tmp, .Lskipoffs_\@
orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ : .Lskipoffs_\@ :
......
...@@ -12,19 +12,6 @@ ...@@ -12,19 +12,6 @@
#include <linux/stringify.h> #include <linux/stringify.h>
#ifdef CONFIG_ARM64_LSE_ATOMICS
#define __LL_SC_FALLBACK(asm_ops) \
" b 3f\n" \
" .subsection 1\n" \
"3:\n" \
asm_ops "\n" \
" b 4f\n" \
" .previous\n" \
"4:\n"
#else
#define __LL_SC_FALLBACK(asm_ops) asm_ops
#endif
#ifndef CONFIG_CC_HAS_K_CONSTRAINT #ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K #define K
#endif #endif
...@@ -36,38 +23,36 @@ asm_ops "\n" \ ...@@ -36,38 +23,36 @@ asm_ops "\n" \
*/ */
#define ATOMIC_OP(op, asm_op, constraint) \ #define ATOMIC_OP(op, asm_op, constraint) \
static inline void \ static __always_inline void \
__ll_sc_atomic_##op(int i, atomic_t *v) \ __ll_sc_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
\ \
asm volatile("// atomic_" #op "\n" \ asm volatile("// atomic_" #op "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \ " prfm pstl1strm, %2\n" \
"1: ldxr %w0, %2\n" \ "1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \ " " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \ " stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n") \ " cbnz %w1, 1b\n" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i)); \ : __stringify(constraint) "r" (i)); \
} }
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline int \ static __always_inline int \
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
\ \
asm volatile("// atomic_" #op "_return" #name "\n" \ asm volatile("// atomic_" #op "_return" #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \ " prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %w0, %2\n" \ "1: ld" #acq "xr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \ " " #asm_op " %w0, %w0, %w3\n" \
" st" #rel "xr %w1, %w0, %2\n" \ " st" #rel "xr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n" \ " cbnz %w1, 1b\n" \
" " #mb ) \ " " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \ : __stringify(constraint) "r" (i) \
: cl); \ : cl); \
...@@ -76,20 +61,19 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ ...@@ -76,20 +61,19 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline int \ static __always_inline int \
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int val, result; \ int val, result; \
\ \
asm volatile("// atomic_fetch_" #op #name "\n" \ asm volatile("// atomic_fetch_" #op #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \ " prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %w0, %3\n" \ "1: ld" #acq "xr %w0, %3\n" \
" " #asm_op " %w1, %w0, %w4\n" \ " " #asm_op " %w1, %w0, %w4\n" \
" st" #rel "xr %w2, %w1, %3\n" \ " st" #rel "xr %w2, %w1, %3\n" \
" cbnz %w2, 1b\n" \ " cbnz %w2, 1b\n" \
" " #mb ) \ " " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \ : __stringify(constraint) "r" (i) \
: cl); \ : cl); \
...@@ -135,38 +119,36 @@ ATOMIC_OPS(andnot, bic, ) ...@@ -135,38 +119,36 @@ ATOMIC_OPS(andnot, bic, )
#undef ATOMIC_OP #undef ATOMIC_OP
#define ATOMIC64_OP(op, asm_op, constraint) \ #define ATOMIC64_OP(op, asm_op, constraint) \
static inline void \ static __always_inline void \
__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
{ \ { \
s64 result; \ s64 result; \
unsigned long tmp; \ unsigned long tmp; \
\ \
asm volatile("// atomic64_" #op "\n" \ asm volatile("// atomic64_" #op "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \ " prfm pstl1strm, %2\n" \
"1: ldxr %0, %2\n" \ "1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \ " " #asm_op " %0, %0, %3\n" \
" stxr %w1, %0, %2\n" \ " stxr %w1, %0, %2\n" \
" cbnz %w1, 1b") \ " cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i)); \ : __stringify(constraint) "r" (i)); \
} }
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \ static __always_inline long \
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
{ \ { \
s64 result; \ s64 result; \
unsigned long tmp; \ unsigned long tmp; \
\ \
asm volatile("// atomic64_" #op "_return" #name "\n" \ asm volatile("// atomic64_" #op "_return" #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \ " prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %0, %2\n" \ "1: ld" #acq "xr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \ " " #asm_op " %0, %0, %3\n" \
" st" #rel "xr %w1, %0, %2\n" \ " st" #rel "xr %w1, %0, %2\n" \
" cbnz %w1, 1b\n" \ " cbnz %w1, 1b\n" \
" " #mb ) \ " " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \ : __stringify(constraint) "r" (i) \
: cl); \ : cl); \
...@@ -175,20 +157,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ ...@@ -175,20 +157,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
} }
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \ static __always_inline long \
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \ { \
s64 result, val; \ s64 result, val; \
unsigned long tmp; \ unsigned long tmp; \
\ \
asm volatile("// atomic64_fetch_" #op #name "\n" \ asm volatile("// atomic64_fetch_" #op #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \ " prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %0, %3\n" \ "1: ld" #acq "xr %0, %3\n" \
" " #asm_op " %1, %0, %4\n" \ " " #asm_op " %1, %0, %4\n" \
" st" #rel "xr %w2, %1, %3\n" \ " st" #rel "xr %w2, %1, %3\n" \
" cbnz %w2, 1b\n" \ " cbnz %w2, 1b\n" \
" " #mb ) \ " " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
: __stringify(constraint) "r" (i) \ : __stringify(constraint) "r" (i) \
: cl); \ : cl); \
...@@ -233,14 +214,13 @@ ATOMIC64_OPS(andnot, bic, ) ...@@ -233,14 +214,13 @@ ATOMIC64_OPS(andnot, bic, )
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
static inline s64 static __always_inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v) __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{ {
s64 result; s64 result;
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n" asm volatile("// atomic64_dec_if_positive\n"
__LL_SC_FALLBACK(
" prfm pstl1strm, %2\n" " prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n" "1: ldxr %0, %2\n"
" subs %0, %0, #1\n" " subs %0, %0, #1\n"
...@@ -248,7 +228,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) ...@@ -248,7 +228,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
" stlxr %w1, %0, %2\n" " stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
" dmb ish\n" " dmb ish\n"
"2:") "2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: :
: "cc", "memory"); : "cc", "memory");
...@@ -257,7 +237,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) ...@@ -257,7 +237,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
} }
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \ #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
static inline u##sz \ static __always_inline u##sz \
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
unsigned long old, \ unsigned long old, \
u##sz new) \ u##sz new) \
...@@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ ...@@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
old = (u##sz)old; \ old = (u##sz)old; \
\ \
asm volatile( \ asm volatile( \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %[v]\n" \ " prfm pstl1strm, %[v]\n" \
"1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
...@@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ ...@@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
" st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \ " cbnz %w[tmp], 1b\n" \
" " #mb "\n" \ " " #mb "\n" \
"2:") \ "2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(u##sz *)ptr) \ [v] "+Q" (*(u##sz *)ptr) \
: [old] __stringify(constraint) "r" (old), [new] "r" (new) \ : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
...@@ -316,7 +295,7 @@ __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) ...@@ -316,7 +295,7 @@ __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE #undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, rel, cl) \ #define __CMPXCHG_DBL(name, mb, rel, cl) \
static inline long \ static __always_inline long \
__ll_sc__cmpxchg_double##name(unsigned long old1, \ __ll_sc__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \ unsigned long old2, \
unsigned long new1, \ unsigned long new1, \
...@@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \ ...@@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
unsigned long tmp, ret; \ unsigned long tmp, ret; \
\ \
asm volatile("// __cmpxchg_double" #name "\n" \ asm volatile("// __cmpxchg_double" #name "\n" \
__LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \ " prfm pstl1strm, %2\n" \
"1: ldxp %0, %1, %2\n" \ "1: ldxp %0, %1, %2\n" \
" eor %0, %0, %3\n" \ " eor %0, %0, %3\n" \
...@@ -336,7 +314,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \ ...@@ -336,7 +314,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
" st" #rel "xp %w0, %5, %6, %2\n" \ " st" #rel "xp %w0, %5, %6, %2\n" \
" cbnz %w0, 1b\n" \ " cbnz %w0, 1b\n" \
" " #mb "\n" \ " " #mb "\n" \
"2:") \ "2:" \
: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
: cl); \ : cl); \
......
...@@ -11,7 +11,8 @@ ...@@ -11,7 +11,8 @@
#define __ASM_ATOMIC_LSE_H #define __ASM_ATOMIC_LSE_H
#define ATOMIC_OP(op, asm_op) \ #define ATOMIC_OP(op, asm_op) \
static inline void __lse_atomic_##op(int i, atomic_t *v) \ static __always_inline void \
__lse_atomic_##op(int i, atomic_t *v) \
{ \ { \
asm volatile( \ asm volatile( \
__LSE_PREAMBLE \ __LSE_PREAMBLE \
...@@ -25,7 +26,7 @@ ATOMIC_OP(or, stset) ...@@ -25,7 +26,7 @@ ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor) ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd) ATOMIC_OP(add, stadd)
static inline void __lse_atomic_sub(int i, atomic_t *v) static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
{ {
__lse_atomic_add(-i, v); __lse_atomic_add(-i, v);
} }
...@@ -33,7 +34,8 @@ static inline void __lse_atomic_sub(int i, atomic_t *v) ...@@ -33,7 +34,8 @@ static inline void __lse_atomic_sub(int i, atomic_t *v)
#undef ATOMIC_OP #undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ static __always_inline int \
__lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \ { \
int old; \ int old; \
\ \
...@@ -63,7 +65,8 @@ ATOMIC_FETCH_OPS(add, ldadd) ...@@ -63,7 +65,8 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OPS #undef ATOMIC_FETCH_OPS
#define ATOMIC_FETCH_OP_SUB(name) \ #define ATOMIC_FETCH_OP_SUB(name) \
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ static __always_inline int \
__lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \ { \
return __lse_atomic_fetch_add##name(-i, v); \ return __lse_atomic_fetch_add##name(-i, v); \
} }
...@@ -76,12 +79,14 @@ ATOMIC_FETCH_OP_SUB( ) ...@@ -76,12 +79,14 @@ ATOMIC_FETCH_OP_SUB( )
#undef ATOMIC_FETCH_OP_SUB #undef ATOMIC_FETCH_OP_SUB
#define ATOMIC_OP_ADD_SUB_RETURN(name) \ #define ATOMIC_OP_ADD_SUB_RETURN(name) \
static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ static __always_inline int \
__lse_atomic_add_return##name(int i, atomic_t *v) \
{ \ { \
return __lse_atomic_fetch_add##name(i, v) + i; \ return __lse_atomic_fetch_add##name(i, v) + i; \
} \ } \
\ \
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ static __always_inline int \
__lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \ { \
return __lse_atomic_fetch_sub(i, v) - i; \ return __lse_atomic_fetch_sub(i, v) - i; \
} }
...@@ -93,13 +98,14 @@ ATOMIC_OP_ADD_SUB_RETURN( ) ...@@ -93,13 +98,14 @@ ATOMIC_OP_ADD_SUB_RETURN( )
#undef ATOMIC_OP_ADD_SUB_RETURN #undef ATOMIC_OP_ADD_SUB_RETURN
static inline void __lse_atomic_and(int i, atomic_t *v) static __always_inline void __lse_atomic_and(int i, atomic_t *v)
{ {
return __lse_atomic_andnot(~i, v); return __lse_atomic_andnot(~i, v);
} }
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ static __always_inline int \
__lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \ { \
return __lse_atomic_fetch_andnot##name(~i, v); \ return __lse_atomic_fetch_andnot##name(~i, v); \
} }
...@@ -112,7 +118,8 @@ ATOMIC_FETCH_OP_AND( , al, "memory") ...@@ -112,7 +118,8 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND #undef ATOMIC_FETCH_OP_AND
#define ATOMIC64_OP(op, asm_op) \ #define ATOMIC64_OP(op, asm_op) \
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ static __always_inline void \
__lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \ { \
asm volatile( \ asm volatile( \
__LSE_PREAMBLE \ __LSE_PREAMBLE \
...@@ -126,7 +133,7 @@ ATOMIC64_OP(or, stset) ...@@ -126,7 +133,7 @@ ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor) ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd) ATOMIC64_OP(add, stadd)
static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{ {
__lse_atomic64_add(-i, v); __lse_atomic64_add(-i, v);
} }
...@@ -134,7 +141,8 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) ...@@ -134,7 +141,8 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
#undef ATOMIC64_OP #undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ static __always_inline long \
__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \ { \
s64 old; \ s64 old; \
\ \
...@@ -164,7 +172,8 @@ ATOMIC64_FETCH_OPS(add, ldadd) ...@@ -164,7 +172,8 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS #undef ATOMIC64_FETCH_OPS
#define ATOMIC64_FETCH_OP_SUB(name) \ #define ATOMIC64_FETCH_OP_SUB(name) \
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ static __always_inline long \
__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \ { \
return __lse_atomic64_fetch_add##name(-i, v); \ return __lse_atomic64_fetch_add##name(-i, v); \
} }
...@@ -177,12 +186,14 @@ ATOMIC64_FETCH_OP_SUB( ) ...@@ -177,12 +186,14 @@ ATOMIC64_FETCH_OP_SUB( )
#undef ATOMIC64_FETCH_OP_SUB #undef ATOMIC64_FETCH_OP_SUB
#define ATOMIC64_OP_ADD_SUB_RETURN(name) \ #define ATOMIC64_OP_ADD_SUB_RETURN(name) \
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ static __always_inline long \
__lse_atomic64_add_return##name(s64 i, atomic64_t *v) \
{ \ { \
return __lse_atomic64_fetch_add##name(i, v) + i; \ return __lse_atomic64_fetch_add##name(i, v) + i; \
} \ } \
\ \
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\ static __always_inline long \
__lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
{ \ { \
return __lse_atomic64_fetch_sub##name(i, v) - i; \ return __lse_atomic64_fetch_sub##name(i, v) - i; \
} }
...@@ -194,13 +205,14 @@ ATOMIC64_OP_ADD_SUB_RETURN( ) ...@@ -194,13 +205,14 @@ ATOMIC64_OP_ADD_SUB_RETURN( )
#undef ATOMIC64_OP_ADD_SUB_RETURN #undef ATOMIC64_OP_ADD_SUB_RETURN
static inline void __lse_atomic64_and(s64 i, atomic64_t *v) static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{ {
return __lse_atomic64_andnot(~i, v); return __lse_atomic64_andnot(~i, v);
} }
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \ #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ static __always_inline long \
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \ { \
return __lse_atomic64_fetch_andnot##name(~i, v); \ return __lse_atomic64_fetch_andnot##name(~i, v); \
} }
...@@ -212,7 +224,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") ...@@ -212,7 +224,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
#undef ATOMIC64_FETCH_OP_AND #undef ATOMIC64_FETCH_OP_AND
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{ {
unsigned long tmp; unsigned long tmp;
......
...@@ -45,10 +45,6 @@ static inline unsigned int arch_slab_minalign(void) ...@@ -45,10 +45,6 @@ static inline unsigned int arch_slab_minalign(void)
#define arch_slab_minalign() arch_slab_minalign() #define arch_slab_minalign() arch_slab_minalign()
#endif #endif
#define CTR_CACHE_MINLINE_MASK \
(0xf << CTR_EL0_DMINLINE_SHIFT | \
CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr) #define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
#define ICACHEF_ALIASING 0 #define ICACHEF_ALIASING 0
......
...@@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap) ...@@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
u64 mask = GENMASK_ULL(field + 3, field); u64 mask = GENMASK_ULL(field + 3, field);
/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */ /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
if (val == ID_AA64DFR0_PMUVER_IMP_DEF) if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
val = 0; val = 0;
if (val > cap) { if (val > cap) {
...@@ -597,43 +597,43 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val) ...@@ -597,43 +597,43 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{ {
return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 || return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1; cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
} }
static inline bool id_aa64pfr0_32bit_el1(u64 pfr0) static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{ {
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT); u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
return val == ID_AA64PFR0_ELx_32BIT_64BIT; return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
} }
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0) static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{ {
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT); u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
return val == ID_AA64PFR0_ELx_32BIT_64BIT; return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
} }
static inline bool id_aa64pfr0_sve(u64 pfr0) static inline bool id_aa64pfr0_sve(u64 pfr0)
{ {
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT); u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);
return val > 0; return val > 0;
} }
static inline bool id_aa64pfr1_sme(u64 pfr1) static inline bool id_aa64pfr1_sme(u64 pfr1)
{ {
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT); u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);
return val > 0; return val > 0;
} }
static inline bool id_aa64pfr1_mte(u64 pfr1) static inline bool id_aa64pfr1_mte(u64 pfr1)
{ {
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT); u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
return val >= ID_AA64PFR1_MTE; return val >= ID_AA64PFR1_EL1_MTE_MTE2;
} }
void __init setup_cpu_features(void); void __init setup_cpu_features(void);
...@@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope) ...@@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope)
pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
csv2_val = cpuid_feature_extract_unsigned_field(pfr0, csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
ID_AA64PFR0_CSV2_SHIFT); ID_AA64PFR0_EL1_CSV2_SHIFT);
return csv2_val == 3; return csv2_val == 3;
} }
...@@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void) ...@@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
val = cpuid_feature_extract_unsigned_field(mmfr0, val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN4_SHIFT); ID_AA64MMFR0_EL1_TGRAN4_SHIFT);
return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) && return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX); (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
} }
static inline bool system_supports_64kb_granule(void) static inline bool system_supports_64kb_granule(void)
...@@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void) ...@@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
val = cpuid_feature_extract_unsigned_field(mmfr0, val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN64_SHIFT); ID_AA64MMFR0_EL1_TGRAN64_SHIFT);
return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) && return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX); (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
} }
static inline bool system_supports_16kb_granule(void) static inline bool system_supports_16kb_granule(void)
...@@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void) ...@@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
val = cpuid_feature_extract_unsigned_field(mmfr0, val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_TGRAN16_SHIFT); ID_AA64MMFR0_EL1_TGRAN16_SHIFT);
return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) && return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
(val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX); (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
} }
static inline bool system_supports_mixed_endian_el0(void) static inline bool system_supports_mixed_endian_el0(void)
...@@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void) ...@@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
val = cpuid_feature_extract_unsigned_field(mmfr0, val = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_BIGENDEL_SHIFT); ID_AA64MMFR0_EL1_BIGEND_SHIFT);
return val == 0x1; return val == 0x1;
} }
...@@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); ...@@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{ {
switch (parange) { switch (parange) {
case ID_AA64MMFR0_PARANGE_32: return 32; case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
case ID_AA64MMFR0_PARANGE_36: return 36; case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
case ID_AA64MMFR0_PARANGE_40: return 40; case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
case ID_AA64MMFR0_PARANGE_42: return 42; case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
case ID_AA64MMFR0_PARANGE_44: return 44; case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
case ID_AA64MMFR0_PARANGE_48: return 48; case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
case ID_AA64MMFR0_PARANGE_52: return 52; case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
/* /*
* A future PE could use a value unknown to the kernel. * A future PE could use a value unknown to the kernel.
* However, by the "D10.1.4 Principles of the ID scheme * However, by the "D10.1.4 Principles of the ID scheme
...@@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void) ...@@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void)
mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1, return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_HADBS_SHIFT); ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
} }
static inline bool cpu_has_pan(void) static inline bool cpu_has_pan(void)
{ {
u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1, return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_PAN_SHIFT); ID_AA64MMFR1_EL1_PAN_SHIFT);
} }
#ifdef CONFIG_ARM64_AMU_EXTN #ifdef CONFIG_ARM64_AMU_EXTN
...@@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1) ...@@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
int vmid_bits; int vmid_bits;
vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1, vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_VMIDBITS_SHIFT); ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16) if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
return 16; return 16;
/* /*
...@@ -907,6 +907,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1) ...@@ -907,6 +907,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
return 8; return 8;
} }
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
extern struct arm64_ftr_override id_aa64mmfr1_override; extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64pfr0_override; extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override; extern struct arm64_ftr_override id_aa64pfr1_override;
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
.macro __init_el2_debug .macro __init_el2_debug
mrs x1, id_aa64dfr0_el1 mrs x1, id_aa64dfr0_el1
sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4 sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
cmp x0, #1 cmp x0, #1
b.lt .Lskip_pmu_\@ // Skip if no PMU present b.lt .Lskip_pmu_\@ // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps mrs x0, pmcr_el0 // Disable debug access traps
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
csel x2, xzr, x0, lt // all PMU counters from EL1 csel x2, xzr, x0, lt // all PMU counters from EL1
/* Statistical profiling */ /* Statistical profiling */
ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
cbz x0, .Lskip_spe_\@ // Skip if SPE not present cbz x0, .Lskip_spe_\@ // Skip if SPE not present
mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2, mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2,
...@@ -65,7 +65,7 @@ ...@@ -65,7 +65,7 @@
.Lskip_spe_\@: .Lskip_spe_\@:
/* Trace buffer */ /* Trace buffer */
ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4 ubfx x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present
mrs_s x0, SYS_TRBIDR_EL1 mrs_s x0, SYS_TRBIDR_EL1
...@@ -83,7 +83,7 @@ ...@@ -83,7 +83,7 @@
/* LORegions */ /* LORegions */
.macro __init_el2_lor .macro __init_el2_lor
mrs x1, id_aa64mmfr1_el1 mrs x1, id_aa64mmfr1_el1
ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4 ubfx x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
cbz x0, .Lskip_lor_\@ cbz x0, .Lskip_lor_\@
msr_s SYS_LORC_EL1, xzr msr_s SYS_LORC_EL1, xzr
.Lskip_lor_\@: .Lskip_lor_\@:
...@@ -97,7 +97,7 @@ ...@@ -97,7 +97,7 @@
/* GICv3 system register access */ /* GICv3 system register access */
.macro __init_el2_gicv3 .macro __init_el2_gicv3
mrs x0, id_aa64pfr0_el1 mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4 ubfx x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
cbz x0, .Lskip_gicv3_\@ cbz x0, .Lskip_gicv3_\@
mrs_s x0, SYS_ICC_SRE_EL2 mrs_s x0, SYS_ICC_SRE_EL2
...@@ -132,12 +132,12 @@ ...@@ -132,12 +132,12 @@
/* Disable any fine grained traps */ /* Disable any fine grained traps */
.macro __init_el2_fgt .macro __init_el2_fgt
mrs x1, id_aa64mmfr0_el1 mrs x1, id_aa64mmfr0_el1
ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4 ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
cbz x1, .Lskip_fgt_\@ cbz x1, .Lskip_fgt_\@
mov x0, xzr mov x0, xzr
mrs x1, id_aa64dfr0_el1 mrs x1, id_aa64dfr0_el1
ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4 ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
cmp x1, #3 cmp x1, #3
b.lt .Lset_debug_fgt_\@ b.lt .Lset_debug_fgt_\@
/* Disable PMSNEVFR_EL1 read and write traps */ /* Disable PMSNEVFR_EL1 read and write traps */
...@@ -149,7 +149,7 @@ ...@@ -149,7 +149,7 @@
mov x0, xzr mov x0, xzr
mrs x1, id_aa64pfr1_el1 mrs x1, id_aa64pfr1_el1
ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4 ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
cbz x1, .Lset_fgt_\@ cbz x1, .Lset_fgt_\@
/* Disable nVHE traps of TPIDR2 and SMPRI */ /* Disable nVHE traps of TPIDR2 and SMPRI */
...@@ -162,7 +162,7 @@ ...@@ -162,7 +162,7 @@
msr_s SYS_HFGITR_EL2, xzr msr_s SYS_HFGITR_EL2, xzr
mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4 ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
cbz x1, .Lskip_fgt_\@ cbz x1, .Lskip_fgt_\@
msr_s SYS_HAFGRTR_EL2, xzr msr_s SYS_HAFGRTR_EL2, xzr
......
...@@ -58,8 +58,9 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs, ...@@ -58,8 +58,9 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs,
asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs); asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs); void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs, unsigned long esr);
void do_bti(struct pt_regs *regs); void do_el0_bti(struct pt_regs *regs);
void do_el1_bti(struct pt_regs *regs, unsigned long esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
struct pt_regs *regs); struct pt_regs *regs);
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs); void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
...@@ -72,7 +73,8 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr); ...@@ -72,7 +73,8 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
void do_cp15instr(unsigned long esr, struct pt_regs *regs); void do_cp15instr(unsigned long esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs); void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr); void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr); void do_serror(struct pt_regs *regs, unsigned long esr);
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
......
...@@ -142,7 +142,7 @@ static inline int get_num_brps(void) ...@@ -142,7 +142,7 @@ static inline int get_num_brps(void)
u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
return 1 + return 1 +
cpuid_feature_extract_unsigned_field(dfr0, cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_BRPS_SHIFT); ID_AA64DFR0_EL1_BRPs_SHIFT);
} }
/* Determine number of WRP registers available. */ /* Determine number of WRP registers available. */
...@@ -151,7 +151,7 @@ static inline int get_num_wrps(void) ...@@ -151,7 +151,7 @@ static inline int get_num_wrps(void)
u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
return 1 + return 1 +
cpuid_feature_extract_unsigned_field(dfr0, cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_WRPS_SHIFT); ID_AA64DFR0_EL1_WRPs_SHIFT);
} }
#endif /* __ASM_BREAKPOINT_H */ #endif /* __ASM_BREAKPOINT_H */
...@@ -119,6 +119,7 @@ ...@@ -119,6 +119,7 @@
#define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64) #define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64)
#define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT) #define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT)
#define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16) #define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16)
#define KERNEL_HWCAP_SVE_EBF16 __khwcap2_feature(SVE_EBF16)
/* /*
* This yields a mask that user programs can use to figure out what * This yields a mask that user programs can use to figure out what
......
...@@ -16,9 +16,9 @@ ...@@ -16,9 +16,9 @@
static inline u64 kvm_get_parange(u64 mmfr0) static inline u64 kvm_get_parange(u64 mmfr0)
{ {
u64 parange = cpuid_feature_extract_unsigned_field(mmfr0, u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
ID_AA64MMFR0_PARANGE_SHIFT); ID_AA64MMFR0_EL1_PARANGE_SHIFT);
if (parange > ID_AA64MMFR0_PARANGE_MAX) if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
parange = ID_AA64MMFR0_PARANGE_MAX; parange = ID_AA64MMFR0_EL1_PARANGE_MAX;
return parange; return parange;
} }
......
...@@ -58,11 +58,20 @@ static inline bool is_forbidden_offset_for_adrp(void *place) ...@@ -58,11 +58,20 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
} }
struct plt_entry get_plt_entry(u64 dst, void *pc); struct plt_entry get_plt_entry(u64 dst, void *pc);
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
static inline bool plt_entry_is_initialized(const struct plt_entry *e) static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
{ {
return e->adrp || e->add || e->br; const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
if (strcmp(name, secstrs + s->sh_name) == 0)
return s;
}
return NULL;
} }
#endif /* __ASM_MODULE_H */ #endif /* __ASM_MODULE_H */
...@@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task); ...@@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task);
* The top of the current task's task stack * The top of the current task's task stack
*/ */
#define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE) #define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE)
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL)) #define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1))
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */ #endif /* __ASM_PROCESSOR_H */
...@@ -43,22 +43,5 @@ unsigned long do_sdei_event(struct pt_regs *regs, ...@@ -43,22 +43,5 @@ unsigned long do_sdei_event(struct pt_regs *regs,
unsigned long sdei_arch_get_entry_point(int conduit); unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x) #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
struct stack_info;
bool _on_sdei_stack(unsigned long sp, unsigned long size,
struct stack_info *info);
static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
return false;
if (in_nmi())
return _on_sdei_stack(sp, size, info);
return false;
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __ASM_SDEI_H */ #endif /* __ASM_SDEI_H */
...@@ -22,39 +22,86 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, ...@@ -22,39 +22,86 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
static inline bool on_irq_stack(unsigned long sp, unsigned long size, static inline struct stack_info stackinfo_get_irq(void)
struct stack_info *info)
{ {
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr); unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
unsigned long high = low + IRQ_STACK_SIZE; unsigned long high = low + IRQ_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info); return (struct stack_info) {
.low = low,
.high = high,
};
} }
static inline bool on_task_stack(const struct task_struct *tsk, static inline bool on_irq_stack(unsigned long sp, unsigned long size)
unsigned long sp, unsigned long size, {
struct stack_info *info) struct stack_info info = stackinfo_get_irq();
return stackinfo_on_stack(&info, sp, size);
}
static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk)
{ {
unsigned long low = (unsigned long)task_stack_page(tsk); unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE; unsigned long high = low + THREAD_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_TASK, info); return (struct stack_info) {
.low = low,
.high = high,
};
}
static inline bool on_task_stack(const struct task_struct *tsk,
unsigned long sp, unsigned long size)
{
struct stack_info info = stackinfo_get_task(tsk);
return stackinfo_on_stack(&info, sp, size);
} }
#ifdef CONFIG_VMAP_STACK #ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
static inline bool on_overflow_stack(unsigned long sp, unsigned long size, static inline struct stack_info stackinfo_get_overflow(void)
struct stack_info *info)
{ {
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack); unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE; unsigned long high = low + OVERFLOW_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); return (struct stack_info) {
.low = low,
.high = high,
};
}
#else
#define stackinfo_get_overflow() stackinfo_get_unknown()
#endif
#if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
static inline struct stack_info stackinfo_get_sdei_normal(void)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
static inline struct stack_info stackinfo_get_sdei_critical(void)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
} }
#else #else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size, #define stackinfo_get_sdei_normal() stackinfo_get_unknown()
struct stack_info *info) { return false; } #define stackinfo_get_sdei_critical() stackinfo_get_unknown()
#endif #endif
#endif /* __ASM_STACKTRACE_H */ #endif /* __ASM_STACKTRACE_H */
@@ -2,13 +2,6 @@
 /*
  * Common arm64 stack unwinder code.
  *
- * To implement a new arm64 stack unwinder:
- *     1) Include this header
- *
- *     2) Call into unwind_next_common() from your top level unwind
- *        function, passing it the validation and translation callbacks
- *        (though the later can be NULL if no translation is required).
- *
  * See: arch/arm64/kernel/stacktrace.c for the reference implementation.
  *
  * Copyright (C) 2012 ARM Ltd.
@@ -16,78 +9,60 @@
 #ifndef __ASM_STACKTRACE_COMMON_H
 #define __ASM_STACKTRACE_COMMON_H

-#include <linux/bitmap.h>
-#include <linux/bitops.h>
 #include <linux/kprobes.h>
 #include <linux/types.h>

-enum stack_type {
-	STACK_TYPE_UNKNOWN,
-	STACK_TYPE_TASK,
-	STACK_TYPE_IRQ,
-	STACK_TYPE_OVERFLOW,
-	STACK_TYPE_SDEI_NORMAL,
-	STACK_TYPE_SDEI_CRITICAL,
-	STACK_TYPE_HYP,
-	__NR_STACK_TYPES
-};
-
 struct stack_info {
 	unsigned long low;
 	unsigned long high;
-	enum stack_type type;
 };

-/*
- * A snapshot of a frame record or fp/lr register values, along with some
- * accounting information necessary for robust unwinding.
+/**
+ * struct unwind_state - state used for robust unwinding.
  *
  * @fp:          The fp value in the frame record (or the real fp)
  * @pc:          The lr value in the frame record (or the real lr)
  *
- * @stacks_done: Stacks which have been entirely unwound, for which it is no
- *               longer valid to unwind to.
- *
- * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
- *               of 0. This is used to ensure that within a stack, each
- *               subsequent frame record is at an increasing address.
- * @prev_type:   The type of stack this frame record was on, or a synthetic
- *               value of STACK_TYPE_UNKNOWN. This is used to detect a
- *               transition from one stack to another.
- *
  * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
  *               associated with the most recently encountered replacement lr
  *               value.
  *
  * @task:        The task being unwound.
+ *
+ * @stack:       The stack currently being unwound.
+ * @stacks:      An array of stacks which can be unwound.
+ * @nr_stacks:   The number of stacks in @stacks.
  */
 struct unwind_state {
 	unsigned long fp;
 	unsigned long pc;
-	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
-	unsigned long prev_fp;
-	enum stack_type prev_type;
 #ifdef CONFIG_KRETPROBES
 	struct llist_node *kr_cur;
 #endif
 	struct task_struct *task;
+
+	struct stack_info stack;
+	struct stack_info *stacks;
+	int nr_stacks;
 };

-static inline bool on_stack(unsigned long sp, unsigned long size,
-			    unsigned long low, unsigned long high,
-			    enum stack_type type, struct stack_info *info)
+static inline struct stack_info stackinfo_get_unknown(void)
+{
+	return (struct stack_info) {
+		.low = 0,
+		.high = 0,
+	};
+}
+
+static inline bool stackinfo_on_stack(const struct stack_info *info,
+				      unsigned long sp, unsigned long size)
 {
-	if (!low)
+	if (!info->low)
 		return false;

-	if (sp < low || sp + size < sp || sp + size > high)
+	if (sp < info->low || sp + size < sp || sp + size > info->high)
 		return false;

-	if (info) {
-		info->low = low;
-		info->high = high;
-		info->type = type;
-	}
-
 	return true;
 }
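
The new range check is easy to model outside the kernel. The following stand-alone sketch (user-space C, not kernel code; buffer addresses are made up) mirrors stackinfo_on_stack() above: an object [sp, sp + size) is on a stack only if it lies entirely within [low, high), and a zero low marks a stack that is absent or inaccessible.

	/* Minimal user-space model of the stack_info range check. */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct stack_info {
		unsigned long low;
		unsigned long high;
	};

	/* Same logic as the kernel helper: reject empty stacks, overflow of
	 * sp + size, and any object that does not fit within [low, high). */
	static bool stackinfo_on_stack(const struct stack_info *info,
				       unsigned long sp, unsigned long size)
	{
		if (!info->low)
			return false;
		if (sp < info->low || sp + size < sp || sp + size > info->high)
			return false;
		return true;
	}

	int main(void)
	{
		struct stack_info stack   = { .low = 0x1000, .high = 0x2000 };
		struct stack_info unknown = { 0, 0 };

		assert(stackinfo_on_stack(&stack, 0x1ff0, 16));    /* fits */
		assert(!stackinfo_on_stack(&stack, 0x1ff8, 16));   /* overruns high */
		assert(!stackinfo_on_stack(&unknown, 0x1ff0, 16)); /* unknown stack */
		printf("stack_info range checks behave as expected\n");
		return 0;
	}

The zero check is what lets a single {0, 0} "unknown" stack_info stand in for any stack that does not exist or cannot be unwound in the current context.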
@@ -99,99 +74,101 @@ static inline void unwind_init_common(struct unwind_state *state,
 	state->kr_cur = NULL;
 #endif

-	/*
-	 * Prime the first unwind.
-	 *
-	 * In unwind_next() we'll check that the FP points to a valid stack,
-	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
-	 * treated as a transition to whichever stack that happens to be. The
-	 * prev_fp value won't be used, but we set it to 0 such that it is
-	 * definitely not an accessible stack address.
-	 */
-	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
-	state->prev_fp = 0;
-	state->prev_type = STACK_TYPE_UNKNOWN;
+	state->stack = stackinfo_get_unknown();
 }

-/*
- * stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to
- * a kernel address.
- *
- * @fp: the frame pointer to be updated to its kernel address.
- * @type: the stack type associated with frame pointer @fp
- *
- * Returns true and success and @fp is updated to the corresponding
- * kernel virtual address; otherwise returns false.
- */
-typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
-					    enum stack_type type);
+static struct stack_info *unwind_find_next_stack(const struct unwind_state *state,
+						 unsigned long sp,
+						 unsigned long size)
+{
+	for (int i = 0; i < state->nr_stacks; i++) {
+		struct stack_info *info = &state->stacks[i];

-/*
- * on_accessible_stack_fn() - Check whether a stack range is on any
- * of the possible stacks.
+		if (stackinfo_on_stack(info, sp, size))
+			return info;
+	}
+
+	return NULL;
+}
+
+/**
+ * unwind_consume_stack() - Check if an object is on an accessible stack,
+ * updating stack boundaries so that future unwind steps cannot consume this
+ * object again.
+ *
+ * @state: the current unwind state.
+ * @sp:    the base address of the object.
+ * @size:  the size of the object.
  *
- * @tsk: task whose stack is being unwound
- * @sp: stack address being checked
- * @size: size of the stack range being checked
- * @info: stack unwinding context
+ * Return: 0 upon success, an error code otherwise.
  */
-typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
-				       unsigned long sp, unsigned long size,
-				       struct stack_info *info);
-
-static inline int unwind_next_common(struct unwind_state *state,
-				     struct stack_info *info,
-				     on_accessible_stack_fn accessible,
-				     stack_trace_translate_fp_fn translate_fp)
+static inline int unwind_consume_stack(struct unwind_state *state,
+				       unsigned long sp,
+				       unsigned long size)
 {
-	unsigned long fp = state->fp, kern_fp = fp;
-	struct task_struct *tsk = state->task;
+	struct stack_info *next;

-	if (fp & 0x7)
-		return -EINVAL;
+	if (stackinfo_on_stack(&state->stack, sp, size))
+		goto found;

-	if (!accessible(tsk, fp, 16, info))
-		return -EINVAL;
-
-	if (test_bit(info->type, state->stacks_done))
+	next = unwind_find_next_stack(state, sp, size);
+	if (!next)
 		return -EINVAL;

 	/*
-	 * If fp is not from the current address space perform the necessary
-	 * translation before dereferencing it to get the next fp.
-	 */
-	if (translate_fp && !translate_fp(&kern_fp, info->type))
-		return -EINVAL;
-
-	/*
-	 * As stacks grow downward, any valid record on the same stack must be
-	 * at a strictly higher address than the prior record.
+	 * Stack transitions are strictly one-way, and once we've
+	 * transitioned from one stack to another, it's never valid to
+	 * unwind back to the old stack.
+	 *
+	 * Remove the current stack from the list of stacks so that it cannot
+	 * be found on a subsequent transition.
 	 *
-	 * Stacks can nest in several valid orders, e.g.
+	 * Note that stacks can nest in several valid orders, e.g.
 	 *
 	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
 	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
 	 * HYP -> OVERFLOW
 	 *
-	 * ... but the nesting itself is strict. Once we transition from one
-	 * stack to another, it's never valid to unwind back to that first
-	 * stack.
+	 * ... so we do not check the specific order of stack
+	 * transitions.
 	 */
-	if (info->type == state->prev_type) {
-		if (fp <= state->prev_fp)
-			return -EINVAL;
-	} else {
-		__set_bit(state->prev_type, state->stacks_done);
-	}
+	state->stack = *next;
+	*next = stackinfo_get_unknown();
+
+found:
+	/*
+	 * Future unwind steps can only consume stack above this frame record.
+	 * Update the current stack to start immediately above it.
+	 */
+	state->stack.low = sp + size;
+	return 0;
+}
+
+/**
+ * unwind_next_frame_record() - Unwind to the next frame record.
+ *
+ * @state: the current unwind state.
+ *
+ * Return: 0 upon success, an error code otherwise.
+ */
+static inline int
+unwind_next_frame_record(struct unwind_state *state)
+{
+	unsigned long fp = state->fp;
+	int err;
+
+	if (fp & 0x7)
+		return -EINVAL;
+
+	err = unwind_consume_stack(state, fp, 16);
+	if (err)
+		return err;

 	/*
-	 * Record this frame record's values and location. The prev_fp and
-	 * prev_type are only meaningful to the next unwind_next() invocation.
+	 * Record this frame record's values.
 	 */
-	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
-	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
-	state->prev_fp = fp;
-	state->prev_type = info->type;
+	state->fp = READ_ONCE(*(unsigned long *)(fp));
+	state->pc = READ_ONCE(*(unsigned long *)(fp + 8));

 	return 0;
 }
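
The consume/transition logic above is the heart of the rework. A small user-space model (illustrative only; the function names outside the kernel helpers and the two example stacks are invented) shows the two invariants it enforces: transitions between stacks are one-way, and within a stack each consumed frame record bumps the low bound so unwinding can only move to strictly higher addresses.

	/* User-space model of unwind_consume_stack()'s bookkeeping. */
	#include <stdbool.h>
	#include <stdio.h>

	struct stack_info { unsigned long low, high; };

	struct unwind_state {
		struct stack_info stack;	/* stack currently being unwound */
		struct stack_info *stacks;	/* candidate stacks */
		int nr_stacks;
	};

	static bool on_stack(const struct stack_info *i, unsigned long sp, unsigned long size)
	{
		return i->low && sp >= i->low && sp + size >= sp && sp + size <= i->high;
	}

	static int consume(struct unwind_state *s, unsigned long sp, unsigned long size)
	{
		if (!on_stack(&s->stack, sp, size)) {
			int i;

			for (i = 0; i < s->nr_stacks; i++)
				if (on_stack(&s->stacks[i], sp, size))
					break;
			if (i == s->nr_stacks)
				return -1;
			/* one-way transition: adopt the new stack, forget it in the list */
			s->stack = s->stacks[i];
			s->stacks[i] = (struct stack_info){ 0, 0 };
		}
		/* future steps may only consume stack above this record */
		s->stack.low = sp + size;
		return 0;
	}

	int main(void)
	{
		struct stack_info stacks[] = {
			{ 0x1000, 0x2000 },	/* "task" stack */
			{ 0x3000, 0x4000 },	/* "irq" stack */
		};
		struct unwind_state s = { { 0, 0 }, stacks, 2 };

		printf("irq frame:        %d\n", consume(&s, 0x3f00, 16));	/* 0  */
		printf("task frame:       %d\n", consume(&s, 0x1f00, 16));	/* 0  */
		printf("back to irq:      %d\n", consume(&s, 0x3f40, 16));	/* -1 */
		printf("lower task frame: %d\n", consume(&s, 0x1e00, 16));	/* -1 */
		return 0;
	}

Compared with the old prev_fp/prev_type/stacks_done bookkeeping, all of the state collapses into the boundaries of the current stack plus the shrinking list of not-yet-visited stacks.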
......
@@ -20,8 +20,8 @@
 #include <asm/stacktrace/common.h>

-/*
- * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
+/**
+ * kvm_nvhe_unwind_init() - Start an unwind from the given nVHE HYP fp and pc
  *
  * @state : unwind_state to initialize
  * @fp    : frame pointer at which to start the unwinding.
......
...@@ -190,19 +190,6 @@ ...@@ -190,19 +190,6 @@
#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) #define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) #define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
...@@ -436,19 +423,11 @@ ...@@ -436,19 +423,11 @@
#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7)
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
#define SMIDR_EL1_IMPLEMENTER_SHIFT 24
#define SMIDR_EL1_SMPS_SHIFT 15
#define SMIDR_EL1_AFFINITY_SHIFT 0
#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
...@@ -537,7 +516,6 @@ ...@@ -537,7 +516,6 @@
#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5) #define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) #define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
#define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2)
#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) #define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
...@@ -690,164 +668,30 @@ ...@@ -690,164 +668,30 @@
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
/* id_aa64pfr0 */ /* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1
#define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2
#define ID_AA64PFR0_DIT_SHIFT 48
#define ID_AA64PFR0_AMU_SHIFT 44
#define ID_AA64PFR0_MPAM_SHIFT 40
#define ID_AA64PFR0_SEL2_SHIFT 36
#define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_RAS_SHIFT 28
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
#define ID_AA64PFR0_EL3_SHIFT 12
#define ID_AA64PFR0_EL2_SHIFT 8
#define ID_AA64PFR0_EL1_SHIFT 4
#define ID_AA64PFR0_EL0_SHIFT 0
#define ID_AA64PFR0_AMU 0x1
#define ID_AA64PFR0_SVE 0x1
#define ID_AA64PFR0_RAS_V1 0x1
#define ID_AA64PFR0_RAS_V1P1 0x2
#define ID_AA64PFR0_FP_NI 0xf
#define ID_AA64PFR0_FP_SUPPORTED 0x0
#define ID_AA64PFR0_ASIMD_NI 0xf
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1
#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
/* id_aa64pfr1 */
#define ID_AA64PFR1_SME_SHIFT 24
#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
#define ID_AA64PFR1_RASFRAC_SHIFT 12
#define ID_AA64PFR1_MTE_SHIFT 8
#define ID_AA64PFR1_SSBS_SHIFT 4
#define ID_AA64PFR1_BT_SHIFT 0
#define ID_AA64PFR1_SSBS_PSTATE_NI 0
#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
#define ID_AA64PFR1_BT_BTI 0x1
#define ID_AA64PFR1_SME 1
#define ID_AA64PFR1_MTE_NI 0x0
#define ID_AA64PFR1_MTE_EL0 0x1
#define ID_AA64PFR1_MTE 0x2
#define ID_AA64PFR1_MTE_ASYMM 0x3
/* id_aa64mmfr0 */ /* id_aa64mmfr0 */
#define ID_AA64MMFR0_ECV_SHIFT 60 #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_FGT_SHIFT 56 #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_EXS_SHIFT 44 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1
#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
#define ID_AA64MMFR0_TGRAN16_SHIFT 20
#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
#define ID_AA64MMFR0_SNSMEM_SHIFT 12
#define ID_AA64MMFR0_BIGENDEL_SHIFT 8
#define ID_AA64MMFR0_ASID_SHIFT 4
#define ID_AA64MMFR0_PARANGE_SHIFT 0
#define ID_AA64MMFR0_ASID_8 0x0
#define ID_AA64MMFR0_ASID_16 0x2
#define ID_AA64MMFR0_TGRAN4_NI 0xf
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN64_NI 0xf
#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7
#define ID_AA64MMFR0_TGRAN16_NI 0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1
#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf
#define ID_AA64MMFR0_PARANGE_32 0x0
#define ID_AA64MMFR0_PARANGE_36 0x1
#define ID_AA64MMFR0_PARANGE_40 0x2
#define ID_AA64MMFR0_PARANGE_42 0x3
#define ID_AA64MMFR0_PARANGE_44 0x4
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
#define ARM64_MIN_PARANGE_BITS 32 #define ARM64_MIN_PARANGE_BITS 32
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
#ifdef CONFIG_ARM64_PA_BITS_52 #ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52 #define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52
#else #else
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48 #define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
#endif #endif
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_ECBHB_SHIFT 60
#define ID_AA64MMFR1_TIDCP1_SHIFT 52
#define ID_AA64MMFR1_HCX_SHIFT 40
#define ID_AA64MMFR1_AFP_SHIFT 44
#define ID_AA64MMFR1_ETS_SHIFT 36
#define ID_AA64MMFR1_TWED_SHIFT 32
#define ID_AA64MMFR1_XNX_SHIFT 28
#define ID_AA64MMFR1_SPECSEI_SHIFT 24
#define ID_AA64MMFR1_PAN_SHIFT 20
#define ID_AA64MMFR1_LOR_SHIFT 16
#define ID_AA64MMFR1_HPD_SHIFT 12
#define ID_AA64MMFR1_VHE_SHIFT 8
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
#define ID_AA64MMFR1_HADBS_SHIFT 0
#define ID_AA64MMFR1_VMIDBITS_8 0
#define ID_AA64MMFR1_VMIDBITS_16 2
#define ID_AA64MMFR1_TIDCP1_NI 0
#define ID_AA64MMFR1_TIDCP1_IMP 1
/* id_aa64mmfr2 */
#define ID_AA64MMFR2_E0PD_SHIFT 60
#define ID_AA64MMFR2_EVT_SHIFT 56
#define ID_AA64MMFR2_BBM_SHIFT 52
#define ID_AA64MMFR2_TTL_SHIFT 48
#define ID_AA64MMFR2_FWB_SHIFT 40
#define ID_AA64MMFR2_IDS_SHIFT 36
#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_ST_SHIFT 28
#define ID_AA64MMFR2_NV_SHIFT 24
#define ID_AA64MMFR2_CCIDX_SHIFT 20
#define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12
#define ID_AA64MMFR2_LSM_SHIFT 8
#define ID_AA64MMFR2_UAO_SHIFT 4
#define ID_AA64MMFR2_CNP_SHIFT 0
/* id_aa64dfr0 */
#define ID_AA64DFR0_MTPMU_SHIFT 48
#define ID_AA64DFR0_TRBE_SHIFT 44
#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
#define ID_AA64DFR0_PMSVER_SHIFT 32
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
#define ID_AA64DFR0_WRPS_SHIFT 20
#define ID_AA64DFR0_BRPS_SHIFT 12
#define ID_AA64DFR0_PMUVER_SHIFT 8
#define ID_AA64DFR0_TRACEVER_SHIFT 4
#define ID_AA64DFR0_DEBUGVER_SHIFT 0
#define ID_AA64DFR0_PMUVER_8_0 0x1
#define ID_AA64DFR0_PMUVER_8_1 0x4
#define ID_AA64DFR0_PMUVER_8_4 0x5
#define ID_AA64DFR0_PMUVER_8_5 0x6
#define ID_AA64DFR0_PMUVER_8_7 0x7
#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
#define ID_AA64DFR0_PMSVER_8_2 0x1
#define ID_AA64DFR0_PMSVER_8_3 0x2
#define ID_DFR0_PERFMON_SHIFT 24 #define ID_DFR0_PERFMON_SHIFT 24
#define ID_DFR0_PERFMON_8_0 0x3 #define ID_DFR0_PERFMON_8_0 0x3
...@@ -955,20 +799,20 @@ ...@@ -955,20 +799,20 @@
#define ID_PFR1_PROGMOD_SHIFT 0 #define ID_PFR1_PROGMOD_SHIFT 0
#if defined(CONFIG_ARM64_4K_PAGES) #if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES) #elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
#elif defined(CONFIG_ARM64_64K_PAGES) #elif defined(CONFIG_ARM64_64K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN64_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX
#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
#endif #endif
#define MVFR2_FPMISC_SHIFT 4 #define MVFR2_FPMISC_SHIFT 4
...@@ -1028,9 +872,6 @@ ...@@ -1028,9 +872,6 @@
#define TRFCR_ELx_ExTRE BIT(1) #define TRFCR_ELx_ExTRE BIT(1)
#define TRFCR_ELx_E0TRE BIT(0) #define TRFCR_ELx_E0TRE BIT(0)
/* HCRX_EL2 definitions */
#define HCRX_EL2_SMPME_MASK (1 << 5)
/* GIC Hypervisor interface registers */ /* GIC Hypervisor interface registers */
/* ICH_MISR_EL2 bit definitions */ /* ICH_MISR_EL2 bit definitions */
#define ICH_MISR_EOI (1 << 0) #define ICH_MISR_EOI (1 << 0)
......
@@ -18,7 +18,7 @@
 struct pt_regs;

-void die(const char *msg, struct pt_regs *regs, int err);
+void die(const char *msg, struct pt_regs *regs, long err);

 struct siginfo;
 void arm64_notify_die(const char *str, struct pt_regs *regs,
......
...@@ -26,6 +26,9 @@ ...@@ -26,6 +26,9 @@
(void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \ (void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \
}) })
extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_H */ #endif /* __ASM_VDSO_H */
@@ -7,8 +7,10 @@
 #ifndef __ASSEMBLY__

+#include <asm/alternative.h>
 #include <asm/barrier.h>
 #include <asm/unistd.h>
+#include <asm/sysreg.h>

 #define VDSO_HAS_CLOCK_GETRES		1
@@ -78,11 +80,20 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
 		return 0;

 	/*
-	 * This isb() is required to prevent that the counter value
+	 * If FEAT_ECV is available, use the self-synchronizing counter.
+	 * Otherwise the isb is required to prevent that the counter value
 	 * is speculated.
 	 */
-	isb();
-	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
+	asm volatile(
+	ALTERNATIVE("isb\n"
+		    "mrs %0, cntvct_el0",
+		    "nop\n"
+		    __mrs_s("%0", SYS_CNTVCTSS_EL0),
+		    ARM64_HAS_ECV)
+	: "=r" (res)
+	:
+	: "memory");

 	arch_counter_enforce_ordering(res);
 	return res;
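
The ALTERNATIVE() above is patched in place before the vDSO is ever mapped into a process, so userspace only ever runs one of the two sequences. As a rough user-space model (this is not the kernel mechanism; has_ecv and the reader names are invented for illustration), the effect is equivalent to selecting one of two counter readers exactly once at boot:

	/* Illustrative model of boot-time selection between the two paths. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t counter;		/* stands in for CNTVCT_EL0 */

	static uint64_t read_counter_isb(void)
	{
		/* Legacy path: an ISB before the read stops the CPU from
		 * speculating the counter value ahead of earlier instructions. */
		return counter;
	}

	static uint64_t read_counter_ss(void)
	{
		/* FEAT_ECV path: CNTVCTSS_EL0 is self-synchronising, no ISB. */
		return counter;
	}

	static uint64_t (*get_hw_counter)(void);

	static void apply_alternatives(bool has_ecv)
	{
		get_hw_counter = has_ecv ? read_counter_ss : read_counter_isb;
	}

	int main(void)
	{
		counter = 12345;
		apply_alternatives(true);
		printf("counter = %llu\n", (unsigned long long)get_hw_counter());
		return 0;
	}

The real mechanism rewrites the instructions themselves (see apply_alternatives_vdso() further down), so there is no indirect call and no runtime branch in the hot path.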
......
@@ -92,5 +92,6 @@
 #define HWCAP2_SME_FA64		(1 << 30)
 #define HWCAP2_WFXT		(1UL << 31)
 #define HWCAP2_EBF16		(1UL << 32)
+#define HWCAP2_SVE_EBF16	(1UL << 33)

 #endif /* _UAPI__ASM_HWCAP_H */
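
Userspace can probe the new capability with getauxval(); the HWCAP2_* values below are taken from the UAPI header above, with fallback definitions only in case the toolchain headers predate them.

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_EBF16
	#define HWCAP2_EBF16		(1UL << 32)
	#endif
	#ifndef HWCAP2_SVE_EBF16
	#define HWCAP2_SVE_EBF16	(1UL << 33)
	#endif

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		printf("ebf16:    %s\n", (hwcap2 & HWCAP2_EBF16) ? "yes" : "no");
		printf("sveebf16: %s\n", (hwcap2 & HWCAP2_SVE_EBF16) ? "yes" : "no");
		return 0;
	}

The same bit is what /proc/cpuinfo reports as "sveebf16" (see the hwcap_str addition below).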
@@ -10,11 +10,14 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
+#include <linux/elf.h>
 #include <asm/cacheflush.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
+#include <asm/module.h>
 #include <asm/sections.h>
+#include <asm/vdso.h>
 #include <linux/stop_machine.h>

 #define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
@@ -192,6 +195,30 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
 	}
 }

+void apply_alternatives_vdso(void)
+{
+	struct alt_region region;
+	const struct elf64_hdr *hdr;
+	const struct elf64_shdr *shdr;
+	const struct elf64_shdr *alt;
+	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
+
+	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+
+	hdr = (struct elf64_hdr *)vdso_start;
+	shdr = (void *)hdr + hdr->e_shoff;
+	alt = find_section(hdr, shdr, ".altinstructions");
+	if (!alt)
+		return;
+
+	region = (struct alt_region){
+		.begin	= (void *)hdr + alt->sh_offset,
+		.end	= (void *)hdr + alt->sh_offset + alt->sh_size,
+	};
+
+	__apply_alternatives(&region, false, &all_capabilities[0]);
+}
+
 /*
  * We might be patching the stop_machine state machine, so implement a
  * really simple polling protocol here.
@@ -225,6 +252,7 @@ static int __apply_alternatives_multi_stop(void *unused)

 void __init apply_alternatives_all(void)
 {
+	apply_alternatives_vdso();
 	/* better not try code patching on a live SMP system */
 	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
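
apply_alternatives_vdso() treats the embedded vDSO image as an ordinary ELF file and looks up its .altinstructions section by name. The same lookup is easy to reproduce from userspace; the sketch below (illustrative, not kernel code) walks the section header table of an arbitrary ELF64 file the way find_section() does.

	#include <elf.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	static const Elf64_Shdr *find_section(const Elf64_Ehdr *hdr,
					      const Elf64_Shdr *sechdrs,
					      const char *name)
	{
		/* Section names live in the section-header string table. */
		const char *secstrs = (const char *)hdr +
				      sechdrs[hdr->e_shstrndx].sh_offset;

		for (int i = 0; i < hdr->e_shnum; i++) {
			if (!strcmp(name, secstrs + sechdrs[i].sh_name))
				return &sechdrs[i];
		}
		return NULL;
	}

	int main(int argc, char **argv)
	{
		if (argc < 3) {
			fprintf(stderr, "usage: %s <elf-file> <section>\n", argv[0]);
			return 1;
		}

		int fd = open(argv[1], O_RDONLY);
		struct stat st;
		if (fd < 0 || fstat(fd, &st) < 0)
			return 1;

		void *base = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (base == MAP_FAILED)
			return 1;

		const Elf64_Ehdr *hdr = base;
		const Elf64_Shdr *shdrs =
			(const Elf64_Shdr *)((const char *)hdr + hdr->e_shoff);
		const Elf64_Shdr *sec = find_section(hdr, shdrs, argv[2]);

		if (sec)
			printf("%s: offset %#llx, size %#llx\n", argv[2],
			       (unsigned long long)sec->sh_offset,
			       (unsigned long long)sec->sh_size);
		else
			printf("%s: not found\n", argv[2]);
		return 0;
	}

For example, "./findsec /bin/ls .text" prints the offset and size of that binary's .text section; the kernel does the same against the in-memory vDSO image for ".altinstructions".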
......
...@@ -121,6 +121,22 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) ...@@ -121,6 +121,22 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
} }
static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
static void __maybe_unused
cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
{
struct arm64_ftr_reg *regp;
regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
if (!regp)
return;
raw_spin_lock(&reg_user_mask_modification);
if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
raw_spin_unlock(&reg_user_mask_modification);
}
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \ .matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
...@@ -691,6 +707,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -691,6 +707,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
CAP_MIDR_RANGE_LIST(broken_aarch32_aes), CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
}, },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
{
.desc = "ARM erratum 2658417",
.capability = ARM64_WORKAROUND_2658417,
/* Cortex-A510 r0p0 - r1p1 */
ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
.cpu_enable = cpu_clear_bf16_from_user_emulation,
},
#endif #endif
{ {
} }
......
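
The Cortex-A510 workaround above does not patch any code path; it only hides the BF16 field from the sanitised ID-register view that the kernel's MRS emulation exposes to EL0. On an arm64 Linux host (an assumption of this snippet: EL0 reads of the ID registers are trapped and emulated by the kernel), the visible value can be inspected directly; BF16 occupies ID_AA64ISAR1_EL1[47:44]. This is a quick spot check, not a test of the erratum itself.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t isar1;

		/* Trapped and emulated by the kernel, which applies user_mask. */
		asm volatile("mrs %0, id_aa64isar1_el1" : "=r" (isar1));

		printf("ID_AA64ISAR1_EL1.BF16 = %llu\n",
		       (unsigned long long)((isar1 >> 44) & 0xf));
		return 0;
	}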
@@ -115,6 +115,7 @@ static const char *const hwcap_str[] = {
 	[KERNEL_HWCAP_SME_FA64]		= "smefa64",
 	[KERNEL_HWCAP_WFXT]		= "wfxt",
 	[KERNEL_HWCAP_EBF16]		= "ebf16",
+	[KERNEL_HWCAP_SVE_EBF16]	= "sveebf16",
 };

 #ifdef CONFIG_COMPAT
......
@@ -28,7 +28,7 @@
 u8 debug_monitors_arch(void)
 {
 	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
-						 ID_AA64DFR0_DEBUGVER_SHIFT);
+						 ID_AA64DFR0_EL1_DebugVer_SHIFT);
 }

 /*
......
...@@ -379,11 +379,20 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr) ...@@ -379,11 +379,20 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs);
} }
static void noinstr el1_undef(struct pt_regs *regs) static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_kernel_mode(regs); enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_undefinstr(regs); do_undefinstr(regs, esr);
local_daif_mask();
exit_to_kernel_mode(regs);
}
static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_el1_bti(regs, esr);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs);
} }
...@@ -402,7 +411,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr) ...@@ -402,7 +411,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_kernel_mode(regs); enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_ptrauth_fault(regs, esr); do_el1_fpac(regs, esr);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs);
} }
...@@ -425,7 +434,10 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) ...@@ -425,7 +434,10 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
break; break;
case ESR_ELx_EC_SYS64: case ESR_ELx_EC_SYS64:
case ESR_ELx_EC_UNKNOWN: case ESR_ELx_EC_UNKNOWN:
el1_undef(regs); el1_undef(regs, esr);
break;
case ESR_ELx_EC_BTI:
el1_bti(regs, esr);
break; break;
case ESR_ELx_EC_BREAKPT_CUR: case ESR_ELx_EC_BREAKPT_CUR:
case ESR_ELx_EC_SOFTSTP_CUR: case ESR_ELx_EC_SOFTSTP_CUR:
...@@ -582,11 +594,11 @@ static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr) ...@@ -582,11 +594,11 @@ static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
exit_to_user_mode(regs); exit_to_user_mode(regs);
} }
static void noinstr el0_undef(struct pt_regs *regs) static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_undefinstr(regs); do_undefinstr(regs, esr);
exit_to_user_mode(regs); exit_to_user_mode(regs);
} }
...@@ -594,7 +606,7 @@ static void noinstr el0_bti(struct pt_regs *regs) ...@@ -594,7 +606,7 @@ static void noinstr el0_bti(struct pt_regs *regs)
{ {
enter_from_user_mode(regs); enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_bti(regs); do_el0_bti(regs);
exit_to_user_mode(regs); exit_to_user_mode(regs);
} }
...@@ -629,7 +641,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr) ...@@ -629,7 +641,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_ptrauth_fault(regs, esr); do_el0_fpac(regs, esr);
exit_to_user_mode(regs); exit_to_user_mode(regs);
} }
...@@ -670,7 +682,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs) ...@@ -670,7 +682,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
el0_pc(regs, esr); el0_pc(regs, esr);
break; break;
case ESR_ELx_EC_UNKNOWN: case ESR_ELx_EC_UNKNOWN:
el0_undef(regs); el0_undef(regs, esr);
break; break;
case ESR_ELx_EC_BTI: case ESR_ELx_EC_BTI:
el0_bti(regs); el0_bti(regs);
...@@ -788,7 +800,7 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs) ...@@ -788,7 +800,7 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_CP14_MR: case ESR_ELx_EC_CP14_MR:
case ESR_ELx_EC_CP14_LS: case ESR_ELx_EC_CP14_LS:
case ESR_ELx_EC_CP14_64: case ESR_ELx_EC_CP14_64:
el0_undef(regs); el0_undef(regs, esr);
break; break;
case ESR_ELx_EC_CP15_32: case ESR_ELx_EC_CP15_32:
case ESR_ELx_EC_CP15_64: case ESR_ELx_EC_CP15_64:
......
...@@ -217,11 +217,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, ...@@ -217,11 +217,26 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip; unsigned long pc = rec->ip;
u32 old = 0, new; u32 old = 0, new;
new = aarch64_insn_gen_nop();
/*
* When using mcount, callsites in modules may have been initalized to
* call an arbitrary module PLT (which redirects to the _mcount stub)
* rather than the ftrace PLT we'll use at runtime (which redirects to
* the ftrace trampoline). We can ignore the old PLT when initializing
* the callsite.
*
* Note: 'mod' is only set at module load time.
*/
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
return aarch64_insn_patch_text_nosync((void *)pc, new);
}
if (!ftrace_find_callable_addr(rec, mod, &addr)) if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL; return -EINVAL;
old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true); return ftrace_modify_code(pc, old, new, true);
} }
......
...@@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry) ...@@ -99,7 +99,7 @@ SYM_CODE_START(primary_entry)
*/ */
#if VA_BITS > 48 #if VA_BITS > 48
mrs_s x0, SYS_ID_AA64MMFR2_EL1 mrs_s x0, SYS_ID_AA64MMFR2_EL1
tst x0, #0xf << ID_AA64MMFR2_LVA_SHIFT tst x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
mov x0, #VA_BITS mov x0, #VA_BITS
mov x25, #VA_BITS_MIN mov x25, #VA_BITS_MIN
csel x25, x25, x0, eq csel x25, x25, x0, eq
...@@ -656,10 +656,10 @@ SYM_FUNC_END(__secondary_too_slow) ...@@ -656,10 +656,10 @@ SYM_FUNC_END(__secondary_too_slow)
*/ */
SYM_FUNC_START(__enable_mmu) SYM_FUNC_START(__enable_mmu)
mrs x3, ID_AA64MMFR0_EL1 mrs x3, ID_AA64MMFR0_EL1
ubfx x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4 ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
cmp x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
b.lt __no_granule_support b.lt __no_granule_support
cmp x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
b.gt __no_granule_support b.gt __no_granule_support
phys_to_ttbr x2, x2 phys_to_ttbr x2, x2
msr ttbr0_el1, x2 // load TTBR0 msr ttbr0_el1, x2 // load TTBR0
...@@ -677,7 +677,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva) ...@@ -677,7 +677,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
b.ne 2f b.ne 2f
mrs_s x0, SYS_ID_AA64MMFR2_EL1 mrs_s x0, SYS_ID_AA64MMFR2_EL1
and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT) and x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
cbnz x0, 2f cbnz x0, 2f
update_early_cpu_boot_status \ update_early_cpu_boot_status \
......
...@@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync) ...@@ -98,7 +98,7 @@ SYM_CODE_START_LOCAL(elx_sync)
SYM_CODE_END(elx_sync) SYM_CODE_END(elx_sync)
SYM_CODE_START_LOCAL(__finalise_el2) SYM_CODE_START_LOCAL(__finalise_el2)
check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve check_override id_aa64pfr0 ID_AA64PFR0_EL1_SVE_SHIFT .Linit_sve .Lskip_sve
.Linit_sve: /* SVE register access */ .Linit_sve: /* SVE register access */
mrs x0, cptr_el2 // Disable SVE traps mrs x0, cptr_el2 // Disable SVE traps
...@@ -109,7 +109,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) ...@@ -109,7 +109,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
msr_s SYS_ZCR_EL2, x1 // length for EL1. msr_s SYS_ZCR_EL2, x1 // length for EL1.
.Lskip_sve: .Lskip_sve:
check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme
.Linit_sme: /* SME register access and priority mapping */ .Linit_sme: /* SME register access and priority mapping */
mrs x0, cptr_el2 // Disable SME traps mrs x0, cptr_el2 // Disable SME traps
...@@ -142,7 +142,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) ...@@ -142,7 +142,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal
mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present?
ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4 ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x1, .Lskip_sme cbz x1, .Lskip_sme
mrs_s x1, SYS_HCRX_EL2 mrs_s x1, SYS_HCRX_EL2
...@@ -157,7 +157,7 @@ SYM_CODE_START_LOCAL(__finalise_el2) ...@@ -157,7 +157,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
tbnz x1, #0, 1f tbnz x1, #0, 1f
// Needs to be VHE capable, obviously // Needs to be VHE capable, obviously
check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f
1: mov_q x0, HVC_STUB_ERR 1: mov_q x0, HVC_STUB_ERR
eret eret
......
...@@ -50,7 +50,7 @@ static const struct ftr_set_desc mmfr1 __initconst = { ...@@ -50,7 +50,7 @@ static const struct ftr_set_desc mmfr1 __initconst = {
.name = "id_aa64mmfr1", .name = "id_aa64mmfr1",
.override = &id_aa64mmfr1_override, .override = &id_aa64mmfr1_override,
.fields = { .fields = {
FIELD("vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter), FIELD("vh", ID_AA64MMFR1_EL1_VH_SHIFT, mmfr1_vh_filter),
{} {}
}, },
}; };
...@@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = { ...@@ -74,7 +74,7 @@ static const struct ftr_set_desc pfr0 __initconst = {
.name = "id_aa64pfr0", .name = "id_aa64pfr0",
.override = &id_aa64pfr0_override, .override = &id_aa64pfr0_override,
.fields = { .fields = {
FIELD("sve", ID_AA64PFR0_SVE_SHIFT, pfr0_sve_filter), FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
{} {}
}, },
}; };
...@@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = { ...@@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = {
.name = "id_aa64pfr1", .name = "id_aa64pfr1",
.override = &id_aa64pfr1_override, .override = &id_aa64pfr1_override,
.fields = { .fields = {
FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ), FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ),
FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL), FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL),
FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter), FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter),
{} {}
}, },
}; };
......
...@@ -37,7 +37,8 @@ struct plt_entry get_plt_entry(u64 dst, void *pc) ...@@ -37,7 +37,8 @@ struct plt_entry get_plt_entry(u64 dst, void *pc)
return plt; return plt;
} }
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b) static bool plt_entries_equal(const struct plt_entry *a,
const struct plt_entry *b)
{ {
u64 p, q; u64 p, q;
......
...@@ -476,21 +476,6 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ...@@ -476,21 +476,6 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return -ENOEXEC; return -ENOEXEC;
} }
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
if (strcmp(name, secstrs + s->sh_name) == 0)
return s;
}
return NULL;
}
static inline void __init_plt(struct plt_entry *plt, unsigned long addr) static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{ {
*plt = get_plt_entry(addr, plt); *plt = get_plt_entry(addr, plt);
......
...@@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = { ...@@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
*/ */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{ {
return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5); return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
} }
static inline bool armv8pmu_event_has_user_read(struct perf_event *event) static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
...@@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info) ...@@ -1145,8 +1145,8 @@ static void __armv8pmu_probe_pmu(void *info)
dfr0 = read_sysreg(id_aa64dfr0_el1); dfr0 = read_sysreg(id_aa64dfr0_el1);
pmuver = cpuid_feature_extract_unsigned_field(dfr0, pmuver = cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_PMUVER_SHIFT); ID_AA64DFR0_EL1_PMUVer_SHIFT);
if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0) if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0)
return; return;
cpu_pmu->pmuver = pmuver; cpu_pmu->pmuver = pmuver;
...@@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info) ...@@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info)
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
/* store PMMIR_EL1 register for sysfs */ /* store PMMIR_EL1 register for sysfs */
if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31))) if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
else else
cpu_pmu->reg_pmmir = 0; cpu_pmu->reg_pmmir = 0;
......
...@@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void) ...@@ -168,7 +168,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
/* If the CPU has CSV2 set, we're safe */ /* If the CPU has CSV2 set, we're safe */
pfr0 = read_cpuid(ID_AA64PFR0_EL1); pfr0 = read_cpuid(ID_AA64PFR0_EL1);
if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
return SPECTRE_UNAFFECTED; return SPECTRE_UNAFFECTED;
/* Alternatively, we have a list of unaffected CPUs */ /* Alternatively, we have a list of unaffected CPUs */
...@@ -945,7 +945,7 @@ static bool supports_ecbhb(int scope) ...@@ -945,7 +945,7 @@ static bool supports_ecbhb(int scope)
mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1, return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_ECBHB_SHIFT); ID_AA64MMFR1_EL1_ECBHB_SHIFT);
} }
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
......
@@ -121,7 +121,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
 	return ((addr & ~(THREAD_SIZE - 1)) ==
 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-		on_irq_stack(addr, sizeof(unsigned long), NULL);
+		on_irq_stack(addr, sizeof(unsigned long));
 }

 /**
@@ -666,10 +666,18 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 static int tls_get(struct task_struct *target, const struct user_regset *regset,
 		   struct membuf to)
 {
+	int ret;
+
 	if (target == current)
 		tls_preserve_current_state();

-	return membuf_store(&to, target->thread.uw.tp_value);
+	ret = membuf_store(&to, target->thread.uw.tp_value);
+	if (system_supports_tpidr2())
+		ret = membuf_store(&to, target->thread.tpidr2_el0);
+	else
+		ret = membuf_zero(&to, sizeof(u64));
+
+	return ret;
 }

 static int tls_set(struct task_struct *target, const struct user_regset *regset,
@@ -677,13 +685,20 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
 		   const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	unsigned long tls = target->thread.uw.tp_value;
+	unsigned long tls[2];

-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+	tls[0] = target->thread.uw.tp_value;
+	if (system_supports_sme())
+		tls[1] = target->thread.tpidr2_el0;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
 	if (ret)
 		return ret;

-	target->thread.uw.tp_value = tls;
+	target->thread.uw.tp_value = tls[0];
+	if (system_supports_sme())
+		target->thread.tpidr2_el0 = tls[1];
+
 	return ret;
 }

@@ -1392,7 +1407,7 @@ static const struct user_regset aarch64_regsets[] = {
 	},
 	[REGSET_TLS] = {
 		.core_note_type = NT_ARM_TLS,
-		.n = 1,
+		.n = 2,
 		.size = sizeof(void *),
 		.align = sizeof(void *),
 		.regset_get = tls_get,
......
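
With .n = 2, the NT_ARM_TLS regset now exports TPIDR_EL0 followed by TPIDR2_EL0 (zero where SME is absent). A debugger reads it with PTRACE_GETREGSET; the sketch below (user-space, minimal error handling, illustrative only) stops a child and dumps both values. On older kernels the returned iov_len simply comes back as 8 rather than 16.

	#include <elf.h>
	#include <signal.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			raise(SIGSTOP);		/* wait for the parent */
			_exit(0);
		}

		waitpid(pid, NULL, 0);

		uint64_t tls[2] = { 0, 0 };
		struct iovec iov = { .iov_base = tls, .iov_len = sizeof(tls) };

		if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov) == 0)
			printf("TPIDR_EL0=%#llx TPIDR2_EL0=%#llx (%zu bytes)\n",
			       (unsigned long long)tls[0],
			       (unsigned long long)tls[1], iov.iov_len);

		ptrace(PTRACE_CONT, pid, NULL, NULL);
		waitpid(pid, NULL, 0);
		return 0;
	}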
...@@ -162,38 +162,6 @@ static int init_sdei_scs(void) ...@@ -162,38 +162,6 @@ static int init_sdei_scs(void)
return err; return err;
} }
static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
}
static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}
bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
if (on_sdei_critical_stack(sp, size, info))
return true;
if (on_sdei_normal_stack(sp, size, info))
return true;
return false;
}
unsigned long sdei_arch_get_entry_point(int conduit) unsigned long sdei_arch_get_entry_point(int conduit)
{ {
/* /*
......
...@@ -67,31 +67,6 @@ static inline void unwind_init_from_task(struct unwind_state *state, ...@@ -67,31 +67,6 @@ static inline void unwind_init_from_task(struct unwind_state *state,
state->pc = thread_saved_pc(task); state->pc = thread_saved_pc(task);
} }
/*
* We can only safely access per-cpu stacks from current in a non-preemptible
* context.
*/
static bool on_accessible_stack(const struct task_struct *tsk,
unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
if (on_task_stack(tsk, sp, size, info))
return true;
if (tsk != current || preemptible())
return false;
if (on_irq_stack(sp, size, info))
return true;
if (on_overflow_stack(sp, size, info))
return true;
if (on_sdei_stack(sp, size, info))
return true;
return false;
}
 /*
  * Unwind from one frame record (A) to the next frame record (B).
  *
@@ -103,14 +78,13 @@ static int notrace unwind_next(struct unwind_state *state)
 {
 	struct task_struct *tsk = state->task;
 	unsigned long fp = state->fp;
-	struct stack_info info;
 	int err;

 	/* Final frame; nothing to unwind */
 	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
 		return -ENOENT;

-	err = unwind_next_common(state, &info, on_accessible_stack, NULL);
+	err = unwind_next_frame_record(state);
 	if (err)
 		return err;
@@ -190,11 +164,47 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 	barrier();
 }
/*
* Per-cpu stacks are only accessible when unwinding the current task in a
* non-preemptible context.
*/
#define STACKINFO_CPU(name) \
({ \
((task == current) && !preemptible()) \
? stackinfo_get_##name() \
: stackinfo_get_unknown(); \
})
/*
* SDEI stacks are only accessible when unwinding the current task in an NMI
* context.
*/
#define STACKINFO_SDEI(name) \
({ \
((task == current) && in_nmi()) \
? stackinfo_get_sdei_##name() \
: stackinfo_get_unknown(); \
})
 noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
 			      void *cookie, struct task_struct *task,
 			      struct pt_regs *regs)
 {
-	struct unwind_state state;
+	struct stack_info stacks[] = {
+		stackinfo_get_task(task),
+		STACKINFO_CPU(irq),
+#if defined(CONFIG_VMAP_STACK)
+		STACKINFO_CPU(overflow),
+#endif
+#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
+		STACKINFO_SDEI(normal),
+		STACKINFO_SDEI(critical),
+#endif
+	};
+	struct unwind_state state = {
+		.stacks = stacks,
+		.nr_stacks = ARRAY_SIZE(stacks),
+	};

 	if (regs) {
 		if (task != current)
......
@@ -180,12 +180,12 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)

 #define S_SMP " SMP"

-static int __die(const char *str, int err, struct pt_regs *regs)
+static int __die(const char *str, long err, struct pt_regs *regs)
 {
 	static int die_counter;
 	int ret;

-	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
+	pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
 		 str, err, ++die_counter);

 	/* trap and error numbers are mostly meaningless on ARM */
@@ -206,7 +206,7 @@ static DEFINE_RAW_SPINLOCK(die_lock);
 /*
  * This function is protected against re-entrancy.
  */
-void die(const char *str, struct pt_regs *regs, int err)
+void die(const char *str, struct pt_regs *regs, long err)
 {
 	int ret;
 	unsigned long flags;
@@ -485,7 +485,7 @@ void arm64_notify_segfault(unsigned long addr)
 	force_signal_inject(SIGSEGV, code, addr, 0);
 }

-void do_undefinstr(struct pt_regs *regs)
+void do_undefinstr(struct pt_regs *regs, unsigned long esr)
 {
 	/* check for AArch32 breakpoint instructions */
 	if (!aarch32_break_handler(regs))
@@ -494,28 +494,38 @@ void do_undefinstr(struct pt_regs *regs)
 	if (call_undef_hook(regs) == 0)
 		return;

-	BUG_ON(!user_mode(regs));
+	if (!user_mode(regs))
+		die("Oops - Undefined instruction", regs, esr);
+
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 }
 NOKPROBE_SYMBOL(do_undefinstr);

-void do_bti(struct pt_regs *regs)
+void do_el0_bti(struct pt_regs *regs)
 {
-	BUG_ON(!user_mode(regs));
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 }
-NOKPROBE_SYMBOL(do_bti);

-void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
+void do_el1_bti(struct pt_regs *regs, unsigned long esr)
+{
+	die("Oops - BTI", regs, esr);
+}
+NOKPROBE_SYMBOL(do_el1_bti);
+
+void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
+{
+	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
+}
+
+void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
 {
 	/*
-	 * Unexpected FPAC exception or pointer authentication failure in
-	 * the kernel: kill the task before it does any more harm.
+	 * Unexpected FPAC exception in the kernel: kill the task before it
+	 * does any more harm.
 	 */
-	BUG_ON(!user_mode(regs));
-	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
+	die("Oops - FPAC", regs, esr);
 }
-NOKPROBE_SYMBOL(do_ptrauth_fault);
+NOKPROBE_SYMBOL(do_el1_fpac)
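
Passing the ESR through to die() means the panic line now carries the full syndrome rather than a hard-coded 0, and the %016lx format above prints all of it. The exception class sits in ESR_ELx[31:26]; a tiny decoder (user-space, with the shift/mask values copied from the architecture and a made-up sample ESR) shows what becomes recoverable from the log:

	#include <stdint.h>
	#include <stdio.h>

	#define ESR_ELx_EC_SHIFT	26
	#define ESR_ELx_EC_MASK		(0x3fUL << ESR_ELx_EC_SHIFT)
	#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

	int main(void)
	{
		uint64_t esr = 0x34000003;	/* sample value; EC 0x0d is the BTI class */

		printf("esr=%#018llx ec=%#04llx\n",
		       (unsigned long long)esr,
		       (unsigned long long)ESR_ELx_EC(esr));
		return 0;
	}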
 #define __user_cache_maint(insn, address, res)			\
 	if (address >= TASK_SIZE_MAX) {				\
@@ -758,7 +768,7 @@ void do_cp15instr(unsigned long esr, struct pt_regs *regs)
 		hook_base = cp15_64_hooks;
 		break;
 	default:
-		do_undefinstr(regs);
+		do_undefinstr(regs, esr);
 		return;
 	}

@@ -773,7 +783,7 @@ void do_cp15instr(unsigned long esr, struct pt_regs *regs)
 	 * EL0. Fall back to our usual undefined instruction handler
 	 * so that we handle these consistently.
 	 */
-	do_undefinstr(regs);
+	do_undefinstr(regs, esr);
 }
 NOKPROBE_SYMBOL(do_cp15instr);
 #endif
@@ -793,7 +803,7 @@ void do_sysinstr(unsigned long esr, struct pt_regs *regs)
 	 * back to our usual undefined instruction handler so that we handle
 	 * these consistently.
 	 */
-	do_undefinstr(regs);
+	do_undefinstr(regs, esr);
 }
 NOKPROBE_SYMBOL(do_sysinstr);

@@ -970,7 +980,7 @@ static int bug_handler(struct pt_regs *regs, unsigned long esr)
 {
 	switch (report_bug(regs->pc, regs)) {
 	case BUG_TRAP_TYPE_BUG:
-		die("Oops - BUG", regs, 0);
+		die("Oops - BUG", regs, esr);
 		break;

 	case BUG_TRAP_TYPE_WARN:
@@ -1038,7 +1048,7 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr)
 	 * This is something that might be fixed at some point in the future.
 	 */
 	if (!recover)
-		die("Oops - KASAN", regs, 0);
+		die("Oops - KASAN", regs, esr);

 	/* If thread survives, skip over the brk instruction and continue: */
 	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
......
...@@ -29,9 +29,6 @@ ...@@ -29,9 +29,6 @@
#include <asm/signal32.h> #include <asm/signal32.h>
#include <asm/vdso.h> #include <asm/vdso.h>
extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];
enum vdso_abi { enum vdso_abi {
VDSO_ABI_AA64, VDSO_ABI_AA64,
VDSO_ABI_AA32, VDSO_ABI_AA32,
......
...@@ -48,6 +48,13 @@ SECTIONS ...@@ -48,6 +48,13 @@ SECTIONS
PROVIDE (_etext = .); PROVIDE (_etext = .);
PROVIDE (etext = .); PROVIDE (etext = .);
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
*(.altinstructions)
__alt_instructions_end = .;
}
.dynamic : { *(.dynamic) } :text :dynamic .dynamic : { *(.dynamic) } :text :dynamic
.rela.dyn : ALIGN(8) { *(.rela .rela*) } .rela.dyn : ALIGN(8) { *(.rela .rela*) }
......
...@@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu) ...@@ -295,12 +295,12 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
* If SPE is present on this CPU and is available at current EL, * If SPE is present on this CPU and is available at current EL,
* we may need to check if the host state needs to be saved. * we may need to check if the host state needs to be saved.
*/ */
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) && if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
!(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT))) !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE); vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
/* Check if we have TRBE implemented and available at the host */ /* Check if we have TRBE implemented and available at the host */
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) && if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
!(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG)) !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE); vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
} }
......
@@ -35,9 +35,9 @@
* - Data Independent Timing
*/
#define PVM_ID_AA64PFR0_ALLOW (\
-ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \
+ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
-ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \
+ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \
+ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
)
/*
@@ -49,11 +49,11 @@
* Supported by KVM
*/
#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
-FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \
+FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
-FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \
+FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
-FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \
+FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
-FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \
+FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
-FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \
+FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \
)
/*
@@ -62,8 +62,8 @@
* - Speculative Store Bypassing
*/
#define PVM_ID_AA64PFR1_ALLOW (\
-ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \
+ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \
-ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \
+ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \
)
/*
@@ -74,10 +74,10 @@
* - Non-context synchronizing exception entry and exit
*/
#define PVM_ID_AA64MMFR0_ALLOW (\
-ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \
+ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
)
/*
@@ -86,8 +86,8 @@
* - 16-bit ASID
*/
#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
-FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \
+FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
-FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \
+FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \
)
/*
@@ -100,12 +100,12 @@
* - Enhanced Translation Synchronization
*/
#define PVM_ID_AA64MMFR1_ALLOW (\
-ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \
+ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) \
)
/*
@@ -120,14 +120,14 @@
* - E0PDx mechanism
*/
#define PVM_ID_AA64MMFR2_ALLOW (\
-ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \
+ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \
-ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \
+ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \
)
/*
...
@@ -20,35 +20,35 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
u64 cptr_set = 0;
/* Protected KVM does not support AArch32 guests. */
-BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
+BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
-PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
+PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
-BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
+BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
+PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
/*
* Linux guests assume support for floating-point and Advanced SIMD. Do
* not change the trapping behavior for these from the KVM default.
*/
-BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
+BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
PVM_ID_AA64PFR0_ALLOW));
-BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
+BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
PVM_ID_AA64PFR0_ALLOW));
/* Trap RAS unless all current versions are supported */
-if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
+if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
-ID_AA64PFR0_RAS_V1P1) {
+ID_AA64PFR0_EL1_RAS_V1P1) {
hcr_set |= HCR_TERR | HCR_TEA;
hcr_clear |= HCR_FIEN;
}
/* Trap AMU */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
hcr_clear |= HCR_AMVOFFEN;
cptr_set |= CPTR_EL2_TAM;
}
/* Trap SVE */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
cptr_set |= CPTR_EL2_TZ;
vcpu->arch.hcr_el2 |= hcr_set;
@@ -66,7 +66,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
u64 hcr_clear = 0;
/* Memory Tagging: Trap and Treat as Untagged if not supported. */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
hcr_set |= HCR_TID5;
hcr_clear |= HCR_DCT | HCR_ATA;
}
@@ -86,32 +86,32 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
u64 cptr_set = 0;
/* Trap/constrain PMU */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
MDCR_EL2_HPMN_MASK;
}
/* Trap Debug */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
/* Trap OS Double Lock */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
mdcr_set |= MDCR_EL2_TDOSA;
/* Trap SPE */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
mdcr_set |= MDCR_EL2_TPMS;
mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
}
/* Trap Trace Filter */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
mdcr_set |= MDCR_EL2_TTRF;
/* Trap Trace */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
cptr_set |= CPTR_EL2_TTA;
vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -128,7 +128,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
u64 mdcr_set = 0;
/* Trap Debug Communications Channel registers */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
mdcr_set |= MDCR_EL2_TDCC;
vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -143,7 +143,7 @@ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
u64 hcr_set = 0;
/* Trap LOR */
-if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids))
+if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
hcr_set |= HCR_TLOR;
vcpu->arch.hcr_el2 |= hcr_set;
...
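The trap-setup hunks above all follow one pattern: extract a 4-bit ID register field with FIELD_GET() and ARM64_FEATURE_MASK(), then set a trap bit when the feature is missing or too old. Below is a minimal, self-contained userspace sketch of that pattern, not kernel code: GENMASK64(), field_get() and the CPTR_EL2_TAM bit position are local stand-ins for the kernel's GENMASK_ULL(), FIELD_GET() and register definitions.

#include <stdio.h>

/* Local stand-in for the kernel's GENMASK_ULL(). */
#define GENMASK64(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
/* ID_AA64PFR0_EL1.AMU lives in bits 47:44 (see the fixed_config.h hunk above). */
#define AMU_MASK	GENMASK64(47, 44)

/* Local stand-in for FIELD_GET(): shift the masked field down to bit 0. */
static unsigned int field_get(unsigned long long mask, unsigned long long reg)
{
	return (unsigned int)((reg & mask) >> __builtin_ctzll(mask));
}

int main(void)
{
	unsigned long long feature_ids = 0;	/* pretend AMU is not implemented */
	unsigned long long cptr_set = 0;

	if (!field_get(AMU_MASK, feature_ids))
		cptr_set |= 1ULL << 30;		/* stand-in for CPTR_EL2_TAM */

	printf("cptr_set = %#llx\n", cptr_set);
	return 0;
}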
@@ -39,41 +39,32 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
-static bool on_overflow_stack(unsigned long sp, unsigned long size,
-struct stack_info *info)
+static struct stack_info stackinfo_get_overflow(void)
{
unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
-return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+return (struct stack_info) {
+.low = low,
+.high = high,
+};
}
-static bool on_hyp_stack(unsigned long sp, unsigned long size,
-struct stack_info *info)
+static struct stack_info stackinfo_get_hyp(void)
{
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
unsigned long high = params->stack_hyp_va;
unsigned long low = high - PAGE_SIZE;
-return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
-}
-static bool on_accessible_stack(const struct task_struct *tsk,
-unsigned long sp, unsigned long size,
-struct stack_info *info)
-{
-if (info)
-info->type = STACK_TYPE_UNKNOWN;
-return (on_overflow_stack(sp, size, info) ||
-on_hyp_stack(sp, size, info));
+return (struct stack_info) {
+.low = low,
+.high = high,
+};
}
static int unwind_next(struct unwind_state *state)
{
-struct stack_info info;
-return unwind_next_common(state, &info, on_accessible_stack, NULL);
+return unwind_next_frame_record(state);
}
static void notrace unwind(struct unwind_state *state,
@@ -129,7 +120,14 @@ static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
*/
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
-struct unwind_state state;
+struct stack_info stacks[] = {
+stackinfo_get_overflow(),
+stackinfo_get_hyp(),
+};
+struct unwind_state state = {
+.stacks = stacks,
+.nr_stacks = ARRAY_SIZE(stacks),
+};
int idx = 0;
kvm_nvhe_unwind_init(&state, fp, pc);
...
@@ -92,9 +92,9 @@ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
/* Spectre and Meltdown mitigation in KVM */
-set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
+set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
(u64)kvm->arch.pfr0_csv2);
-set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
+set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
(u64)kvm->arch.pfr0_csv3);
return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
@@ -106,7 +106,7 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
if (!kvm_has_mte(kvm))
-allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
return id_aa64pfr1_el1_sys_val & allow_mask;
}
@@ -281,8 +281,8 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
* No support for AArch32 guests, therefore, pKVM has no sanitized copy
* of AArch32 feature id registers.
*/
-BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
+BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);
+PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
return pvm_access_raz_wi(vcpu, p, r);
}
...
@@ -61,7 +61,7 @@ struct kvm_pgtable_walk_data {
static bool kvm_phys_is_valid(u64 phys)
{
-return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
+return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
}
static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
...
@@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
pmuver = kvm->arch.arm_pmu->pmuver;
switch (pmuver) {
-case ID_AA64DFR0_PMUVER_8_0:
+case ID_AA64DFR0_EL1_PMUVer_IMP:
return GENMASK(9, 0);
-case ID_AA64DFR0_PMUVER_8_1:
-case ID_AA64DFR0_PMUVER_8_4:
-case ID_AA64DFR0_PMUVER_8_5:
-case ID_AA64DFR0_PMUVER_8_7:
+case ID_AA64DFR0_EL1_PMUVer_V3P1:
+case ID_AA64DFR0_EL1_PMUVer_V3P4:
+case ID_AA64DFR0_EL1_PMUVer_V3P5:
+case ID_AA64DFR0_EL1_PMUVer_V3P7:
return GENMASK(15, 0);
default: /* Shouldn't be here, just for sanity */
WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
@@ -774,7 +774,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
{
struct arm_pmu_entry *entry;
-if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
return;
mutex_lock(&arm_pmus_lock);
@@ -828,7 +828,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
if (event->pmu) {
pmu = to_arm_pmu(event->pmu);
if (pmu->pmuver == 0 ||
-pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
pmu = NULL;
}
@@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
* Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
* as RAZ
*/
-if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
+if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
base = 32;
}
...
@@ -359,7 +359,7 @@ int kvm_set_ipa_limit(void)
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
parange = cpuid_feature_extract_unsigned_field(mmfr0,
-ID_AA64MMFR0_PARANGE_SHIFT);
+ID_AA64MMFR0_EL1_PARANGE_SHIFT);
/*
* IPA size beyond 48 bits could not be supported
* on either 4K or 16K page size. Hence let's cap
@@ -367,20 +367,20 @@ int kvm_set_ipa_limit(void)
* on the system.
*/
if (PAGE_SIZE != SZ_64K)
-parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
+parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);
/*
* Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
* Stage-2. If not, things will stop very quickly.
*/
-switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
-case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
+switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
+case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
return -EINVAL;
-case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
+case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
break;
-case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
+case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
break;
default:
...
@@ -21,6 +21,54 @@
#include <asm/stacktrace/nvhe.h>
static struct stack_info stackinfo_get_overflow(void)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
unsigned long high = low + OVERFLOW_STACK_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
static struct stack_info stackinfo_get_overflow_kern_va(void)
{
unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
static struct stack_info stackinfo_get_hyp(void)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
unsigned long low = (unsigned long)stacktrace_info->stack_base;
unsigned long high = low + PAGE_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
static struct stack_info stackinfo_get_hyp_kern_va(void)
{
unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
unsigned long high = low + PAGE_SIZE;
return (struct stack_info) {
.low = low,
.high = high,
};
}
/*
* kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs
*
@@ -34,73 +82,45 @@
* Returns true on success and updates @addr to its corresponding kernel VA;
* otherwise returns false.
*/
-static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
-enum stack_type type)
+static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
{
-struct kvm_nvhe_stacktrace_info *stacktrace_info;
-unsigned long hyp_base, kern_base, hyp_offset;
+struct stack_info stack_hyp, stack_kern;
-stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+stack_hyp = stackinfo_get_hyp();
+stack_kern = stackinfo_get_hyp_kern_va();
+if (stackinfo_on_stack(&stack_hyp, *addr, size))
+goto found;
-switch (type) {
-case STACK_TYPE_HYP:
-kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
-hyp_base = (unsigned long)stacktrace_info->stack_base;
-break;
-case STACK_TYPE_OVERFLOW:
-kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
-hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
-break;
-default:
-return false;
-}
-hyp_offset = *addr - hyp_base;
-*addr = kern_base + hyp_offset;
+stack_hyp = stackinfo_get_overflow();
+stack_kern = stackinfo_get_overflow_kern_va();
+if (stackinfo_on_stack(&stack_hyp, *addr, size))
+goto found;
+return false;
+found:
+*addr = *addr - stack_hyp.low + stack_kern.low;
return true;
}
-static bool on_overflow_stack(unsigned long sp, unsigned long size,
-struct stack_info *info)
-{
-struct kvm_nvhe_stacktrace_info *stacktrace_info
-= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
-unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
-unsigned long high = low + OVERFLOW_STACK_SIZE;
-return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
-}
-static bool on_hyp_stack(unsigned long sp, unsigned long size,
-struct stack_info *info)
-{
-struct kvm_nvhe_stacktrace_info *stacktrace_info
-= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
-unsigned long low = (unsigned long)stacktrace_info->stack_base;
-unsigned long high = low + PAGE_SIZE;
-return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
-}
-static bool on_accessible_stack(const struct task_struct *tsk,
-unsigned long sp, unsigned long size,
-struct stack_info *info)
-{
-if (info)
-info->type = STACK_TYPE_UNKNOWN;
-return (on_overflow_stack(sp, size, info) ||
-on_hyp_stack(sp, size, info));
-}
+/*
+* Convert a KVN nVHE HYP frame record address to a kernel VA
+*/
+static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
+{
+return kvm_nvhe_stack_kern_va(addr, 16);
+}
static int unwind_next(struct unwind_state *state)
{
-struct stack_info info;
-return unwind_next_common(state, &info, on_accessible_stack,
-kvm_nvhe_stack_kern_va);
+/*
+* The FP is in the hypervisor VA space. Convert it to the kernel VA
+* space so it can be unwound by the regular unwind functions.
+*/
+if (!kvm_nvhe_stack_kern_record_va(&state->fp))
+return -EINVAL;
+return unwind_next_frame_record(state);
}
static void unwind(struct unwind_state *state,
@@ -158,7 +178,14 @@ static void kvm_nvhe_dump_backtrace_end(void)
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
struct kvm_nvhe_stacktrace_info *stacktrace_info;
-struct unwind_state state;
+struct stack_info stacks[] = {
+stackinfo_get_overflow_kern_va(),
+stackinfo_get_hyp_kern_va(),
+};
+struct unwind_state state = {
+.stacks = stacks,
+.nr_stacks = ARRAY_SIZE(stacks),
+};
stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
...
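As a quick orientation to the rewritten kvm_nvhe_stack_kern_va() above, the sketch below shows the two operations it composes: a [low, high) bounds check on the candidate frame record, and rebasing a hypervisor VA onto the kernel's mapping of the same stack. The struct layout and the bounds-check semantics are assumptions read off this diff rather than the kernel's definitive helpers, and the addresses are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct stack_info {
	unsigned long low;
	unsigned long high;
};

/* Assumed semantics: the access [sp, sp + size) must sit entirely inside the stack. */
static bool stackinfo_on_stack(const struct stack_info *info,
			       unsigned long sp, unsigned long size)
{
	if (!info->low)
		return false;
	if (sp < info->low || sp + size < sp || sp + size > info->high)
		return false;
	return true;
}

int main(void)
{
	/* Invented addresses: one HYP-side stack and its kernel-side mapping. */
	struct stack_info hyp  = { .low = 0xffff800008000000UL, .high = 0xffff800008001000UL };
	struct stack_info kern = { .low = 0xffff000012340000UL, .high = 0xffff000012341000UL };
	unsigned long fp = 0xffff800008000f80UL;	/* frame record in HYP VA space */

	if (stackinfo_on_stack(&hyp, fp, 16))
		/* Keep the offset within the stack, rebase onto the kernel VA. */
		fp = fp - hyp.low + kern.low;

	printf("translated fp: %#lx\n", fp);
	return 0;
}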
@@ -273,7 +273,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
u32 sr = reg_to_encoding(r);
-if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
+if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
kvm_inject_undefined(vcpu);
return false;
}
@@ -1077,22 +1077,22 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
switch (id) {
case SYS_ID_AA64PFR0_EL1:
if (!vcpu_has_sve(vcpu))
-val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
-val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
-val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
-val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
-val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
-val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
+val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
+val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
+val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
+val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
if (kvm_vgic_global_state.type == VGIC_V3) {
-val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
-val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
+val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
+val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
}
break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
-val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
-val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
+val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
+val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1110,14 +1110,14 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */
-val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
-val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
+val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
+val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
/* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
-ID_AA64DFR0_PMUVER_SHIFT,
-kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
+ID_AA64DFR0_EL1_PMUVer_SHIFT,
+kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
/* Hide SPE from guests */
-val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
+val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
break;
case SYS_ID_DFR0_EL1:
/* Limit guests to PMUv3 for ARMv8.4 */
@@ -1196,21 +1196,21 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
* it doesn't promise more than what is actually provided (the
* guest could otherwise be covered in ectoplasmic residue).
*/
-csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
+csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
if (csv2 > 1 ||
(csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
return -EINVAL;
/* Same thing for CSV3 */
-csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
+csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
if (csv3 > 1 ||
(csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
return -EINVAL;
/* We can only differ with CSV[23], and anything else is an error */
val ^= read_id_reg(vcpu, rd, false);
-val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
-(0xFUL << ID_AA64PFR0_CSV3_SHIFT));
+val &= ~((0xFUL << ID_AA64PFR0_EL1_CSV2_SHIFT) |
+(0xFUL << ID_AA64PFR0_EL1_CSV3_SHIFT));
if (val)
return -EINVAL;
@@ -1825,11 +1825,11 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
} else {
u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
-u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
+u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
-p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-(((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-(((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
+(((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
+(((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
| (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
return true;
}
...
@@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void)
{
u32 asid;
int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
-ID_AA64MMFR0_ASID_SHIFT);
+ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
switch (fld) {
default:
pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
smp_processor_id(), fld);
fallthrough;
-case ID_AA64MMFR0_ASID_8:
+case ID_AA64MMFR0_EL1_ASIDBITS_8:
asid = 8;
break;
-case ID_AA64MMFR0_ASID_16:
+case ID_AA64MMFR0_EL1_ASIDBITS_16:
asid = 16;
}
...
@@ -360,7 +360,7 @@ void __init arm64_memblock_init(void)
extern u16 memstart_offset_seed;
u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
int parange = cpuid_feature_extract_unsigned_field(
-mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
+mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
s64 range = linear_region_size -
BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
...
@@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void)
pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
return cpuid_feature_extract_unsigned_field(pfr1,
-ID_AA64PFR1_BT_SHIFT);
+ID_AA64PFR1_EL1_BT_SHIFT);
}
/*
...
@@ -434,8 +434,8 @@ SYM_FUNC_START(__cpu_setup)
* (ID_AA64PFR1_EL1[11:8] > 1).
*/
mrs x10, ID_AA64PFR1_EL1
-ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
-cmp x10, #ID_AA64PFR1_MTE
+ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
+cmp x10, #ID_AA64PFR1_EL1_MTE_MTE2
b.lt 1f
/* Normal Tagged memory type at the corresponding MAIR index */
...
@@ -68,6 +68,7 @@ WORKAROUND_2038923
WORKAROUND_2064142
WORKAROUND_2077057
WORKAROUND_2457168
+WORKAROUND_2658417
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
...
@@ -46,6 +46,127 @@
# feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more taseful to do something else.
Sysreg ID_AA64PFR0_EL1 3 0 0 4 0
Enum 63:60 CSV3
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 CSV2
0b0000 NI
0b0001 IMP
0b0010 CSV2_2
0b0011 CSV2_3
EndEnum
Enum 55:52 RME
0b0000 NI
0b0001 IMP
EndEnum
Enum 51:48 DIT
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 AMU
0b0000 NI
0b0001 IMP
0b0010 V1P1
EndEnum
Enum 43:40 MPAM
0b0000 0
0b0001 1
EndEnum
Enum 39:36 SEL2
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 SVE
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 RAS
0b0000 NI
0b0001 IMP
0b0010 V1P1
EndEnum
Enum 27:24 GIC
0b0000 NI
0b0001 IMP
0b0010 V4P1
EndEnum
Enum 23:20 AdvSIMD
0b0000 IMP
0b0001 FP16
0b1111 NI
EndEnum
Enum 19:16 FP
0b0000 IMP
0b0001 FP16
0b1111 NI
EndEnum
Enum 15:12 EL3
0b0000 NI
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 11:8 EL2
0b0000 NI
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 7:4 EL1
0b0001 IMP
0b0010 AARCH32
EndEnum
Enum 3:0 EL0
0b0001 IMP
0b0010 AARCH32
EndEnum
EndSysreg
Sysreg ID_AA64PFR1_EL1 3 0 0 4 1
Res0 63:40
Enum 39:36 NMI
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 CSV2_frac
0b0000 NI
0b0001 CSV2_1p1
0b0010 CSV2_1p2
EndEnum
Enum 31:28 RNDR_trap
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 SME
0b0000 NI
0b0001 IMP
EndEnum
Res0 23:20
Enum 19:16 MPAM_frac
0b0000 MINOR_0
0b0001 MINOR_1
EndEnum
Enum 15:12 RAS_frac
0b0000 NI
0b0001 RASv1p1
EndEnum
Enum 11:8 MTE
0b0000 NI
0b0001 IMP
0b0010 MTE2
0b0011 MTE3
EndEnum
Enum 7:4 SSBS
0b0000 NI
0b0001 IMP
0b0010 SSBS2
EndEnum
Enum 3:0 BT
0b0000 NI
0b0001 IMP
EndEnum
EndSysreg
Sysreg ID_AA64ZFR0_EL1 3 0 0 4 4
Res0 63:60
Enum 59:56 F64MM
@@ -98,7 +219,9 @@ Enum 63 FA64
0b1 IMP
EndEnum
Res0 62:60
-Field 59:56 SMEver
+Enum 59:56 SMEver
+0b0000 IMP
+EndEnum
Enum 55:52 I16I64
0b0000 NI
0b1111 IMP
@@ -129,6 +252,89 @@ EndEnum
Res0 31:0
EndSysreg
Sysreg ID_AA64DFR0_EL1 3 0 0 5 0
Enum 63:60 HPMN0
0b0000 UNPREDICTABLE
0b0001 DEF
EndEnum
Res0 59:56
Enum 55:52 BRBE
0b0000 NI
0b0001 IMP
0b0010 BRBE_V1P1
EndEnum
Enum 51:48 MTPMU
0b0000 NI_IMPDEF
0b0001 IMP
0b1111 NI
EndEnum
Enum 47:44 TraceBuffer
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 TraceFilt
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 DoubleLock
0b0000 IMP
0b1111 NI
EndEnum
Enum 35:32 PMSVer
0b0000 NI
0b0001 IMP
0b0010 V1P1
0b0011 V1P2
0b0100 V1P3
EndEnum
Field 31:28 CTX_CMPs
Res0 27:24
Field 23:20 WRPs
Res0 19:16
Field 15:12 BRPs
Enum 11:8 PMUVer
0b0000 NI
0b0001 IMP
0b0100 V3P1
0b0101 V3P4
0b0110 V3P5
0b0111 V3P7
0b1000 V3P8
0b1111 IMP_DEF
EndEnum
Enum 7:4 TraceVer
0b0000 NI
0b0001 IMP
EndEnum
Enum 3:0 DebugVer
0b0110 IMP
0b0111 VHE
0b1000 V8P2
0b1001 V8P4
0b1010 V8P8
EndEnum
EndSysreg
Sysreg ID_AA64DFR1_EL1 3 0 0 5 1
Res0 63:0
EndSysreg
Sysreg ID_AA64AFR0_EL1 3 0 0 5 4
Res0 63:32
Field 31:28 IMPDEF7
Field 27:24 IMPDEF6
Field 23:20 IMPDEF5
Field 19:16 IMPDEF4
Field 15:12 IMPDEF3
Field 11:8 IMPDEF2
Field 7:4 IMPDEF1
Field 3:0 IMPDEF0
EndSysreg
Sysreg ID_AA64AFR1_EL1 3 0 0 5 5
Res0 63:0
EndSysreg
Sysreg ID_AA64ISAR0_EL1 3 0 0 6 0
Enum 63:60 RNDR
0b0000 NI
@@ -313,6 +519,217 @@ Enum 3:0 WFxT
EndEnum
EndSysreg
Sysreg ID_AA64MMFR0_EL1 3 0 0 7 0
Enum 63:60 ECV
0b0000 NI
0b0001 IMP
0b0010 CNTPOFF
EndEnum
Enum 59:56 FGT
0b0000 NI
0b0001 IMP
EndEnum
Res0 55:48
Enum 47:44 EXS
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 TGRAN4_2
0b0000 TGRAN4
0b0001 NI
0b0010 IMP
0b0011 52_BIT
EndEnum
Enum 39:36 TGRAN64_2
0b0000 TGRAN64
0b0001 NI
0b0010 IMP
EndEnum
Enum 35:32 TGRAN16_2
0b0000 TGRAN16
0b0001 NI
0b0010 IMP
0b0011 52_BIT
EndEnum
Enum 31:28 TGRAN4
0b0000 IMP
0b0001 52_BIT
0b1111 NI
EndEnum
Enum 27:24 TGRAN64
0b0000 IMP
0b1111 NI
EndEnum
Enum 23:20 TGRAN16
0b0000 NI
0b0001 IMP
0b0010 52_BIT
EndEnum
Enum 19:16 BIGENDEL0
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 SNSMEM
0b0000 NI
0b0001 IMP
EndEnum
Enum 11:8 BIGEND
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 ASIDBITS
0b0000 8
0b0010 16
EndEnum
Enum 3:0 PARANGE
0b0000 32
0b0001 36
0b0010 40
0b0011 42
0b0100 44
0b0101 48
0b0110 52
EndEnum
EndSysreg
Sysreg ID_AA64MMFR1_EL1 3 0 0 7 1
Enum 63:60 ECBHB
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 CMOW
0b0000 NI
0b0001 IMP
EndEnum
Enum 55:52 TIDCP1
0b0000 NI
0b0001 IMP
EndEnum
Enum 51:48 nTLBPA
0b0000 NI
0b0001 IMP
EndEnum
Enum 47:44 AFP
0b0000 NI
0b0001 IMP
EndEnum
Enum 43:40 HCX
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 ETS
0b0000 NI
0b0001 IMP
EndEnum
Enum 35:32 TWED
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 XNX
0b0000 NI
0b0001 IMP
EndEnum
Enum 27:24 SpecSEI
0b0000 NI
0b0001 IMP
EndEnum
Enum 23:20 PAN
0b0000 NI
0b0001 IMP
0b0010 PAN2
0b0011 PAN3
EndEnum
Enum 19:16 LO
0b0000 NI
0b0001 IMP
EndEnum
Enum 15:12 HPDS
0b0000 NI
0b0001 IMP
0b0010 HPDS2
EndEnum
Enum 11:8 VH
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 VMIDBits
0b0000 8
0b0010 16
EndEnum
Enum 3:0 HAFDBS
0b0000 NI
0b0001 AF
0b0010 DBM
EndEnum
EndSysreg
Sysreg ID_AA64MMFR2_EL1 3 0 0 7 2
Enum 63:60 E0PD
0b0000 NI
0b0001 IMP
EndEnum
Enum 59:56 EVT
0b0000 NI
0b0001 IMP
0b0010 TTLBxS
EndEnum
Enum 55:52 BBM
0b0000 0
0b0001 1
0b0010 2
EndEnum
Enum 51:48 TTL
0b0000 NI
0b0001 IMP
EndEnum
Res0 47:44
Enum 43:40 FWB
0b0000 NI
0b0001 IMP
EndEnum
Enum 39:36 IDS
0b0000 0x0
0b0001 0x18
EndEnum
Enum 35:32 AT
0b0000 NI
0b0001 IMP
EndEnum
Enum 31:28 ST
0b0000 39
0b0001 48_47
EndEnum
Enum 27:24 NV
0b0000 NI
0b0001 IMP
0b0010 NV2
EndEnum
Enum 23:20 CCIDX
0b0000 32
0b0001 64
EndEnum
Enum 19:16 VARange
0b0000 48
0b0001 52
EndEnum
Enum 15:12 IESB
0b0000 NI
0b0001 IMP
EndEnum
Enum 11:8 LSM
0b0000 NI
0b0001 IMP
EndEnum
Enum 7:4 UAO
0b0000 NI
0b0001 IMP
EndEnum
Enum 3:0 CnP
0b0000 NI
0b0001 IMP
EndEnum
EndSysreg
Sysreg SCTLR_EL1 3 0 1 0 0
Field 63 TIDCP
Field 62 SPINMASK
@@ -427,6 +844,12 @@ Sysreg SMCR_EL1 3 0 1 2 6
Fields SMCR_ELx
EndSysreg
Sysreg ALLINT 3 0 4 3 0
Res0 63:14
Field 13 ALLINT
Res0 12:0
EndSysreg
Sysreg FAR_EL1 3 0 6 0 0
Field 63:0 ADDR
EndSysreg
@@ -440,6 +863,14 @@ Sysreg CONTEXTIDR_EL1 3 0 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
Sysreg TPIDR_EL1 3 0 13 0 4
Field 63:0 ThreadID
EndSysreg
Sysreg SCXTNUM_EL1 3 0 13 0 7
Field 63:0 SoftwareContextNumber
EndSysreg
Sysreg CLIDR_EL1 3 1 0 0 1
Res0 63:47
Field 46:33 Ttypen
@@ -514,6 +945,22 @@ Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg
Sysreg HCRX_EL2 3 4 1 2 2
Res0 63:12
Field 11 MSCEn
Field 10 MCE2
Field 9 CMOW
Field 8 VFNMI
Field 7 VINMI
Field 6 TALLINT
Field 5 SMPME
Field 4 FGTnXS
Field 3 FnXS
Field 2 EnASR
Field 1 EnALS
Field 0 EnAS0
EndSysreg
Sysreg SMPRIMAP_EL2 3 4 1 2 5
Field 63:60 P15
Field 59:56 P14
...
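As a quick orientation to the Sysreg/Enum blocks above: each Enum describes a register field whose shift and named values become C macros used throughout this series (for example ID_AA64PFR0_EL1_SVE_SHIFT and ID_AA64PFR0_EL1_SVE_IMP). The snippet below only illustrates how such a field is read; the two macro values are copied from the SVE entry above, and extract_field() is a local stand-in for the kernel's cpuid_feature_extract_unsigned_field(), not the generator's actual output.

#include <stdio.h>

/* ID_AA64PFR0_EL1.SVE: bits 35:32, 0b0000 = NI, 0b0001 = IMP (from the block above). */
#define ID_AA64PFR0_EL1_SVE_SHIFT	32
#define ID_AA64PFR0_EL1_SVE_IMP		0x1

/* Stand-in for cpuid_feature_extract_unsigned_field(): 4-bit field at 'shift'. */
static unsigned int extract_field(unsigned long long reg, unsigned int shift)
{
	return (unsigned int)((reg >> shift) & 0xf);
}

int main(void)
{
	/* Pretend ID_AA64PFR0_EL1 reports SVE as implemented. */
	unsigned long long pfr0 = 1ULL << ID_AA64PFR0_EL1_SVE_SHIFT;

	if (extract_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT) >= ID_AA64PFR0_EL1_SVE_IMP)
		printf("SVE implemented\n");
	return 0;
}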
@@ -23,8 +23,8 @@ efi_status_t check_platform_features(void)
if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
return EFI_SUCCESS;
-tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
+tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
-if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
+if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
efi_err("This 64 KB granular kernel is not supported by your CPU\n")
else
...
@@ -966,7 +966,7 @@ static inline bool cpu_supports_sysreg_trace(void)
{
u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
-return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
+return ((dfr0 >> ID_AA64DFR0_EL1_TraceVer_SHIFT) & 0xfUL) > 0;
}
static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
@@ -1054,7 +1054,7 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
u64 trfcr;
drvdata->trfcr = 0;
-if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
+if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT))
return;
/*
...
@@ -20,7 +20,8 @@
static inline bool is_trbe_available(void)
{
u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
-unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);
+unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0,
+ID_AA64DFR0_EL1_TraceBuffer_SHIFT);
return trbe >= 0b0001;
}
...
@@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
}
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
cd->ttbr = virt_to_phys(mm->pgd);
@@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
* addresses larger than what we support.
*/
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
oas = id_aa64mmfr0_parange_to_phys_shift(fld);
if (smmu->oas < oas)
return false;
/* We can support bigger ASIDs than the CPU, but not smaller */
-fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
asid_bits = fld ? 16 : 8;
if (smmu->asid_bits < asid_bits)
return false;
...
@@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void)
{
unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
-fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
+fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
return fld >= 0x3;
}
...
@@ -674,9 +674,9 @@ static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
switch (pmsver) {
-case ID_AA64DFR0_PMSVER_8_2:
+case ID_AA64DFR0_EL1_PMSVer_IMP:
return SYS_PMSEVFR_EL1_RES0_8_2;
-case ID_AA64DFR0_PMSVER_8_3:
+case ID_AA64DFR0_EL1_PMSVer_V1P1:
/* Return the highest version we support in default */
default:
return SYS_PMSEVFR_EL1_RES0_8_3;
@@ -958,7 +958,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
struct device *dev = &spe_pmu->pdev->dev;
fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
-ID_AA64DFR0_PMSVER_SHIFT);
+ID_AA64DFR0_EL1_PMSVer_SHIFT);
if (!fld) {
dev_err(dev,
"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
...
ptrace
syscall-abi
tpidr2
...
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2021 ARM Limited
-TEST_GEN_PROGS := syscall-abi tpidr2
+TEST_GEN_PROGS := ptrace syscall-abi tpidr2
include ../../lib.mk
...
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 ARM Limited.
*/
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <asm/sigcontext.h>
#include <asm/ptrace.h>
#include "../../kselftest.h"
#define EXPECTED_TESTS 7
#define MAX_TPIDRS 2
static bool have_sme(void)
{
return getauxval(AT_HWCAP2) & HWCAP2_SME;
}
static void test_tpidr(pid_t child)
{
uint64_t read_val[MAX_TPIDRS];
uint64_t write_val[MAX_TPIDRS];
struct iovec read_iov, write_iov;
bool test_tpidr2 = false;
int ret, i;
read_iov.iov_base = read_val;
write_iov.iov_base = write_val;
/* Should be able to read a single TPIDR... */
read_iov.iov_len = sizeof(uint64_t);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
ksft_test_result(ret == 0, "read_tpidr_one\n");
/* ...write a new value.. */
write_iov.iov_len = sizeof(uint64_t);
write_val[0] = read_val[0]++;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
ksft_test_result(ret == 0, "write_tpidr_one\n");
/* ...then read it back */
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
ksft_test_result(ret == 0 && write_val[0] == read_val[0],
"verify_tpidr_one\n");
/* If we have TPIDR2 we should be able to read it */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
if (ret == 0) {
/* If we have SME there should be two TPIDRs */
if (read_iov.iov_len >= sizeof(read_val))
test_tpidr2 = true;
if (have_sme() && test_tpidr2) {
ksft_test_result(test_tpidr2, "count_tpidrs\n");
} else {
ksft_test_result(read_iov.iov_len % sizeof(uint64_t) == 0,
"count_tpidrs\n");
}
} else {
ksft_test_result_fail("count_tpidrs\n");
}
if (test_tpidr2) {
/* Try to write new values to all known TPIDRs... */
write_iov.iov_len = sizeof(write_val);
for (i = 0; i < MAX_TPIDRS; i++)
write_val[i] = read_val[i] + 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
ksft_test_result(ret == 0 &&
write_iov.iov_len == sizeof(write_val),
"tpidr2_write\n");
/* ...then read them back */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS, &read_iov);
if (have_sme()) {
/* Should read back the written value */
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
memcmp(read_val, write_val,
sizeof(read_val)) == 0,
"tpidr2_read\n");
} else {
/* TPIDR2 should read as zero */
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
read_val[0] == write_val[0] &&
read_val[1] == 0,
"tpidr2_read\n");
}
/* Writing only TPIDR... */
write_iov.iov_len = sizeof(uint64_t);
memcpy(write_val, read_val, sizeof(read_val));
write_val[0] += 1;
ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_TLS, &write_iov);
if (ret == 0) {
/* ...should leave TPIDR2 untouched */
read_iov.iov_len = sizeof(read_val);
ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_TLS,
&read_iov);
ksft_test_result(ret == 0 &&
read_iov.iov_len >= sizeof(read_val) &&
memcmp(read_val, write_val,
sizeof(read_val)) == 0,
"write_tpidr_only\n");
} else {
ksft_test_result_fail("write_tpidr_only\n");
}
} else {
ksft_test_result_skip("tpidr2_write\n");
ksft_test_result_skip("tpidr2_read\n");
ksft_test_result_skip("write_tpidr_only\n");
}
}
static int do_child(void)
{
if (ptrace(PTRACE_TRACEME, -1, NULL, NULL))
ksft_exit_fail_msg("PTRACE_TRACEME", strerror(errno));
if (raise(SIGSTOP))
ksft_exit_fail_msg("raise(SIGSTOP)", strerror(errno));
return EXIT_SUCCESS;
}
static int do_parent(pid_t child)
{
int ret = EXIT_FAILURE;
pid_t pid;
int status;
siginfo_t si;
/* Attach to the child */
while (1) {
int sig;
pid = wait(&status);
if (pid == -1) {
perror("wait");
goto error;
}
/*
* This should never happen but it's hard to flag in
* the framework.
*/
if (pid != child)
continue;
if (WIFEXITED(status) || WIFSIGNALED(status))
ksft_exit_fail_msg("Child died unexpectedly\n");
if (!WIFSTOPPED(status))
goto error;
sig = WSTOPSIG(status);
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si)) {
if (errno == ESRCH)
goto disappeared;
if (errno == EINVAL) {
sig = 0; /* bust group-stop */
goto cont;
}
ksft_test_result_fail("PTRACE_GETSIGINFO: %s\n",
strerror(errno));
goto error;
}
if (sig == SIGSTOP && si.si_code == SI_TKILL &&
si.si_pid == pid)
break;
cont:
if (ptrace(PTRACE_CONT, pid, NULL, sig)) {
if (errno == ESRCH)
goto disappeared;
ksft_test_result_fail("PTRACE_CONT: %s\n",
strerror(errno));
goto error;
}
}
ksft_print_msg("Parent is %d, child is %d\n", getpid(), child);
test_tpidr(child);
ret = EXIT_SUCCESS;
error:
kill(child, SIGKILL);
disappeared:
return ret;
}
int main(void)
{
int ret = EXIT_SUCCESS;
pid_t child;
srandom(getpid());
ksft_print_header();
ksft_set_plan(EXPECTED_TESTS);
child = fork();
if (!child)
return do_child();
if (do_parent(child))
ret = EXIT_FAILURE;
ksft_print_cnts();
return ret;
}