Commit 23221d99 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "Nothing particularly stands out here, probably because people were
  tied up with spectre/meltdown stuff last time around. Still, the main
  pieces are:

   - Rework of our CPU features framework so that we can whitelist CPUs
     that don't require kpti even in a heterogeneous system

   - Support for the IDC/DIC architecture extensions, which allow us to
     elide instruction and data cache maintenance when writing out
     instructions

   - Removal of the large memory model which resulted in suboptimal
     codegen by the compiler and increased the use of literal pools,
     which could potentially be used as ROP gadgets since they are
     mapped as executable

   - Rework of forced signal delivery so that the siginfo_t is
     well-formed and handling of show_unhandled_signals is consolidated
     and made consistent between different fault types

   - More siginfo cleanup based on the initial patches from Eric
     Biederman

   - Workaround for Cortex-A55 erratum #1024718

   - Some small ACPI IORT updates and cleanups from Lorenzo Pieralisi

   - Misc cleanups and non-critical fixes"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
  arm64: uaccess: Fix omissions from usercopy whitelist
  arm64: fpsimd: Split cpu field out from struct fpsimd_state
  arm64: tlbflush: avoid writing RES0 bits
  arm64: cmpxchg: Include linux/compiler.h in asm/cmpxchg.h
  arm64: move percpu cmpxchg implementation from cmpxchg.h to percpu.h
  arm64: cmpxchg: Include build_bug.h instead of bug.h for BUILD_BUG
  arm64: lse: Include compiler_types.h and export.h for out-of-line LL/SC
  arm64: fpsimd: include <linux/init.h> in fpsimd.h
  drivers/perf: arm_pmu_platform: do not warn about affinity on uniprocessor
  perf: arm_spe: include linux/vmalloc.h for vmap()
  Revert "arm64: Revert L1_CACHE_SHIFT back to 6 (64-byte cache line size)"
  arm64: cpufeature: Avoid warnings due to unused symbols
  arm64: Add work around for Arm Cortex-A55 Erratum 1024718
  arm64: Delay enabling hardware DBM feature
  arm64: Add MIDR encoding for Arm Cortex-A55 and Cortex-A35
  arm64: capabilities: Handle shared entries
  arm64: capabilities: Add support for checks based on a list of MIDRs
  arm64: Add helpers for checking CPU MIDR against a range
  arm64: capabilities: Clean up midr range helpers
  arm64: capabilities: Change scope of VHE to Boot CPU feature
  ...
parents 5b1f3dc9 65896545
@@ -110,7 +110,7 @@ infrastructure:
      x--------------------------------------------------x
      | Name                         |  bits   | visible |
      |--------------------------------------------------|
     -| RES0                         | [63-52] |    n    |
     +| TS                           | [55-52] |    y    |
      |--------------------------------------------------|
      | FHM                          | [51-48] |    y    |
      |--------------------------------------------------|
@@ -124,8 +124,6 @@ infrastructure:
      |--------------------------------------------------|
      | RDM                          | [31-28] |    y    |
      |--------------------------------------------------|
     -| RES0                         | [27-24] |    n    |
     -|--------------------------------------------------|
      | ATOMICS                      | [23-20] |    y    |
      |--------------------------------------------------|
      | CRC32                        | [19-16] |    y    |
@@ -135,8 +133,6 @@ infrastructure:
      | SHA1                         | [11-8]  |    y    |
      |--------------------------------------------------|
      | AES                          | [7-4]   |    y    |
     -|--------------------------------------------------|
     -| RES0                         | [3-0]   |    n    |
      x--------------------------------------------------x
@@ -144,12 +140,10 @@ infrastructure:
      x--------------------------------------------------x
      | Name                         |  bits   | visible |
      |--------------------------------------------------|
     -| RES0                         | [63-36] |    n    |
     +| DIT                          | [51-48] |    y    |
      |--------------------------------------------------|
      | SVE                          | [35-32] |    y    |
      |--------------------------------------------------|
     -| RES0                         | [31-28] |    n    |
     -|--------------------------------------------------|
      | GIC                          | [27-24] |    n    |
      |--------------------------------------------------|
      | AdvSIMD                      | [23-20] |    y    |
@@ -199,6 +193,14 @@ infrastructure:
      | DPB                          | [3-0]   |    y    |
      x--------------------------------------------------x
    +
    +  5) ID_AA64MMFR2_EL1 - Memory model feature register 2
    +
    +     x--------------------------------------------------x
    +     | Name                         |  bits   | visible |
    +     |--------------------------------------------------|
    +     | AT                           | [35-32] |    y    |
    +     x--------------------------------------------------x

 Appendix I: Example
 ---------------------------
......
@@ -162,3 +162,19 @@ HWCAP_SVE
 HWCAP_ASIMDFHM

     Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001.
+
+HWCAP_DIT
+
+    Functionality implied by ID_AA64PFR0_EL1.DIT == 0b0001.
+
+HWCAP_USCAT
+
+    Functionality implied by ID_AA64MMFR2_EL1.AT == 0b0001.
+
+HWCAP_ILRCPC
+
+    Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0010.
+
+HWCAP_FLAGM
+
+    Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
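For reference, userspace normally discovers these capabilities through the ELF auxiliary vector rather than by reading the ID registers directly. A minimal sketch of such a check (not part of this series; it assumes the HWCAP_* values added to the uapi header below and glibc's getauxval()) could look like:

/* Illustration only: test the new arm64 hwcaps from userspace. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_DIT
#define HWCAP_DIT    (1 << 24)   /* values from arch/arm64/include/uapi/asm/hwcap.h */
#endif
#ifndef HWCAP_USCAT
#define HWCAP_USCAT  (1 << 25)
#endif
#ifndef HWCAP_ILRCPC
#define HWCAP_ILRCPC (1 << 26)
#endif
#ifndef HWCAP_FLAGM
#define HWCAP_FLAGM  (1 << 27)
#endif

int main(void)
{
        unsigned long hwcaps = getauxval(AT_HWCAP);

        printf("dit:    %s\n", (hwcaps & HWCAP_DIT)    ? "yes" : "no");
        printf("uscat:  %s\n", (hwcaps & HWCAP_USCAT)  ? "yes" : "no");
        printf("ilrcpc: %s\n", (hwcaps & HWCAP_ILRCPC) ? "yes" : "no");
        printf("flagm:  %s\n", (hwcaps & HWCAP_FLAGM)  ? "yes" : "no");
        return 0;
}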
@@ -55,6 +55,7 @@ stable kernels.
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
 | ARM            | Cortex-A72      | #853709         | N/A                         |
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
+| ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718       |
 | ARM            | MMU-500         | #841119,#826419 | N/A                         |
 |                |                 |                 |                             |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
......
@@ -459,12 +459,26 @@ config ARM64_ERRATUM_845719
 config ARM64_ERRATUM_843419
        bool "Cortex-A53: 843419: A load or store might access an incorrect address"
        default y
-       select ARM64_MODULE_CMODEL_LARGE if MODULES
+       select ARM64_MODULE_PLTS if MODULES
        help
          This option links the kernel with '--fix-cortex-a53-843419' and
-         builds modules using the large memory model in order to avoid the use
-         of the ADRP instruction, which can cause a subsequent memory access
-         to use an incorrect address on Cortex-A53 parts up to r0p4.
+         enables PLT support to replace certain ADRP instructions, which can
+         cause subsequent memory accesses to use an incorrect address on
+         Cortex-A53 parts up to r0p4.

          If unsure, say Y.

+config ARM64_ERRATUM_1024718
+       bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
+       default y
+       help
+         This option adds work around for Arm Cortex-A55 Erratum 1024718.
+
+         Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
+         update of the hardware dirty bit when the DBM/AP bits are updated
+         without a break-before-make. The work around is to disable the usage
+         of hardware DBM locally on the affected cores. CPUs not affected by
+         erratum will continue to use the feature.
+
+         If unsure, say Y.
@@ -1108,12 +1122,25 @@ config ARM64_SVE
          To enable use of this extension on CPUs that implement it, say Y.

+         Note that for architectural reasons, firmware _must_ implement SVE
+         support when running on SVE capable hardware.  The required support
+         is present in:
+
+           * version 1.5 and later of the ARM Trusted Firmware
+           * the AArch64 boot wrapper since commit 5e1261e08abf
+             ("bootwrapper: SVE: Enable SVE for EL2 and below").
+
+         For other firmware implementations, consult the firmware documentation
+         or vendor.
+
+         If you need the kernel to boot on SVE-capable hardware with broken
+         firmware, you may need to say N here until you get your firmware
+         fixed. Otherwise, you may experience firmware panics or lockups when
+         booting the kernel. If unsure and you are not observing these
+         symptoms, you should assume that it is safe to say Y.
+
-config ARM64_MODULE_CMODEL_LARGE
-       bool
-
 config ARM64_MODULE_PLTS
        bool
-       select ARM64_MODULE_CMODEL_LARGE
        select HAVE_MOD_ARCH_SPECIFIC

 config RELOCATABLE
@@ -1147,12 +1174,12 @@ config RANDOMIZE_BASE
          If unsure, say N.

 config RANDOMIZE_MODULE_REGION_FULL
-       bool "Randomize the module region independently from the core kernel"
+       bool "Randomize the module region over a 4 GB range"
        depends on RANDOMIZE_BASE
        default y
        help
-         Randomizes the location of the module region without considering the
-         location of the core kernel. This way, it is impossible for modules
+         Randomizes the location of the module region inside a 4 GB window
+         covering the core kernel. This way, it is less likely for modules
          to leak information about the location of core kernel data structures
          but it does imply that function calls between modules and the core
          kernel will need to be resolved via veneers in the module PLT.
......
@@ -51,7 +51,6 @@ endif
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS  += $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_AFLAGS  += $(lseinstr) $(brokengasinst)

 KBUILD_CFLAGS  += $(call cc-option,-mabi=lp64)
@@ -77,10 +76,6 @@ endif
 CHECKFLAGS     += -D__aarch64__ -m64

-ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
-KBUILD_CFLAGS_MODULE   += -mcmodel=large
-endif
-
 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
 KBUILD_LDFLAGS_MODULE  += -T $(srctree)/arch/arm64/kernel/module.lds
 endif
@@ -97,12 +92,14 @@ else
 TEXT_OFFSET := 0x00080000
 endif

-# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
+# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#                              - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
 # in 32-bit arithmetic
+KASAN_SHADOW_SCALE_SHIFT := 3
 KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
        (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
-       + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
-       - (1 << (64 - 32 - 3)) )) )
+       + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
+       - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )

 export TEXT_OFFSET GZFLAGS
......
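To sanity-check the shell arithmetic above, here is a small standalone sketch (illustration only, not part of the patch; it assumes CONFIG_ARM64_VA_BITS=48 and KASAN_SHADOW_SCALE_SHIFT=3) that reproduces the same computation in plain C:

#include <stdio.h>

int main(void)
{
        unsigned int va_bits = 48;
        unsigned int scale   = 3;       /* KASAN_SHADOW_SCALE_SHIFT */
        unsigned int high;

        /* upper 32 bits, exactly as the $(( ... )) expression builds them */
        high  = 0xffffffffu & (-1u << (va_bits - 32));  /* 0xffff0000 */
        high += 1u << (va_bits - 32 - scale);           /* + 0x00002000 */
        high -= 1u << (64 - 32 - scale);                /* - 0x20000000 */

        /* prints 0xdfff200000000000 for a 48-bit VA configuration */
        printf("KASAN_SHADOW_OFFSET := 0x%08x00000000\n", high);
        return 0;
}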
...@@ -202,25 +202,15 @@ lr .req x30 // link register ...@@ -202,25 +202,15 @@ lr .req x30 // link register
/* /*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
-	 * <symbol> is within the range +/- 4 GB of the PC when running
-	 * in core kernel context. In module context, a movz/movk sequence
-	 * is used, since modules may be loaded far away from the kernel
-	 * when KASLR is in effect.
+	 * <symbol> is within the range +/- 4 GB of the PC.
*/ */
/* /*
* @dst: destination register (64 bit wide) * @dst: destination register (64 bit wide)
* @sym: name of the symbol * @sym: name of the symbol
*/ */
.macro adr_l, dst, sym .macro adr_l, dst, sym
#ifndef MODULE
adrp \dst, \sym adrp \dst, \sym
add \dst, \dst, :lo12:\sym add \dst, \dst, :lo12:\sym
#else
movz \dst, #:abs_g3:\sym
movk \dst, #:abs_g2_nc:\sym
movk \dst, #:abs_g1_nc:\sym
movk \dst, #:abs_g0_nc:\sym
#endif
.endm .endm
/* /*
...@@ -231,7 +221,6 @@ lr .req x30 // link register ...@@ -231,7 +221,6 @@ lr .req x30 // link register
* the address * the address
*/ */
.macro ldr_l, dst, sym, tmp= .macro ldr_l, dst, sym, tmp=
#ifndef MODULE
.ifb \tmp .ifb \tmp
adrp \dst, \sym adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym] ldr \dst, [\dst, :lo12:\sym]
...@@ -239,15 +228,6 @@ lr .req x30 // link register ...@@ -239,15 +228,6 @@ lr .req x30 // link register
adrp \tmp, \sym adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym] ldr \dst, [\tmp, :lo12:\sym]
.endif .endif
#else
.ifb \tmp
adr_l \dst, \sym
ldr \dst, [\dst]
.else
adr_l \tmp, \sym
ldr \dst, [\tmp]
.endif
#endif
.endm .endm
/* /*
...@@ -257,28 +237,18 @@ lr .req x30 // link register ...@@ -257,28 +237,18 @@ lr .req x30 // link register
* while <src> needs to be preserved. * while <src> needs to be preserved.
*/ */
.macro str_l, src, sym, tmp .macro str_l, src, sym, tmp
#ifndef MODULE
adrp \tmp, \sym adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym] str \src, [\tmp, :lo12:\sym]
#else
adr_l \tmp, \sym
str \src, [\tmp]
#endif
.endm .endm
/* /*
-	 * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
-	 *	 non-module code
+	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
* @sym: The name of the per-cpu variable * @sym: The name of the per-cpu variable
* @tmp: scratch register * @tmp: scratch register
*/ */
.macro adr_this_cpu, dst, sym, tmp .macro adr_this_cpu, dst, sym, tmp
#ifndef MODULE
adrp \tmp, \sym adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym add \dst, \tmp, #:lo12:\sym
#else
adr_l \dst, \sym
#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1 mrs \tmp, tpidr_el1
alternative_else alternative_else
......
...@@ -20,8 +20,12 @@ ...@@ -20,8 +20,12 @@
#define CTR_L1IP_SHIFT 14 #define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3 #define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24 #define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15 #define CTR_CWG_MASK 15
#define CTR_IDC_SHIFT 28
#define CTR_DIC_SHIFT 29
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
......
...@@ -133,6 +133,9 @@ extern void flush_dcache_page(struct page *); ...@@ -133,6 +133,9 @@ extern void flush_dcache_page(struct page *);
static inline void __flush_icache_all(void) static inline void __flush_icache_all(void)
{ {
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
return;
asm("ic ialluis"); asm("ic ialluis");
dsb(ish); dsb(ish);
} }
......
...@@ -18,7 +18,8 @@ ...@@ -18,7 +18,8 @@
#ifndef __ASM_CMPXCHG_H #ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H #define __ASM_CMPXCHG_H
-#include <linux/bug.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/barrier.h> #include <asm/barrier.h>
...@@ -196,32 +197,6 @@ __CMPXCHG_GEN(_mb) ...@@ -196,32 +197,6 @@ __CMPXCHG_GEN(_mb)
__ret; \ __ret; \
}) })
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
typeof(*raw_cpu_ptr(&(pcp))) __ret; \
preempt_disable(); \
__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
preempt_enable(); \
__ret; \
})
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
preempt_disable(); \
__ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2); \
preempt_enable(); \
__ret; \
})
#define __CMPWAIT_CASE(w, sz, name) \ #define __CMPWAIT_CASE(w, sz, name) \
static inline void __cmpwait_case_##name(volatile void *ptr, \ static inline void __cmpwait_case_##name(volatile void *ptr, \
unsigned long val) \ unsigned long val) \
......
...@@ -45,7 +45,11 @@ ...@@ -45,7 +45,11 @@
#define ARM64_HARDEN_BRANCH_PREDICTOR 24 #define ARM64_HARDEN_BRANCH_PREDICTOR 24
#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
#define ARM64_HAS_RAS_EXTN 26 #define ARM64_HAS_RAS_EXTN 26
#define ARM64_WORKAROUND_843419 27
#define ARM64_HAS_CACHE_IDC 28
#define ARM64_HAS_CACHE_DIC 29
#define ARM64_HW_DBM 30
-#define ARM64_NCAPS			27
+#define ARM64_NCAPS			31
#endif /* __ASM_CPUCAPS_H */ #endif /* __ASM_CPUCAPS_H */
This diff is collapsed.
...@@ -83,6 +83,8 @@ ...@@ -83,6 +83,8 @@
#define ARM_CPU_PART_CORTEX_A53 0xD03 #define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A73 0xD09 #define ARM_CPU_PART_CORTEX_A73 0xD09
#define ARM_CPU_PART_CORTEX_A75 0xD0A #define ARM_CPU_PART_CORTEX_A75 0xD0A
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define APM_CPU_PART_POTENZA 0x000 #define APM_CPU_PART_POTENZA 0x000
...@@ -102,6 +104,8 @@ ...@@ -102,6 +104,8 @@
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
...@@ -117,6 +121,45 @@ ...@@ -117,6 +121,45 @@
#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg) #define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
/*
* Represent a range of MIDR values for a given CPU model and a
* range of variant/revision values.
*
* @model - CPU model as defined by MIDR_CPU_MODEL
* @rv_min - Minimum value for the revision/variant as defined by
* MIDR_CPU_VAR_REV
* @rv_max - Maximum value for the variant/revision for the range.
*/
struct midr_range {
u32 model;
u32 rv_min;
u32 rv_max;
};
#define MIDR_RANGE(m, v_min, r_min, v_max, r_max) \
{ \
.model = m, \
.rv_min = MIDR_CPU_VAR_REV(v_min, r_min), \
.rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \
}
#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
{
return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
range->rv_min, range->rv_max);
}
static inline bool
is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
{
while (ranges->model)
if (is_midr_in_range(midr, ranges++))
return true;
return false;
}
/* /*
* The CPU ID never changes at run time, so we might as well tell the * The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID * compiler that it's constant. Use this function to read the CPU ID
......
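As an illustration of how the MIDR-range helpers added above are intended to be used (the list and function names here are hypothetical, not from this series; the definitions of struct midr_range, MIDR_RANGE(), MIDR_ALL_VERSIONS(), is_midr_in_range_list() and read_cpuid_id() all come from the cputype.h hunk above):

/* Hypothetical usage sketch: match the current CPU against an erratum-style list. */
static const struct midr_range example_affected_cpus[] = {
        /* Cortex-A55 r0p0 .. r1p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),
        /* any Cortex-A35 */
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        { /* sentinel: .model == 0 terminates the walk in is_midr_in_range_list() */ },
};

static bool example_cpu_is_affected(void)
{
        return is_midr_in_range_list(read_cpuid_id(), example_affected_cpus);
}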
...@@ -240,6 +240,15 @@ ...@@ -240,6 +240,15 @@
(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \ (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
ESR_ELx_SYS64_ISS_OP2_SHIFT)) ESR_ELx_SYS64_ISS_OP2_SHIFT))
/*
* ISS field definitions for floating-point exception traps
* (FP_EXC_32/FP_EXC_64).
*
* (The FPEXC_* constants are used instead for common bits.)
*/
#define ESR_ELx_FP_EXC_TFV (UL(1) << 23)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/types.h> #include <asm/types.h>
......
...@@ -22,33 +22,9 @@ ...@@ -22,33 +22,9 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h> #include <linux/stddef.h>
/*
* FP/SIMD storage area has:
* - FPSR and FPCR
* - 32 128-bit data registers
*
* Note that user_fpsimd forms a prefix of this structure, which is
* relied upon in the ptrace FP/SIMD accessors.
*/
struct fpsimd_state {
union {
struct user_fpsimd_state user_fpsimd;
struct {
__uint128_t vregs[32];
u32 fpsr;
u32 fpcr;
/*
* For ptrace compatibility, pad to next 128-bit
* boundary here if extending this struct.
*/
};
};
/* the id of the last cpu to have restored this state */
unsigned int cpu;
};
#if defined(__KERNEL__) && defined(CONFIG_COMPAT) #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */ /* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f #define VFP_FPSCR_STAT_MASK 0xf800009f
...@@ -62,8 +38,8 @@ struct fpsimd_state { ...@@ -62,8 +38,8 @@ struct fpsimd_state {
struct task_struct; struct task_struct;
-extern void fpsimd_save_state(struct fpsimd_state *state);
-extern void fpsimd_load_state(struct fpsimd_state *state);
+extern void fpsimd_save_state(struct user_fpsimd_state *state);
+extern void fpsimd_load_state(struct user_fpsimd_state *state);
extern void fpsimd_thread_switch(struct task_struct *next); extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void); extern void fpsimd_flush_thread(void);
...@@ -83,7 +59,9 @@ extern void sve_save_state(void *state, u32 *pfpsr); ...@@ -83,7 +59,9 @@ extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr, extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1); unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void); extern unsigned int sve_get_vl(void);
extern int sve_kernel_enable(void *);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern int __ro_after_init sve_max_vl; extern int __ro_after_init sve_max_vl;
......
...@@ -4,8 +4,11 @@ ...@@ -4,8 +4,11 @@
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
#include <linux/compiler_types.h>
#include <linux/export.h>
#include <linux/stringify.h> #include <linux/stringify.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cpucaps.h>
#ifdef __ASSEMBLER__ #ifdef __ASSEMBLER__
......
...@@ -39,6 +39,8 @@ struct mod_arch_specific { ...@@ -39,6 +39,8 @@ struct mod_arch_specific {
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym); Elf64_Sym *sym);
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val);
#ifdef CONFIG_RANDOMIZE_BASE #ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base; extern u64 module_alloc_base;
#else #else
......
...@@ -16,7 +16,10 @@ ...@@ -16,7 +16,10 @@
#ifndef __ASM_PERCPU_H #ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H #define __ASM_PERCPU_H
#include <linux/preempt.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h> #include <asm/stack_pointer.h>
static inline void set_my_cpu_offset(unsigned long off) static inline void set_my_cpu_offset(unsigned long off)
...@@ -197,6 +200,32 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, ...@@ -197,6 +200,32 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return ret; return ret;
} }
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
typeof(*raw_cpu_ptr(&(pcp))) __ret; \
preempt_disable(); \
__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
preempt_enable(); \
__ret; \
})
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
preempt_disable(); \
__ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2); \
preempt_enable(); \
__ret; \
})
#define _percpu_read(pcp) \ #define _percpu_read(pcp) \
({ \ ({ \
typeof(pcp) __retval; \ typeof(pcp) __retval; \
......
...@@ -291,6 +291,7 @@ ...@@ -291,6 +291,7 @@
#define TCR_TBI0 (UL(1) << 37) #define TCR_TBI0 (UL(1) << 37)
#define TCR_HA (UL(1) << 39) #define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40) #define TCR_HD (UL(1) << 40)
#define TCR_NFD1 (UL(1) << 54)
/* /*
* TTBR. * TTBR.
......
...@@ -34,10 +34,12 @@ ...@@ -34,10 +34,12 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/alternative.h> #include <asm/alternative.h>
-#include <asm/fpsimd.h>
+#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h> #include <asm/hw_breakpoint.h>
#include <asm/lse.h> #include <asm/lse.h>
#include <asm/pgtable-hwdef.h> #include <asm/pgtable-hwdef.h>
...@@ -103,11 +105,19 @@ struct cpu_context { ...@@ -103,11 +105,19 @@ struct cpu_context {
struct thread_struct { struct thread_struct {
struct cpu_context cpu_context; /* cpu context */ struct cpu_context cpu_context; /* cpu context */
+	/*
+	 * Whitelisted fields for hardened usercopy:
+	 * Maintainers must ensure manually that this contains no
+	 * implicit padding.
+	 */
+	struct {
 		unsigned long	tp_value;	/* TLS register */
-#ifdef CONFIG_COMPAT
 		unsigned long	tp2_value;
-#endif
-	struct fpsimd_state	fpsimd_state;
+		struct user_fpsimd_state fpsimd_state;
+	} uw;
+
+	unsigned int		fpsimd_cpu;
void *sve_state; /* SVE registers, if any */ void *sve_state; /* SVE registers, if any */
unsigned int sve_vl; /* SVE vector length */ unsigned int sve_vl; /* SVE vector length */
unsigned int sve_vl_onexec; /* SVE vl after next exec */ unsigned int sve_vl_onexec; /* SVE vl after next exec */
...@@ -116,14 +126,17 @@ struct thread_struct { ...@@ -116,14 +126,17 @@ struct thread_struct {
struct debug_info debug; /* debugging */ struct debug_info debug; /* debugging */
}; };
-/*
- * Everything usercopied to/from thread_struct is statically-sized, so
- * no hardened usercopy whitelist is needed.
- */
 static inline void arch_thread_struct_whitelist(unsigned long *offset,
 						unsigned long *size)
 {
-	*offset = *size = 0;
+	/* Verify that there is no padding among the whitelisted fields: */
+	BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
+		     sizeof_field(struct thread_struct, uw.tp_value) +
+		     sizeof_field(struct thread_struct, uw.tp2_value) +
+		     sizeof_field(struct thread_struct, uw.fpsimd_state));
+
+	*offset = offsetof(struct thread_struct, uw);
+	*size = sizeof_field(struct thread_struct, uw);
 }
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
...@@ -131,13 +144,13 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, ...@@ -131,13 +144,13 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
({ \ ({ \
unsigned long *__tls; \ unsigned long *__tls; \
 	if (is_compat_thread(task_thread_info(t)))	\
-		__tls = &(t)->thread.tp2_value;		\
+		__tls = &(t)->thread.uw.tp2_value;	\
 	else						\
-		__tls = &(t)->thread.tp_value;		\
+		__tls = &(t)->thread.uw.tp_value;	\
 	__tls;						\
 })
 #else
-#define task_user_tls(t)	(&(t)->thread.tp_value)
+#define task_user_tls(t)	(&(t)->thread.uw.tp_value)
 #endif
/* Sync TPIDR_EL0 back to thread_struct for current */ /* Sync TPIDR_EL0 back to thread_struct for current */
...@@ -227,9 +240,9 @@ static inline void spin_lock_prefetch(const void *ptr) ...@@ -227,9 +240,9 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif #endif
-int cpu_enable_pan(void *__unused);
-int cpu_enable_cache_maint_trap(void *__unused);
-int cpu_clear_disr(void *__unused);
+void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
+void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
+void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_SET_VL(arg) sve_set_current_vl(arg)
......
...@@ -490,6 +490,7 @@ ...@@ -490,6 +490,7 @@
#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0) #define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
/* id_aa64isar0 */ /* id_aa64isar0 */
#define ID_AA64ISAR0_TS_SHIFT 52
#define ID_AA64ISAR0_FHM_SHIFT 48 #define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_DP_SHIFT 44 #define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_SM4_SHIFT 40 #define ID_AA64ISAR0_SM4_SHIFT 40
...@@ -511,6 +512,7 @@ ...@@ -511,6 +512,7 @@
/* id_aa64pfr0 */ /* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_DIT_SHIFT 48
#define ID_AA64PFR0_SVE_SHIFT 32 #define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_RAS_SHIFT 28 #define ID_AA64PFR0_RAS_SHIFT 28
#define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_GIC_SHIFT 24
...@@ -568,6 +570,7 @@ ...@@ -568,6 +570,7 @@
#define ID_AA64MMFR1_VMIDBITS_16 2 #define ID_AA64MMFR1_VMIDBITS_16 2
/* id_aa64mmfr2 */ /* id_aa64mmfr2 */
#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_LVA_SHIFT 16 #define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12 #define ID_AA64MMFR2_IESB_SHIFT 12
#define ID_AA64MMFR2_LSM_SHIFT 8 #define ID_AA64MMFR2_LSM_SHIFT 8
......
...@@ -45,17 +45,6 @@ extern void __show_regs(struct pt_regs *); ...@@ -45,17 +45,6 @@ extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
#define show_unhandled_signals_ratelimited() \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
bool __show_ratelimited = false; \
if (show_unhandled_signals && __ratelimit(&_rs)) \
__show_ratelimited = true; \
__show_ratelimited; \
})
int handle_guest_sea(phys_addr_t addr, unsigned int esr); int handle_guest_sea(phys_addr_t addr, unsigned int esr);
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -60,6 +60,15 @@ ...@@ -60,6 +60,15 @@
__tlbi(op, (arg) | USER_ASID_FLAG); \ __tlbi(op, (arg) | USER_ASID_FLAG); \
} while (0) } while (0)
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid) \
({ \
unsigned long __ta = (addr) >> 12; \
__ta &= GENMASK_ULL(43, 0); \
__ta |= (unsigned long)(asid) << 48; \
__ta; \
})
/* /*
* TLB Management * TLB Management
* ============== * ==============
...@@ -117,7 +126,7 @@ static inline void flush_tlb_all(void) ...@@ -117,7 +126,7 @@ static inline void flush_tlb_all(void)
static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_mm(struct mm_struct *mm)
{ {
-	unsigned long asid = ASID(mm) << 48;
+	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
dsb(ishst); dsb(ishst);
__tlbi(aside1is, asid); __tlbi(aside1is, asid);
...@@ -128,7 +137,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) ...@@ -128,7 +137,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma, static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long uaddr) unsigned long uaddr)
{ {
-	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
+	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
dsb(ishst); dsb(ishst);
__tlbi(vale1is, addr); __tlbi(vale1is, addr);
...@@ -146,7 +155,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, ...@@ -146,7 +155,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, unsigned long start, unsigned long end,
bool last_level) bool last_level)
{ {
-	unsigned long asid = ASID(vma->vm_mm) << 48;
+	unsigned long asid = ASID(vma->vm_mm);
unsigned long addr; unsigned long addr;
if ((end - start) > MAX_TLB_RANGE) { if ((end - start) > MAX_TLB_RANGE) {
...@@ -154,8 +163,8 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, ...@@ -154,8 +163,8 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
return; return;
} }
-	start = asid | (start >> 12);
-	end = asid | (end >> 12);
+	start = __TLBI_VADDR(start, asid);
+	end = __TLBI_VADDR(end, asid);
dsb(ishst); dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
...@@ -185,8 +194,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end ...@@ -185,8 +194,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
return; return;
} }
-	start >>= 12;
-	end >>= 12;
+	start = __TLBI_VADDR(start, 0);
+	end = __TLBI_VADDR(end, 0);
dsb(ishst); dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
...@@ -202,7 +211,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end ...@@ -202,7 +211,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
static inline void __flush_tlb_pgtable(struct mm_struct *mm, static inline void __flush_tlb_pgtable(struct mm_struct *mm,
unsigned long uaddr) unsigned long uaddr)
{ {
-	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
+	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
__tlbi(vae1is, addr); __tlbi(vae1is, addr);
__tlbi_user(vae1is, addr); __tlbi_user(vae1is, addr);
......
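For illustration (not part of the patch), the operand packing done by __TLBI_VADDR can be mirrored in plain C; with a user address of 0x12345000 and ASID 0x42 it yields 0x0042000000012345, i.e. the virtual page number in bits [43:0] and the ASID in bits [63:48]:

#include <stdint.h>
#include <stdio.h>

static uint64_t tlbi_vaddr(uint64_t addr, uint64_t asid)
{
        uint64_t ta = addr >> 12;       /* virtual page number */

        ta &= (1ULL << 44) - 1;         /* GENMASK_ULL(43, 0) */
        ta |= asid << 48;               /* ASID in bits [63:48] */
        return ta;
}

int main(void)
{
        /* prints 0x0042000000012345 */
        printf("0x%016llx\n",
               (unsigned long long)tlbi_vaddr(0x12345000ULL, 0x42));
        return 0;
}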
...@@ -35,10 +35,10 @@ struct undef_hook { ...@@ -35,10 +35,10 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook); void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook);
-void force_signal_inject(int signal, int code, struct pt_regs *regs,
-			 unsigned long address);
-
-void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
+void force_signal_inject(int signal, int code, unsigned long address);
+void arm64_notify_segfault(unsigned long addr);
+void arm64_force_sig_info(struct siginfo *info, const char *str,
+			  struct task_struct *tsk);
/* /*
* Move regs->pc to next instruction and do necessary setup before it * Move regs->pc to next instruction and do necessary setup before it
......
...@@ -102,12 +102,6 @@ static inline bool has_vhe(void) ...@@ -102,12 +102,6 @@ static inline bool has_vhe(void)
return false; return false;
} }
#ifdef CONFIG_ARM64_VHE
extern void verify_cpu_run_el(void);
#else
static inline void verify_cpu_run_el(void) {}
#endif
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */ #endif /* ! __ASM__VIRT_H */
...@@ -44,5 +44,9 @@ ...@@ -44,5 +44,9 @@
#define HWCAP_SHA512 (1 << 21) #define HWCAP_SHA512 (1 << 21)
#define HWCAP_SVE (1 << 22) #define HWCAP_SVE (1 << 22)
#define HWCAP_ASIMDFHM (1 << 23) #define HWCAP_ASIMDFHM (1 << 23)
#define HWCAP_DIT (1 << 24)
#define HWCAP_USCAT (1 << 25)
#define HWCAP_ILRCPC (1 << 26)
#define HWCAP_FLAGM (1 << 27)
#endif /* _UAPI__ASM_HWCAP_H */ #endif /* _UAPI__ASM_HWCAP_H */
...@@ -21,25 +21,4 @@ ...@@ -21,25 +21,4 @@
#include <asm-generic/siginfo.h> #include <asm-generic/siginfo.h>
/*
* SIGFPE si_codes
*/
#ifdef __KERNEL__
#define FPE_FIXME 0 /* Broken dup of SI_USER */
#endif /* __KERNEL__ */
/*
* SIGBUS si_codes
*/
#ifdef __KERNEL__
#define BUS_FIXME 0 /* Broken dup of SI_USER */
#endif /* __KERNEL__ */
/*
* SIGTRAP si_codes
*/
#ifdef __KERNEL__
#define TRAP_FIXME 0 /* Broken dup of SI_USER */
#endif /* __KERNEL__ */
#endif #endif
...@@ -429,7 +429,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr) ...@@ -429,7 +429,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
fault: fault:
pr_debug("SWP{B} emulation: access caused memory abort!\n"); pr_debug("SWP{B} emulation: access caused memory abort!\n");
-	arm64_notify_segfault(regs, address);
+	arm64_notify_segfault(address);
return 0; return 0;
} }
......
This diff is collapsed.
This diff is collapsed.
...@@ -77,6 +77,10 @@ static const char *const hwcap_str[] = { ...@@ -77,6 +77,10 @@ static const char *const hwcap_str[] = {
"sha512", "sha512",
"sve", "sve",
"asimdfhm", "asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
NULL NULL
}; };
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <asm/daifflags.h> #include <asm/daifflags.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/system_misc.h> #include <asm/system_misc.h>
#include <asm/traps.h>
/* Determine debug architecture. */ /* Determine debug architecture. */
u8 debug_monitors_arch(void) u8 debug_monitors_arch(void)
...@@ -223,7 +224,7 @@ static void send_user_sigtrap(int si_code) ...@@ -223,7 +224,7 @@ static void send_user_sigtrap(int si_code)
if (interrupts_enabled(regs)) if (interrupts_enabled(regs))
local_irq_enable(); local_irq_enable();
-	force_sig_info(SIGTRAP, &info, current);
+	arm64_force_sig_info(&info, "User debug trap", current);
} }
static int single_step_handler(unsigned long addr, unsigned int esr, static int single_step_handler(unsigned long addr, unsigned int esr,
......
This diff is collapsed.
...@@ -117,53 +117,42 @@ u64 __init kaslr_early_init(u64 dt_phys) ...@@ -117,53 +117,42 @@ u64 __init kaslr_early_init(u64 dt_phys)
/* /*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable * OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the * kernel image offset from the seed. Let's place the kernel in the
-	 * lower half of the VMALLOC area (VA_BITS - 2).
+	 * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
+	 * the lower and upper quarters to avoid colliding with other
+	 * allocations.
* Even if we could randomize at page granularity for 16k and 64k pages, * Even if we could randomize at page granularity for 16k and 64k pages,
* let's always round to 2 MB so we don't interfere with the ability to * let's always round to 2 MB so we don't interfere with the ability to
* map using contiguous PTEs * map using contiguous PTEs
*/ */
mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1); mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
-	offset = seed & mask;
+	offset = BIT(VA_BITS - 3) + (seed & mask);
/* use the top 16 bits to randomize the linear region */ /* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48; memstart_offset_seed = seed >> 48;
/*
* The kernel Image should not extend across a 1GB/32MB/512MB alignment
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
* happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
*
* NOTE: The references to _text and _end below will already take the
* modulo offset (the physical displacement modulo 2 MB) into
* account, given that the physical placement is controlled by
* the loader, and will not change as a result of the virtual
* mapping we choose.
*/
if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
(((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);
if (IS_ENABLED(CONFIG_KASAN)) if (IS_ENABLED(CONFIG_KASAN))
/* /*
* KASAN does not expect the module region to intersect the * KASAN does not expect the module region to intersect the
* vmalloc region, since shadow memory is allocated for each * vmalloc region, since shadow memory is allocated for each
* module at load time, whereas the vmalloc region is shadowed * module at load time, whereas the vmalloc region is shadowed
* by KASAN zero pages. So keep modules out of the vmalloc * by KASAN zero pages. So keep modules out of the vmalloc
-		 * region if KASAN is enabled.
+		 * region if KASAN is enabled, and put the kernel well within
+		 * 4 GB of the module region.
 		 */
-		return offset;
+		return offset % SZ_2G;
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/* /*
-		 * Randomize the module region independently from the core
-		 * kernel. This prevents modules from leaking any information
+		 * Randomize the module region over a 4 GB window covering the
+		 * kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in * about the address of the kernel itself, but results in
* branches between modules and the core kernel that are * branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be * resolved via PLTs. (Branches between modules will be
* resolved normally.) * resolved normally.)
*/ */
-		module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
-		module_alloc_base = VMALLOC_START;
+		module_range = SZ_4G - (u64)(_end - _stext);
+		module_alloc_base = max((u64)_end + offset - SZ_4G,
+					(u64)MODULES_VADDR);
} else { } else {
/* /*
* Randomize the module region by setting module_alloc_base to * Randomize the module region by setting module_alloc_base to
......
...@@ -138,14 +138,25 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) ...@@ -138,14 +138,25 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{ {
-	struct pt_regs *thread_regs;
+	struct cpu_context *cpu_context = &task->thread.cpu_context;

 	/* Initialize to zero */
 	memset((char *)gdb_regs, 0, NUMREGBYTES);
-	thread_regs = task_pt_regs(task);
-	memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
-	/* Special case for PSTATE (check comments in asm/kgdb.h for details) */
-	dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
+
+	gdb_regs[19] = cpu_context->x19;
+	gdb_regs[20] = cpu_context->x20;
+	gdb_regs[21] = cpu_context->x21;
+	gdb_regs[22] = cpu_context->x22;
gdb_regs[23] = cpu_context->x23;
gdb_regs[24] = cpu_context->x24;
gdb_regs[25] = cpu_context->x25;
gdb_regs[26] = cpu_context->x26;
gdb_regs[27] = cpu_context->x27;
gdb_regs[28] = cpu_context->x28;
gdb_regs[29] = cpu_context->fp;
gdb_regs[31] = cpu_context->sp;
gdb_regs[32] = cpu_context->pc;
} }
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
......
...@@ -36,10 +36,52 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, ...@@ -36,10 +36,52 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
return (u64)&plt[i - 1]; return (u64)&plt[i - 1];
pltsec->plt_num_entries++; pltsec->plt_num_entries++;
-	BUG_ON(pltsec->plt_num_entries > pltsec->plt_max_entries);
+	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
+		return 0;

 	return (u64)&plt[i];
 }
#ifdef CONFIG_ARM64_ERRATUM_843419
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
int i = pltsec->plt_num_entries++;
u32 mov0, mov1, mov2, br;
int rd;
if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
return 0;
/* get the destination register of the ADRP instruction */
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
le32_to_cpup((__le32 *)loc));
/* generate the veneer instructions */
mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_INVERSE);
mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_KEEP);
mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_KEEP);
br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
AARCH64_INSN_BRANCH_NOLINK);
plt[i] = (struct plt_entry){
cpu_to_le32(mov0),
cpu_to_le32(mov1),
cpu_to_le32(mov2),
cpu_to_le32(br)
};
return (u64)&plt[i]; return (u64)&plt[i];
} }
#endif
#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b)) #define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
...@@ -68,16 +110,21 @@ static bool duplicate_rel(const Elf64_Rela *rela, int num) ...@@ -68,16 +110,21 @@ static bool duplicate_rel(const Elf64_Rela *rela, int num)
} }
 static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
-			       Elf64_Word dstidx)
+			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{ {
unsigned int ret = 0; unsigned int ret = 0;
Elf64_Sym *s; Elf64_Sym *s;
int i; int i;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
u64 min_align;
switch (ELF64_R_TYPE(rela[i].r_info)) { switch (ELF64_R_TYPE(rela[i].r_info)) {
case R_AARCH64_JUMP26: case R_AARCH64_JUMP26:
case R_AARCH64_CALL26: case R_AARCH64_CALL26:
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
break;
/* /*
* We only have to consider branch targets that resolve * We only have to consider branch targets that resolve
* to symbols that are defined in a different section. * to symbols that are defined in a different section.
...@@ -109,6 +156,41 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, ...@@ -109,6 +156,41 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
if (rela[i].r_addend != 0 || !duplicate_rel(rela, i)) if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
ret++; ret++;
break; break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
case R_AARCH64_ADR_PREL_PG_HI21:
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
!cpus_have_const_cap(ARM64_WORKAROUND_843419))
break;
/*
* Determine the minimal safe alignment for this ADRP
* instruction: the section alignment at which it is
* guaranteed not to appear at a vulnerable offset.
*
* This comes down to finding the least significant zero
* bit in bits [11:3] of the section offset, and
* increasing the section's alignment so that the
* resulting address of this instruction is guaranteed
* to equal the offset in that particular bit (as well
* as all less signficant bits). This ensures that the
* address modulo 4 KB != 0xfff8 or 0xfffc (which would
* have all ones in bits [11:3])
*/
min_align = 2ULL << ffz(rela[i].r_offset | 0x7);
/*
* Allocate veneer space for each ADRP that may appear
* at a vulnerable offset nonetheless. At relocation
* time, some of these will remain unused since some
* ADRP instructions can be patched to ADR instructions
* instead.
*/
if (min_align > SZ_4K)
ret++;
else
dstsec->sh_addralign = max(dstsec->sh_addralign,
min_align);
break;
} }
} }
return ret; return ret;
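To make the alignment rule in the hunk above concrete, here is a small standalone sketch (illustration only; it re-implements ffz() locally with the same "index of the first zero bit" semantics) that evaluates min_align for two sample relocation offsets:

#include <stdio.h>

static unsigned int ffz64(unsigned long long x)
{
        unsigned int i;

        for (i = 0; i < 64; i++)        /* index of the first zero bit */
                if (!(x & (1ULL << i)))
                        return i;
        return 64;
}

static unsigned long long min_align(unsigned long long r_offset)
{
        return 2ULL << ffz64(r_offset | 0x7);
}

int main(void)
{
        /*
         * 0x2a8: bit 4 of the offset is zero, so aligning the section to
         * 2 << 4 = 32 keeps the ADRP away from the 0xff8/0xffc page offsets.
         */
        printf("min_align(0x2a8) = %llu\n", min_align(0x2a8)); /* 32 */

        /*
         * 0xff8: bits [11:3] are all ones, min_align would be 8192 (> 4 KB),
         * so count_plts() reserves an ADRP veneer instead.
         */
        printf("min_align(0xff8) = %llu\n", min_align(0xff8)); /* 8192 */
        return 0;
}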
...@@ -166,10 +248,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, ...@@ -166,10 +248,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
 			core_plts += count_plts(syms, rels, numrels,
-						sechdrs[i].sh_info);
+						sechdrs[i].sh_info, dstsec);
 		else
 			init_plts += count_plts(syms, rels, numrels,
-						sechdrs[i].sh_info);
+						sechdrs[i].sh_info, dstsec);
} }
mod->arch.core.plt->sh_type = SHT_NOBITS; mod->arch.core.plt->sh_type = SHT_NOBITS;
......
...@@ -55,9 +55,10 @@ void *module_alloc(unsigned long size) ...@@ -55,9 +55,10 @@ void *module_alloc(unsigned long size)
* less likely that the module region gets exhausted, so we * less likely that the module region gets exhausted, so we
* can simply omit this fallback in that case. * can simply omit this fallback in that case.
*/ */
-	p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
-				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
-				NUMA_NO_NODE, __builtin_return_address(0));
+	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+				module_alloc_base + SZ_4G, GFP_KERNEL,
+				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) { if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p); vfree(p);
...@@ -197,6 +198,34 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, ...@@ -197,6 +198,34 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
return 0; return 0;
} }
static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
{
u32 insn;
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
!cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
((u64)place & 0xfff) < 0xff8)
return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
AARCH64_INSN_IMM_ADR);
/* patch ADRP to ADR if it is in range */
if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
AARCH64_INSN_IMM_ADR)) {
insn = le32_to_cpu(*place);
insn &= ~BIT(31);
} else {
/* out of range for ADR -> emit a veneer */
val = module_emit_adrp_veneer(mod, place, val & ~0xfff);
if (!val)
return -ENOEXEC;
insn = aarch64_insn_gen_branch_imm((u64)place, val,
AARCH64_INSN_BRANCH_NOLINK);
}
*place = cpu_to_le32(insn);
return 0;
}
int apply_relocate_add(Elf64_Shdr *sechdrs, int apply_relocate_add(Elf64_Shdr *sechdrs,
const char *strtab, const char *strtab,
unsigned int symindex, unsigned int symindex,
...@@ -336,14 +365,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ...@@ -336,14 +365,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
AARCH64_INSN_IMM_ADR); AARCH64_INSN_IMM_ADR);
break; break;
-#ifndef CONFIG_ARM64_ERRATUM_843419
 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
 			overflow_check = false;
 		case R_AARCH64_ADR_PREL_PG_HI21:
-			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
-					     AARCH64_INSN_IMM_ADR);
+			ovf = reloc_insn_adrp(me, loc, val);
+			if (ovf && ovf != -ERANGE)
+				return ovf;
 			break;
-#endif
case R_AARCH64_ADD_ABS_LO12_NC: case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC: case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false; overflow_check = false;
...@@ -386,6 +414,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ...@@ -386,6 +414,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
ovf == -ERANGE) { ovf == -ERANGE) {
val = module_emit_plt_entry(me, loc, &rel[i], sym); val = module_emit_plt_entry(me, loc, &rel[i], sym);
if (!val)
return -ENOEXEC;
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
26, AARCH64_INSN_IMM_26); 26, AARCH64_INSN_IMM_26);
} }
......
...@@ -257,7 +257,7 @@ static void tls_thread_flush(void) ...@@ -257,7 +257,7 @@ static void tls_thread_flush(void)
write_sysreg(0, tpidr_el0); write_sysreg(0, tpidr_el0);
if (is_compat_task()) { if (is_compat_task()) {
-		current->thread.tp_value = 0;
+		current->thread.uw.tp_value = 0;
/* /*
* We need to ensure ordering between the shadow state and the * We need to ensure ordering between the shadow state and the
...@@ -351,7 +351,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ...@@ -351,7 +351,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
* for the new thread. * for the new thread.
*/ */
if (clone_flags & CLONE_SETTLS) if (clone_flags & CLONE_SETTLS)
-		p->thread.tp_value = childregs->regs[3];
+		p->thread.uw.tp_value = childregs->regs[3];
} else { } else {
memset(childregs, 0, sizeof(struct pt_regs)); memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h; childregs->pstate = PSR_MODE_EL1h;
...@@ -379,7 +379,7 @@ static void tls_thread_switch(struct task_struct *next) ...@@ -379,7 +379,7 @@ static void tls_thread_switch(struct task_struct *next)
tls_preserve_current_state(); tls_preserve_current_state();
if (is_compat_thread(task_thread_info(next))) if (is_compat_thread(task_thread_info(next)))
-		write_sysreg(next->thread.tp_value, tpidrro_el0);
+		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
else if (!arm64_kernel_unmapped_at_el0()) else if (!arm64_kernel_unmapped_at_el0())
write_sysreg(0, tpidrro_el0); write_sysreg(0, tpidrro_el0);
......
...@@ -209,7 +209,7 @@ static void ptrace_hbptriggered(struct perf_event *bp, ...@@ -209,7 +209,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger); force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
} }
#endif #endif
-	force_sig_info(SIGTRAP, &info, current);
+	arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current);
} }
/* /*
...@@ -629,7 +629,7 @@ static int __fpr_get(struct task_struct *target, ...@@ -629,7 +629,7 @@ static int __fpr_get(struct task_struct *target,
sve_sync_to_fpsimd(target); sve_sync_to_fpsimd(target);
-	uregs = &target->thread.fpsimd_state.user_fpsimd;
+	uregs = &target->thread.uw.fpsimd_state;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
start_pos, start_pos + sizeof(*uregs)); start_pos, start_pos + sizeof(*uregs));
...@@ -655,19 +655,19 @@ static int __fpr_set(struct task_struct *target, ...@@ -655,19 +655,19 @@ static int __fpr_set(struct task_struct *target,
struct user_fpsimd_state newstate; struct user_fpsimd_state newstate;
/* /*
-	 * Ensure target->thread.fpsimd_state is up to date, so that a
+	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
* short copyin can't resurrect stale data. * short copyin can't resurrect stale data.
*/ */
sve_sync_to_fpsimd(target); sve_sync_to_fpsimd(target);
-	newstate = target->thread.fpsimd_state.user_fpsimd;
+	newstate = target->thread.uw.fpsimd_state;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
start_pos, start_pos + sizeof(newstate)); start_pos, start_pos + sizeof(newstate));
if (ret) if (ret)
return ret; return ret;
-	target->thread.fpsimd_state.user_fpsimd = newstate;
+	target->thread.uw.fpsimd_state = newstate;
return ret; return ret;
} }
...@@ -692,7 +692,7 @@ static int tls_get(struct task_struct *target, const struct user_regset *regset, ...@@ -692,7 +692,7 @@ static int tls_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf) void *kbuf, void __user *ubuf)
{ {
-	unsigned long *tls = &target->thread.tp_value;
+	unsigned long *tls = &target->thread.uw.tp_value;
if (target == current) if (target == current)
tls_preserve_current_state(); tls_preserve_current_state();
...@@ -705,13 +705,13 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, ...@@ -705,13 +705,13 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf) const void *kbuf, const void __user *ubuf)
{ {
int ret; int ret;
unsigned long tls = target->thread.tp_value; unsigned long tls = target->thread.uw.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret) if (ret)
return ret; return ret;
target->thread.tp_value = tls; target->thread.uw.tp_value = tls;
return ret; return ret;
} }
...@@ -842,7 +842,7 @@ static int sve_get(struct task_struct *target, ...@@ -842,7 +842,7 @@ static int sve_get(struct task_struct *target,
start = end; start = end;
end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fpsimd_state.fpsr, &target->thread.uw.fpsimd_state.fpsr,
start, end); start, end);
if (ret) if (ret)
return ret; return ret;
...@@ -941,7 +941,7 @@ static int sve_set(struct task_struct *target, ...@@ -941,7 +941,7 @@ static int sve_set(struct task_struct *target,
start = end; start = end;
end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpsimd_state.fpsr, &target->thread.uw.fpsimd_state.fpsr,
start, end); start, end);
out: out:
...@@ -1169,7 +1169,7 @@ static int compat_vfp_get(struct task_struct *target, ...@@ -1169,7 +1169,7 @@ static int compat_vfp_get(struct task_struct *target,
compat_ulong_t fpscr; compat_ulong_t fpscr;
int ret, vregs_end_pos; int ret, vregs_end_pos;
uregs = &target->thread.fpsimd_state.user_fpsimd; uregs = &target->thread.uw.fpsimd_state;
if (target == current) if (target == current)
fpsimd_preserve_current_state(); fpsimd_preserve_current_state();
...@@ -1202,7 +1202,7 @@ static int compat_vfp_set(struct task_struct *target, ...@@ -1202,7 +1202,7 @@ static int compat_vfp_set(struct task_struct *target,
compat_ulong_t fpscr; compat_ulong_t fpscr;
int ret, vregs_end_pos; int ret, vregs_end_pos;
uregs = &target->thread.fpsimd_state.user_fpsimd; uregs = &target->thread.uw.fpsimd_state;
vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
...@@ -1225,7 +1225,7 @@ static int compat_tls_get(struct task_struct *target, ...@@ -1225,7 +1225,7 @@ static int compat_tls_get(struct task_struct *target,
const struct user_regset *regset, unsigned int pos, const struct user_regset *regset, unsigned int pos,
unsigned int count, void *kbuf, void __user *ubuf) unsigned int count, void *kbuf, void __user *ubuf)
{ {
compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value; compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
} }
...@@ -1235,13 +1235,13 @@ static int compat_tls_set(struct task_struct *target, ...@@ -1235,13 +1235,13 @@ static int compat_tls_set(struct task_struct *target,
const void __user *ubuf) const void __user *ubuf)
{ {
int ret; int ret;
compat_ulong_t tls = target->thread.tp_value; compat_ulong_t tls = target->thread.uw.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret) if (ret)
return ret; return ret;
target->thread.tp_value = tls; target->thread.uw.tp_value = tls;
return ret; return ret;
} }
...@@ -1538,7 +1538,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, ...@@ -1538,7 +1538,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break; break;
case COMPAT_PTRACE_GET_THREAD_AREA: case COMPAT_PTRACE_GET_THREAD_AREA:
ret = put_user((compat_ulong_t)child->thread.tp_value, ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
(compat_ulong_t __user *)datap); (compat_ulong_t __user *)datap);
break; break;
......
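Note on the repeated renames above: tp_value becomes uw.tp_value and fpsimd_state.user_fpsimd becomes uw.fpsimd_state because the user-visible parts of thread_struct are being gathered into one sub-struct, so a single hardened-usercopy whitelist range can cover exactly the state that ptrace and signal code copies to and from user space. A minimal sketch of the layout (field set and comments are illustrative, not the full kernel definition):

struct thread_struct {
	struct cpu_context	cpu_context;	/* kernel-internal, never copied to user */

	/*
	 * Fields legitimately copied to/from user space live in one
	 * contiguous, padding-free block so it can be whitelisted as a
	 * single range for hardened usercopy.
	 */
	struct {
		unsigned long		tp_value;	/* TLS register */
		struct user_fpsimd_state fpsimd_state;
	} uw;

	unsigned long		fault_address;
	unsigned long		fault_code;
};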
...@@ -28,6 +28,7 @@ asmlinkage u64 absolute_data16(void); ...@@ -28,6 +28,7 @@ asmlinkage u64 absolute_data16(void);
asmlinkage u64 signed_movw(void); asmlinkage u64 signed_movw(void);
asmlinkage u64 unsigned_movw(void); asmlinkage u64 unsigned_movw(void);
asmlinkage u64 relative_adrp(void); asmlinkage u64 relative_adrp(void);
asmlinkage u64 relative_adrp_far(void);
asmlinkage u64 relative_adr(void); asmlinkage u64 relative_adr(void);
asmlinkage u64 relative_data64(void); asmlinkage u64 relative_data64(void);
asmlinkage u64 relative_data32(void); asmlinkage u64 relative_data32(void);
...@@ -43,9 +44,8 @@ static struct { ...@@ -43,9 +44,8 @@ static struct {
{ "R_AARCH64_ABS16", absolute_data16, UL(SYM16_ABS_VAL) }, { "R_AARCH64_ABS16", absolute_data16, UL(SYM16_ABS_VAL) },
{ "R_AARCH64_MOVW_SABS_Gn", signed_movw, UL(SYM64_ABS_VAL) }, { "R_AARCH64_MOVW_SABS_Gn", signed_movw, UL(SYM64_ABS_VAL) },
{ "R_AARCH64_MOVW_UABS_Gn", unsigned_movw, UL(SYM64_ABS_VAL) }, { "R_AARCH64_MOVW_UABS_Gn", unsigned_movw, UL(SYM64_ABS_VAL) },
#ifndef CONFIG_ARM64_ERRATUM_843419
{ "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp, (u64)&sym64_rel }, { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp, (u64)&sym64_rel },
#endif { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp_far, (u64)&memstart_addr },
{ "R_AARCH64_ADR_PREL_LO21", relative_adr, (u64)&sym64_rel }, { "R_AARCH64_ADR_PREL_LO21", relative_adr, (u64)&sym64_rel },
{ "R_AARCH64_PREL64", relative_data64, (u64)&sym64_rel }, { "R_AARCH64_PREL64", relative_data64, (u64)&sym64_rel },
{ "R_AARCH64_PREL32", relative_data32, (u64)&sym64_rel }, { "R_AARCH64_PREL32", relative_data32, (u64)&sym64_rel },
......
...@@ -43,15 +43,21 @@ ENTRY(unsigned_movw) ...@@ -43,15 +43,21 @@ ENTRY(unsigned_movw)
ret ret
ENDPROC(unsigned_movw) ENDPROC(unsigned_movw)
#ifndef CONFIG_ARM64_ERRATUM_843419 .align 12
.space 0xff8
ENTRY(relative_adrp) ENTRY(relative_adrp)
adrp x0, sym64_rel adrp x0, sym64_rel
add x0, x0, #:lo12:sym64_rel add x0, x0, #:lo12:sym64_rel
ret ret
ENDPROC(relative_adrp) ENDPROC(relative_adrp)
#endif .align 12
.space 0xffc
ENTRY(relative_adrp_far)
adrp x0, memstart_addr
add x0, x0, #:lo12:memstart_addr
ret
ENDPROC(relative_adrp_far)
ENTRY(relative_adr) ENTRY(relative_adr)
adr x0, sym64_rel adr x0, sym64_rel
......
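The .align 12 / .space padding above parks the adrp test instructions at offsets 0xff8 and 0xffc of a 4KiB page, the two instruction slots affected by Cortex-A53 erratum 843419, so the relocation test now exercises the module loader's fixup path instead of being compiled out when the workaround is enabled; relative_adrp_far additionally targets a symbol (memstart_addr) far enough away that the adrp cannot simply be rewritten as an adr and must go through a veneer. A rough sketch of the offset check the loader applies (helper name illustrative):

/*
 * Sketch: only an ADRP placed in the last two 32-bit slots of a 4KiB
 * page (byte offsets 0xff8 and 0xffc) can hit erratum 843419 and
 * therefore needs special treatment.
 */
static bool adrp_needs_fixup(unsigned long pc)
{
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	       (pc & 0xfff) >= 0xff8;
}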
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/signal32.h> #include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h> #include <asm/vdso.h>
/* /*
...@@ -179,7 +180,7 @@ static void __user *apply_user_offset( ...@@ -179,7 +180,7 @@ static void __user *apply_user_offset(
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{ {
struct user_fpsimd_state const *fpsimd = struct user_fpsimd_state const *fpsimd =
&current->thread.fpsimd_state.user_fpsimd; &current->thread.uw.fpsimd_state;
int err; int err;
/* copy the FP and status/control registers */ /* copy the FP and status/control registers */
...@@ -565,11 +566,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) ...@@ -565,11 +566,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
return regs->regs[0]; return regs->regs[0];
badframe: badframe:
if (show_unhandled_signals) arm64_notify_segfault(regs->sp);
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
regs->pc, regs->sp);
force_sig(SIGSEGV, current);
return 0; return 0;
} }
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <asm/esr.h> #include <asm/esr.h>
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/signal32.h> #include <asm/signal32.h>
#include <asm/traps.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/unistd.h> #include <asm/unistd.h>
...@@ -149,7 +150,7 @@ union __fpsimd_vreg { ...@@ -149,7 +150,7 @@ union __fpsimd_vreg {
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{ {
struct user_fpsimd_state const *fpsimd = struct user_fpsimd_state const *fpsimd =
&current->thread.fpsimd_state.user_fpsimd; &current->thread.uw.fpsimd_state;
compat_ulong_t magic = VFP_MAGIC; compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE; compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc; compat_ulong_t fpscr, fpexc;
...@@ -307,11 +308,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs) ...@@ -307,11 +308,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
return regs->regs[0]; return regs->regs[0];
badframe: badframe:
if (show_unhandled_signals) arm64_notify_segfault(regs->compat_sp);
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current);
return 0; return 0;
} }
...@@ -344,11 +341,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs) ...@@ -344,11 +341,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
return regs->regs[0]; return regs->regs[0];
badframe: badframe:
if (show_unhandled_signals) arm64_notify_segfault(regs->compat_sp);
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current);
return 0; return 0;
} }
......
...@@ -85,43 +85,6 @@ enum ipi_msg_type { ...@@ -85,43 +85,6 @@ enum ipi_msg_type {
IPI_WAKEUP IPI_WAKEUP
}; };
#ifdef CONFIG_ARM64_VHE
/* Whether the boot CPU is running in HYP mode or not*/
static bool boot_cpu_hyp_mode;
static inline void save_boot_cpu_run_el(void)
{
boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
}
static inline bool is_boot_cpu_in_hyp_mode(void)
{
return boot_cpu_hyp_mode;
}
/*
* Verify that a secondary CPU is running the kernel at the same
* EL as that of the boot CPU.
*/
void verify_cpu_run_el(void)
{
bool in_el2 = is_kernel_in_hyp_mode();
bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();
if (in_el2 ^ boot_cpu_el2) {
pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
smp_processor_id(),
in_el2 ? 2 : 1,
boot_cpu_el2 ? 2 : 1);
cpu_panic_kernel();
}
}
#else
static inline void save_boot_cpu_run_el(void) {}
#endif
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu); static int op_cpu_kill(unsigned int cpu);
#else #else
...@@ -447,13 +410,6 @@ void __init smp_prepare_boot_cpu(void) ...@@ -447,13 +410,6 @@ void __init smp_prepare_boot_cpu(void)
*/ */
jump_label_init(); jump_label_init();
cpuinfo_store_boot_cpu(); cpuinfo_store_boot_cpu();
save_boot_cpu_run_el();
/*
* Run the errata work around checks on the boot CPU, once we have
* initialised the cpu feature infrastructure from
* cpuinfo_store_boot_cpu() above.
*/
update_cpu_errata_workarounds();
} }
static u64 __init of_get_cpu_mpidr(struct device_node *dn) static u64 __init of_get_cpu_mpidr(struct device_node *dn)
......
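The EL-consistency check removed here is not lost: running the kernel at EL2 (VHE) is now treated as a boot-CPU capability, so a secondary that comes up at a different exception level is rejected by the generic capability verification rather than by smp.c. The underlying test is simply a CurrentEL comparison, roughly:

/* Sketch: how the kernel decides whether it is executing at EL2. */
static inline bool is_kernel_in_hyp_mode(void)
{
	return read_sysreg(CurrentEL) == CurrentEL_EL2;
}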
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/unistd.h> #include <asm/unistd.h>
static long static long
...@@ -67,6 +68,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) ...@@ -67,6 +68,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
*/ */
long compat_arm_syscall(struct pt_regs *regs) long compat_arm_syscall(struct pt_regs *regs)
{ {
siginfo_t info;
unsigned int no = regs->regs[7]; unsigned int no = regs->regs[7];
switch (no) { switch (no) {
...@@ -88,7 +90,7 @@ long compat_arm_syscall(struct pt_regs *regs) ...@@ -88,7 +90,7 @@ long compat_arm_syscall(struct pt_regs *regs)
return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
case __ARM_NR_compat_set_tls: case __ARM_NR_compat_set_tls:
current->thread.tp_value = regs->regs[0]; current->thread.uw.tp_value = regs->regs[0];
/* /*
* Protect against register corruption from context switch. * Protect against register corruption from context switch.
...@@ -99,6 +101,23 @@ long compat_arm_syscall(struct pt_regs *regs) ...@@ -99,6 +101,23 @@ long compat_arm_syscall(struct pt_regs *regs)
return 0; return 0;
default: default:
/*
* Calls 9f00xx..9f07ff are defined to return -ENOSYS
* if not implemented, rather than raising SIGILL. This
* way the calling program can gracefully determine whether
* a feature is supported.
*/
if ((no & 0xffff) <= 0x7ff)
return -ENOSYS; return -ENOSYS;
break;
} }
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
info.si_addr = (void __user *)instruction_pointer(regs) -
(compat_thumb_mode(regs) ? 2 : 4);
arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no);
return 0;
} }
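As the new default case spells out, unimplemented compat syscalls in the 9f00xx..9f07ff private range still return -ENOSYS so user space can probe for optional features, while anything outside that range now raises SIGILL with a well-formed siginfo. A hedged user-space sketch of such a probe (not taken from the kernel sources):

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>

/*
 * Sketch: a 32-bit program probing an ARM private syscall in the
 * 9f00xx..9f07ff range. A kernel that does not implement the call
 * answers -ENOSYS rather than killing the caller with SIGILL.
 */
static int private_call_available(long nr)
{
	return !(syscall(nr) == -1 && errno == ENOSYS);
}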
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/bug.h> #include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h> #include <asm/daifflags.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/esr.h> #include <asm/esr.h>
...@@ -223,13 +224,46 @@ void die(const char *str, struct pt_regs *regs, int err) ...@@ -223,13 +224,46 @@ void die(const char *str, struct pt_regs *regs, int err)
do_exit(SIGSEGV); do_exit(SIGSEGV);
} }
static bool show_unhandled_signals_ratelimited(void)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
return show_unhandled_signals && __ratelimit(&rs);
}
void arm64_force_sig_info(struct siginfo *info, const char *str,
struct task_struct *tsk)
{
unsigned int esr = tsk->thread.fault_code;
struct pt_regs *regs = task_pt_regs(tsk);
if (!unhandled_signal(tsk, info->si_signo))
goto send_sig;
if (!show_unhandled_signals_ratelimited())
goto send_sig;
pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
if (esr)
pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);
pr_cont("%s", str);
print_vma_addr(KERN_CONT " in ", regs->pc);
pr_cont("\n");
__show_regs(regs);
send_sig:
force_sig_info(info->si_signo, info, tsk);
}
void arm64_notify_die(const char *str, struct pt_regs *regs, void arm64_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, int err) struct siginfo *info, int err)
{ {
if (user_mode(regs)) { if (user_mode(regs)) {
WARN_ON(regs != current_pt_regs());
current->thread.fault_address = 0; current->thread.fault_address = 0;
current->thread.fault_code = err; current->thread.fault_code = err;
force_sig_info(info->si_signo, info, current); arm64_force_sig_info(info, str, current);
} else { } else {
die(str, regs, err); die(str, regs, err);
} }
...@@ -311,12 +345,13 @@ static int call_undef_hook(struct pt_regs *regs) ...@@ -311,12 +345,13 @@ static int call_undef_hook(struct pt_regs *regs)
return fn ? fn(regs, instr) : 1; return fn ? fn(regs, instr) : 1;
} }
void force_signal_inject(int signal, int code, struct pt_regs *regs, void force_signal_inject(int signal, int code, unsigned long address)
unsigned long address)
{ {
siginfo_t info; siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
const char *desc; const char *desc;
struct pt_regs *regs = current_pt_regs();
clear_siginfo(&info);
switch (signal) { switch (signal) {
case SIGILL: case SIGILL:
...@@ -330,17 +365,16 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs, ...@@ -330,17 +365,16 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs,
break; break;
} }
if (unhandled_signal(current, signal) && /* Force signals we don't understand to SIGKILL */
show_unhandled_signals_ratelimited()) { if (WARN_ON(signal != SIGKILL ||
pr_info("%s[%d]: %s: pc=%p\n", siginfo_layout(signal, code) != SIL_FAULT)) {
current->comm, task_pid_nr(current), desc, pc); signal = SIGKILL;
dump_instr(KERN_INFO, regs);
} }
info.si_signo = signal; info.si_signo = signal;
info.si_errno = 0; info.si_errno = 0;
info.si_code = code; info.si_code = code;
info.si_addr = pc; info.si_addr = (void __user *)address;
arm64_notify_die(desc, regs, &info, 0); arm64_notify_die(desc, regs, &info, 0);
} }
...@@ -348,7 +382,7 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs, ...@@ -348,7 +382,7 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs,
/* /*
* Set up process info to signal segmentation fault - called on access error. * Set up process info to signal segmentation fault - called on access error.
*/ */
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr) void arm64_notify_segfault(unsigned long addr)
{ {
int code; int code;
...@@ -359,7 +393,7 @@ void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr) ...@@ -359,7 +393,7 @@ void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr)
code = SEGV_ACCERR; code = SEGV_ACCERR;
up_read(&current->mm->mmap_sem); up_read(&current->mm->mmap_sem);
force_signal_inject(SIGSEGV, code, regs, addr); force_signal_inject(SIGSEGV, code, addr);
} }
asmlinkage void __exception do_undefinstr(struct pt_regs *regs) asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
...@@ -371,13 +405,12 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) ...@@ -371,13 +405,12 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0) if (call_undef_hook(regs) == 0)
return; return;
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
} }
int cpu_enable_cache_maint_trap(void *__unused) void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{ {
config_sctlr_el1(SCTLR_EL1_UCI, 0); config_sctlr_el1(SCTLR_EL1_UCI, 0);
return 0;
} }
#define __user_cache_maint(insn, address, res) \ #define __user_cache_maint(insn, address, res) \
...@@ -426,12 +459,12 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) ...@@ -426,12 +459,12 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
__user_cache_maint("ic ivau", address, ret); __user_cache_maint("ic ivau", address, ret);
break; break;
default: default:
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
return; return;
} }
if (ret) if (ret)
arm64_notify_segfault(regs, address); arm64_notify_segfault(address);
else else
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
} }
...@@ -600,11 +633,6 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) ...@@ -600,11 +633,6 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{ {
siginfo_t info; siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs); void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
smp_processor_id(), esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL; info.si_signo = SIGILL;
info.si_errno = 0; info.si_errno = 0;
...@@ -612,9 +640,9 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) ...@@ -612,9 +640,9 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
info.si_addr = pc; info.si_addr = pc;
current->thread.fault_address = 0; current->thread.fault_address = 0;
current->thread.fault_code = 0; current->thread.fault_code = esr;
force_sig_info(info.si_signo, &info, current); arm64_force_sig_info(&info, "Bad EL0 synchronous exception", current);
} }
#ifdef CONFIG_VMAP_STACK #ifdef CONFIG_VMAP_STACK
......
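Two things fall out of the traps.c rework above: unhandled-signal reporting is funnelled through arm64_force_sig_info(), which applies the show_unhandled_signals policy and rate limiting in one place, and force_signal_inject() no longer takes a pt_regs argument because it can only ever act on current. A caller therefore shrinks to something like the following (handler name is illustrative):

/*
 * Sketch: an instruction hook that cannot handle the trapped
 * instruction just injects SIGILL at the faulting PC; the pt_regs are
 * fetched from current inside force_signal_inject().
 */
static int unhandled_insn_hook(struct pt_regs *regs, u32 insn)
{
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
	return 0;
}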
...@@ -17,6 +17,7 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \ ...@@ -17,6 +17,7 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
-ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \ -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \ -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
-fcall-saved-x18 -fcall-saved-x18 -fomit-frame-pointer
CFLAGS_REMOVE_atomic_ll_sc.o := -pg
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
...@@ -50,6 +50,10 @@ ENTRY(flush_icache_range) ...@@ -50,6 +50,10 @@ ENTRY(flush_icache_range)
*/ */
ENTRY(__flush_cache_user_range) ENTRY(__flush_cache_user_range)
uaccess_ttbr0_enable x2, x3, x4 uaccess_ttbr0_enable x2, x3, x4
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
b 7f
alternative_else_nop_endif
dcache_line_size x2, x3 dcache_line_size x2, x3
sub x3, x2, #1 sub x3, x2, #1
bic x4, x0, x3 bic x4, x0, x3
...@@ -60,8 +64,13 @@ user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE ...@@ -60,8 +64,13 @@ user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
b.lo 1b b.lo 1b
dsb ish dsb ish
7:
alternative_if ARM64_HAS_CACHE_DIC
isb
b 8f
alternative_else_nop_endif
invalidate_icache_by_line x0, x1, x2, x3, 9f invalidate_icache_by_line x0, x1, x2, x3, 9f
mov x0, #0 8: mov x0, #0
1: 1:
uaccess_ttbr0_disable x1, x2 uaccess_ttbr0_disable x1, x2
ret ret
...@@ -80,6 +89,12 @@ ENDPROC(__flush_cache_user_range) ...@@ -80,6 +89,12 @@ ENDPROC(__flush_cache_user_range)
* - end - virtual end address of region * - end - virtual end address of region
*/ */
ENTRY(invalidate_icache_range) ENTRY(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
mov x0, xzr
isb
ret
alternative_else_nop_endif
uaccess_ttbr0_enable x2, x3, x4 uaccess_ttbr0_enable x2, x3, x4
invalidate_icache_by_line x0, x1, x2, x3, 2f invalidate_icache_by_line x0, x1, x2, x3, 2f
...@@ -116,6 +131,10 @@ ENDPIPROC(__flush_dcache_area) ...@@ -116,6 +131,10 @@ ENDPIPROC(__flush_dcache_area)
* - size - size in question * - size - size in question
*/ */
ENTRY(__clean_dcache_area_pou) ENTRY(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
dcache_by_line_op cvau, ish, x0, x1, x2, x3 dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret ret
ENDPROC(__clean_dcache_area_pou) ENDPROC(__clean_dcache_area_pou)
......
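The new alternatives in cache.S skip the D-cache clean loop when the CPU advertises CTR_EL0.IDC (instruction fetch does not require data cache clean to the point of unification) and skip the I-cache invalidation when it advertises CTR_EL0.DIC, keeping only the DSB or ISB. A C-level sketch of the corresponding probes (bit positions per the Arm ARM; helper names illustrative):

#define CTR_IDC_SHIFT	28	/* D-cache clean to PoU not required */
#define CTR_DIC_SHIFT	29	/* I-cache invalidation to PoU not required */

static bool cpu_has_cache_idc(void)
{
	return read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT);
}

static bool cpu_has_cache_dic(void)
{
	return read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
}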
...@@ -36,6 +36,12 @@ ...@@ -36,6 +36,12 @@
#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K #define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K
#endif #endif
#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS TCR_NFD1
#else
#define TCR_KASLR_FLAGS 0
#endif
#define TCR_SMP_FLAGS TCR_SHARED #define TCR_SMP_FLAGS TCR_SHARED
/* PTWs cacheable, inner/outer WBWA */ /* PTWs cacheable, inner/outer WBWA */
...@@ -432,7 +438,8 @@ ENTRY(__cpu_setup) ...@@ -432,7 +438,8 @@ ENTRY(__cpu_setup)
* both user and kernel. * both user and kernel.
*/ */
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1
tcr_set_idmap_t0sz x10, x9 tcr_set_idmap_t0sz x10, x9
/* /*
...@@ -441,16 +448,15 @@ ENTRY(__cpu_setup) ...@@ -441,16 +448,15 @@ ENTRY(__cpu_setup)
tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6 tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM #ifdef CONFIG_ARM64_HW_AFDBM
/* /*
* Hardware update of the Access and Dirty bits. * Enable hardware update of the Access Flags bit.
* Hardware dirty bit management is enabled later,
* via capabilities.
*/ */
mrs x9, ID_AA64MMFR1_EL1 mrs x9, ID_AA64MMFR1_EL1
and x9, x9, #0xf and x9, x9, #0xf
cbz x9, 2f cbz x9, 1f
cmp x9, #2 orr x10, x10, #TCR_HA // hardware Access flag update
b.lt 1f 1:
orr x10, x10, #TCR_HD // hardware Dirty flag update
1: orr x10, x10, #TCR_HA // hardware Access flag update
2:
#endif /* CONFIG_ARM64_HW_AFDBM */ #endif /* CONFIG_ARM64_HW_AFDBM */
msr tcr_el1, x10 msr tcr_el1, x10
ret // return to head.S ret // return to head.S
......
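__cpu_setup above now only turns on hardware Access-flag updates (TCR_EL1.HA) when ID_AA64MMFR1_EL1 reports the feature; hardware Dirty-state management (TCR_EL1.HD) is enabled later, per CPU, once the capability framework has decided it is safe. A sketch of what such a deferred enable hook can look like (name and body are an assumption, not the exact kernel code):

/*
 * Sketch: switching on hardware dirty-bit management from a cpufeature
 * enable callback instead of in __cpu_setup.
 */
static void cpu_enable_hw_dbm_sketch(const struct arm64_cpu_capabilities *cap)
{
	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;

	write_sysreg(tcr, tcr_el1);
	isb();
}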
...@@ -26,7 +26,7 @@ static inline void signal_compat_build_tests(void) ...@@ -26,7 +26,7 @@ static inline void signal_compat_build_tests(void)
* new fields are handled in copy_siginfo_to_user32()! * new fields are handled in copy_siginfo_to_user32()!
*/ */
BUILD_BUG_ON(NSIGILL != 11); BUILD_BUG_ON(NSIGILL != 11);
BUILD_BUG_ON(NSIGFPE != 13); BUILD_BUG_ON(NSIGFPE != 14);
BUILD_BUG_ON(NSIGSEGV != 7); BUILD_BUG_ON(NSIGSEGV != 7);
BUILD_BUG_ON(NSIGBUS != 5); BUILD_BUG_ON(NSIGBUS != 5);
BUILD_BUG_ON(NSIGTRAP != 4); BUILD_BUG_ON(NSIGTRAP != 4);
......
...@@ -31,11 +31,6 @@ ...@@ -31,11 +31,6 @@
#define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \ #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \
(1 << ACPI_IORT_NODE_SMMU_V3)) (1 << ACPI_IORT_NODE_SMMU_V3))
/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2
#endif
struct iort_its_msi_chip { struct iort_its_msi_chip {
struct list_head list; struct list_head list;
struct fwnode_handle *fw_node; struct fwnode_handle *fw_node;
...@@ -366,7 +361,6 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, ...@@ -366,7 +361,6 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
return NULL; return NULL;
} }
#if (ACPI_CA_VERSION > 0x20170929)
static int iort_get_id_mapping_index(struct acpi_iort_node *node) static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{ {
struct acpi_iort_smmu_v3 *smmu; struct acpi_iort_smmu_v3 *smmu;
...@@ -400,12 +394,6 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node) ...@@ -400,12 +394,6 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
return -EINVAL; return -EINVAL;
} }
} }
#else
static inline int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
return -EINVAL;
}
#endif
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
u32 id_in, u32 *id_out, u32 id_in, u32 *id_out,
......
...@@ -122,7 +122,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) ...@@ -122,7 +122,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
return pmu_parse_percpu_irq(pmu, irq); return pmu_parse_percpu_irq(pmu, irq);
} }
if (!pmu_has_irq_affinity(pdev->dev.of_node)) { if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {
pr_warn("no interrupt-affinity property for %pOF, guessing.\n", pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
pdev->dev.of_node); pdev->dev.of_node);
} }
......
...@@ -23,16 +23,30 @@ ...@@ -23,16 +23,30 @@
#define DRVNAME PMUNAME "_pmu" #define DRVNAME PMUNAME "_pmu"
#define pr_fmt(fmt) DRVNAME ": " fmt #define pr_fmt(fmt) DRVNAME ": " fmt
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cpuhotplug.h> #include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
#define ARM_SPE_BUF_PAD_BYTE 0 #define ARM_SPE_BUF_PAD_BYTE 0
......
...@@ -599,7 +599,6 @@ ...@@ -599,7 +599,6 @@
IRQCHIP_OF_MATCH_TABLE() \ IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \ ACPI_PROBE_TABLE(timer) \
ACPI_PROBE_TABLE(iort) \
EARLYCON_TABLE() EARLYCON_TABLE()
#define INIT_TEXT \ #define INIT_TEXT \
......
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#ifndef __LINUX_SIZES_H__ #ifndef __LINUX_SIZES_H__
#define __LINUX_SIZES_H__ #define __LINUX_SIZES_H__
#include <linux/const.h>
#define SZ_1 0x00000001 #define SZ_1 0x00000001
#define SZ_2 0x00000002 #define SZ_2 0x00000002
#define SZ_4 0x00000004 #define SZ_4 0x00000004
...@@ -44,4 +46,6 @@ ...@@ -44,4 +46,6 @@
#define SZ_1G 0x40000000 #define SZ_1G 0x40000000
#define SZ_2G 0x80000000 #define SZ_2G 0x80000000
#define SZ_4G _AC(0x100000000, ULL)
#endif /* __LINUX_SIZES_H__ */ #endif /* __LINUX_SIZES_H__ */
...@@ -207,7 +207,8 @@ typedef struct siginfo { ...@@ -207,7 +207,8 @@ typedef struct siginfo {
#define __FPE_DECERR 11 /* packed decimal error */ #define __FPE_DECERR 11 /* packed decimal error */
#define __FPE_INVASC 12 /* invalid ASCII digit */ #define __FPE_INVASC 12 /* invalid ASCII digit */
#define __FPE_INVDEC 13 /* invalid decimal digit */ #define __FPE_INVDEC 13 /* invalid decimal digit */
#define NSIGFPE 13 #define FPE_FLTUNK 14 /* undiagnosed floating-point exception */
#define NSIGFPE 14
/* /*
* SIGSEGV si_codes * SIGSEGV si_codes
......
...@@ -2843,10 +2843,6 @@ enum siginfo_layout siginfo_layout(int sig, int si_code) ...@@ -2843,10 +2843,6 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
#ifdef FPE_FIXME #ifdef FPE_FIXME
if ((sig == SIGFPE) && (si_code == FPE_FIXME)) if ((sig == SIGFPE) && (si_code == FPE_FIXME))
layout = SIL_FAULT; layout = SIL_FAULT;
#endif
#ifdef BUS_FIXME
if ((sig == SIGBUS) && (si_code == BUS_FIXME))
layout = SIL_FAULT;
#endif #endif
} }
return layout; return layout;
......
...@@ -221,6 +221,7 @@ static int symbol_valid(struct sym_entry *s) ...@@ -221,6 +221,7 @@ static int symbol_valid(struct sym_entry *s)
static char *special_prefixes[] = { static char *special_prefixes[] = {
"__crc_", /* modversions */ "__crc_", /* modversions */
"__efistub_", /* arm64 EFI stub namespace */
NULL }; NULL };
static char *special_suffixes[] = { static char *special_suffixes[] = {
......