Commit 0ac000e8 authored by Russell King

Merge branches 'fixes', 'misc' and 'spectre' into for-linus

@@ -1704,6 +1704,7 @@ config ARCH_WANT_GENERAL_HUGETLB
 config ARM_MODULE_PLTS
     bool "Use PLTs to allow module memory to spill over into vmalloc area"
     depends on MODULES
+    default y
     help
       Allocate PLTs when loading modules so that jumps and calls whose
       targets are too far away for their relative offsets to be encoded
@@ -1714,7 +1715,8 @@ config ARM_MODULE_PLTS
       rounding up to page size, the actual memory footprint is usually
       the same.

-      Say y if you are getting out of memory errors while loading modules
+      Disabling this is usually safe for small single-platform
+      configurations. If unsure, say y.

 source "mm/Kconfig"
...
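Note: for context, an ARM B/BL instruction encodes a signed 26-bit byte offset, i.e. a reach of roughly +/-32 MiB, which is why far-away module memory needs PLT veneers. A hedged sketch of the range test a module loader could apply (illustrative only, not code from this commit; the helper name is an assumption):

    /* Illustrative: does a branch from 'pc' to 'target' need a PLT veneer?
     * ARM-state B/BL reach is +/-32 MiB, measured from the branch address
     * plus 8 (the ARM PC-relative base).
     */
    static bool needs_plt(unsigned long pc, unsigned long target)
    {
        long offset = (long)target - (long)(pc + 8);

        return offset < -(long)SZ_32M || offset >= (long)SZ_32M;
    }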
@@ -106,7 +106,7 @@ tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 tune-y := $(tune-y)

 ifeq ($(CONFIG_AEABI),y)
-CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp
+CFLAGS_ABI :=-mabi=aapcs-linux -mfpu=vfp
 else
 CFLAGS_ABI :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
 endif
...
@@ -113,7 +113,7 @@ CFLAGS_fdt_ro.o := $(nossp_flags)
 CFLAGS_fdt_rw.o := $(nossp_flags)
 CFLAGS_fdt_wip.o := $(nossp_flags)

-ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
+ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE

 # Supply kernel BSS size to the decompressor via a linker symbol.
...
@@ -447,6 +447,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
     .size \name , . - \name
     .endm

+    .macro  csdb
+#ifdef CONFIG_THUMB2_KERNEL
+    .inst.w 0xf3af8014
+#else
+    .inst   0xe320f014
+#endif
+    .endm
+
     .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
 #ifndef CONFIG_CPU_USE_DOMAINS
     adds    \tmp, \addr, #\size - 1
...
@@ -17,6 +17,12 @@
 #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
 #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#ifdef CONFIG_THUMB2_KERNEL
+#define CSDB ".inst.w 0xf3af8014"
+#else
+#define CSDB ".inst 0xe320f014"
+#endif
+#define csdb() __asm__ __volatile__(CSDB : : : "memory")
 #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
 #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
                                     : : "r" (0) : "memory")
@@ -37,6 +43,13 @@
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif

+#ifndef CSDB
+#define CSDB
+#endif
+#ifndef csdb
+#define csdb()
+#endif
+
 #ifdef CONFIG_ARM_HEAVY_MB
 extern void (*soc_mb)(void);
 extern void arm_heavy_mb(void);
@@ -63,6 +76,25 @@ extern void arm_heavy_mb(void);
 #define __smp_rmb()  __smp_mb()
 #define __smp_wmb()  dmb(ishst)

+#ifdef CONFIG_CPU_SPECTRE
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+                                                    unsigned long sz)
+{
+    unsigned long mask;
+
+    asm volatile(
+        "cmp    %1, %2\n"
+    "   sbc    %0, %1, %1\n"
+    CSDB
+    : "=r" (mask)
+    : "r" (idx), "Ir" (sz)
+    : "cc");
+
+    return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+#endif
+
 #include <asm-generic/barrier.h>

 #endif /* !__ASSEMBLY__ */
...
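Note: the asm above yields an all-ones mask when idx < sz and zero otherwise (cmp leaves the carry flag clear exactly when idx < sz, so sbc produces ~0UL or 0), and the CSDB keeps speculation from running ahead with an unsanitized index. A hedged usage sketch (the caller is hypothetical, not from this commit):

    /* Hypothetical caller: bounds-check, then clamp the index so even a
     * misspeculated path cannot read out of bounds. */
    static unsigned long load_entry(const unsigned long *table,
                                    unsigned long nr_entries, unsigned long idx)
    {
        if (idx >= nr_entries)
            return 0;       /* architectural bounds check */

        /* mask is ~0UL when idx is in range, 0 otherwise */
        idx &= array_index_mask_nospec(idx, nr_entries);
        return table[idx];
    }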
@@ -10,12 +10,14 @@
 #ifndef __ASM_BUGS_H
 #define __ASM_BUGS_H

-#ifdef CONFIG_MMU
 extern void check_writebuffer_bugs(void);

-#define check_bugs() check_writebuffer_bugs()
+#ifdef CONFIG_MMU
+extern void check_bugs(void);
+extern void check_other_bugs(void);
 #else
 #define check_bugs() do { } while (0)
+#define check_other_bugs() do { } while (0)
 #endif

 #endif
...
@@ -65,6 +65,9 @@
 #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
 #define write_sysreg(v, ...)          __write_sysreg(v, __VA_ARGS__)

+#define BPIALL   __ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU  __ACCESS_CP15(c7, 0, c5, 0)
+
 extern unsigned long cr_alignment; /* defined in entry-armv.S */

 static inline unsigned long get_cr(void)
...
@@ -77,8 +77,16 @@
 #define ARM_CPU_PART_CORTEX_A12  0x4100c0d0
 #define ARM_CPU_PART_CORTEX_A17  0x4100c0e0
 #define ARM_CPU_PART_CORTEX_A15  0x4100c0f0
+#define ARM_CPU_PART_CORTEX_A53  0x4100d030
+#define ARM_CPU_PART_CORTEX_A57  0x4100d070
+#define ARM_CPU_PART_CORTEX_A72  0x4100d080
+#define ARM_CPU_PART_CORTEX_A73  0x4100d090
+#define ARM_CPU_PART_CORTEX_A75  0x4100d0a0
 #define ARM_CPU_PART_MASK        0xff00fff0

+/* Broadcom cores */
+#define ARM_CPU_PART_BRAHMA_B15  0x420000f0
+
 /* DEC implemented cores */
 #define ARM_CPU_PART_SA1100      0x4400a110
...
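Note: these constants are the implementer and primary part number fields of the MIDR, pre-masked with ARM_CPU_PART_MASK (0xff00fff0 keeps implementer and part number, drops variant and revision). A minimal sketch of how they are matched, assuming read_cpuid_part() from the same header (the function below is hypothetical):

    /* Sketch: read_cpuid_part() returns MIDR & ARM_CPU_PART_MASK, so the
     * comparison ignores the variant and revision fields. */
    static bool cpu_needs_fw_workaround(void)
    {
        switch (read_cpuid_part()) {
        case ARM_CPU_PART_CORTEX_A57:
        case ARM_CPU_PART_CORTEX_A72:
            return true;
        default:
            return false;
        }
    }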
@@ -77,7 +77,7 @@ extern int kgdb_fault_expected;
 #define KGDB_MAX_NO_CPUS   1
 #define BUFMAX             400
-#define NUMREGBYTES        (DBG_MAX_REG_NUM << 2)
+#define NUMREGBYTES        (GDB_MAX_REGS << 2)
 #define NUMCRITREGBYTES    (32 << 2)

 #define _R0 0
...
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];

-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
...
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cputype.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -308,8 +309,17 @@ static inline void kvm_arm_vhe_guest_exit(void) {}
 static inline bool kvm_arm_harden_branch_predictor(void)
 {
-    /* No way to detect it yet, pretend it is not there. */
+    switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+    case ARM_CPU_PART_BRAHMA_B15:
+    case ARM_CPU_PART_CORTEX_A12:
+    case ARM_CPU_PART_CORTEX_A15:
+    case ARM_CPU_PART_CORTEX_A17:
+        return true;
+#endif
+    default:
         return false;
+    }
 }

 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
...
@@ -311,7 +311,28 @@ static inline unsigned int kvm_get_vmid_bits(void)
 static inline void *kvm_get_hyp_vector(void)
 {
+    switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+    case ARM_CPU_PART_CORTEX_A12:
+    case ARM_CPU_PART_CORTEX_A17:
+    {
+        extern char __kvm_hyp_vector_bp_inv[];
+        return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+    }
+
+    case ARM_CPU_PART_BRAHMA_B15:
+    case ARM_CPU_PART_CORTEX_A15:
+    {
+        extern char __kvm_hyp_vector_ic_inv[];
+        return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+    }
+#endif
+    default:
+    {
+        extern char __kvm_hyp_vector[];
         return kvm_ksym_ref(__kvm_hyp_vector);
+    }
+    }
 }

 static inline int kvm_map_vectors(void)
...
@@ -12,60 +12,101 @@
 /* ID_MMFR0 data relevant to MPU */
 #define MMFR0_PMSA          (0xF << 4)
 #define MMFR0_PMSAv7        (3 << 4)
+#define MMFR0_PMSAv8        (4 << 4)

 /* MPU D/I Size Register fields */
-#define MPU_RSR_SZ          1
-#define MPU_RSR_EN          0
-#define MPU_RSR_SD          8
+#define PMSAv7_RSR_SZ       1
+#define PMSAv7_RSR_EN       0
+#define PMSAv7_RSR_SD       8

 /* Number of subregions (SD) */
-#define MPU_NR_SUBREGS      8
-#define MPU_MIN_SUBREG_SIZE 256
+#define PMSAv7_NR_SUBREGS      8
+#define PMSAv7_MIN_SUBREG_SIZE 256

 /* The D/I RSR value for an enabled region spanning the whole of memory */
-#define MPU_RSR_ALL_MEM     63
+#define PMSAv7_RSR_ALL_MEM  63

 /* Individual bits in the DR/IR ACR */
-#define MPU_ACR_XN          (1 << 12)
-#define MPU_ACR_SHARED      (1 << 2)
+#define PMSAv7_ACR_XN       (1 << 12)
+#define PMSAv7_ACR_SHARED   (1 << 2)

 /* C, B and TEX[2:0] bits only have semantic meanings when grouped */
-#define MPU_RGN_CACHEABLE        0xB
-#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
-#define MPU_RGN_STRONGLY_ORDERED 0
+#define PMSAv7_RGN_CACHEABLE        0xB
+#define PMSAv7_RGN_SHARED_CACHEABLE (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
+#define PMSAv7_RGN_STRONGLY_ORDERED 0

 /* Main region should only be shared for SMP */
 #ifdef CONFIG_SMP
-#define MPU_RGN_NORMAL      (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#define PMSAv7_RGN_NORMAL   (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
 #else
-#define MPU_RGN_NORMAL      MPU_RGN_CACHEABLE
+#define PMSAv7_RGN_NORMAL   PMSAv7_RGN_CACHEABLE
 #endif

 /* Access permission bits of ACR (only define those that we use)*/
-#define MPU_AP_PL1RO_PL0NA  (0x5 << 8)
-#define MPU_AP_PL1RW_PL0RW  (0x3 << 8)
-#define MPU_AP_PL1RW_PL0R0  (0x2 << 8)
-#define MPU_AP_PL1RW_PL0NA  (0x1 << 8)
+#define PMSAv7_AP_PL1RO_PL0NA (0x5 << 8)
+#define PMSAv7_AP_PL1RW_PL0RW (0x3 << 8)
+#define PMSAv7_AP_PL1RW_PL0R0 (0x2 << 8)
+#define PMSAv7_AP_PL1RW_PL0NA (0x1 << 8)
+
+#define PMSAv8_BAR_XN        1
+
+#define PMSAv8_LAR_EN        1
+#define PMSAv8_LAR_IDX(n)    (((n) & 0x7) << 1)
+
+#define PMSAv8_AP_PL1RW_PL0NA (0 << 1)
+#define PMSAv8_AP_PL1RW_PL0RW (1 << 1)
+#define PMSAv8_AP_PL1RO_PL0RO (3 << 1)
+
+#ifdef CONFIG_SMP
+#define PMSAv8_RGN_SHARED    (3 << 3) // inner sharable
+#else
+#define PMSAv8_RGN_SHARED    (0 << 3)
+#endif
+
+#define PMSAv8_RGN_DEVICE_nGnRnE 0
+#define PMSAv8_RGN_NORMAL        1
+
+#define PMSAv8_MAIR(attr, mt)    ((attr) << ((mt) * 8))
+
+#ifdef CONFIG_CPU_V7M
+#define PMSAv8_MINALIGN      32
+#else
+#define PMSAv8_MINALIGN      64
+#endif

 /* For minimal static MPU region configurations */
-#define MPU_PROBE_REGION    0
-#define MPU_BG_REGION       1
-#define MPU_RAM_REGION      2
-#define MPU_ROM_REGION      3
+#define PMSAv7_PROBE_REGION  0
+#define PMSAv7_BG_REGION     1
+#define PMSAv7_RAM_REGION    2
+#define PMSAv7_ROM_REGION    3
+
+/* Fixed for PMSAv8 only */
+#define PMSAv8_XIP_REGION    0
+#define PMSAv8_KERNEL_REGION 1

 /* Maximum number of regions Linux is interested in */
 #define MPU_MAX_REGIONS      16

-#define MPU_DATA_SIDE        0
-#define MPU_INSTR_SIDE       1
+#define PMSAv7_DATA_SIDE     0
+#define PMSAv7_INSTR_SIDE    1

 #ifndef __ASSEMBLY__

 struct mpu_rgn {
     /* Assume same attributes for d/i-side */
-    u32 drbar;
-    u32 drsr;
-    u32 dracr;
+    union {
+        u32 drbar;   /* PMSAv7 */
+        u32 prbar;   /* PMSAv8 */
+    };
+    union {
+        u32 drsr;    /* PMSAv7 */
+        u32 prlar;   /* PMSAv8 */
+    };
+    union {
+        u32 dracr;   /* PMSAv7 */
+        u32 unused;  /* not used in PMSAv8 */
+    };
 };

 struct mpu_rgn_info {
@@ -75,16 +116,17 @@ struct mpu_rgn_info {
 extern struct mpu_rgn_info mpu_rgn_info;

 #ifdef CONFIG_ARM_MPU
+extern void __init pmsav7_adjust_lowmem_bounds(void);
+extern void __init pmsav8_adjust_lowmem_bounds(void);

-extern void __init adjust_lowmem_bounds_mpu(void);
-extern void __init mpu_setup(void);
+extern void __init pmsav7_setup(void);
+extern void __init pmsav8_setup(void);
 #else
+static inline void pmsav7_adjust_lowmem_bounds(void) {};
-static inline void adjust_lowmem_bounds_mpu(void) {}
-static inline void mpu_setup(void) {}
+static inline void pmsav8_adjust_lowmem_bounds(void) {};
+static inline void pmsav7_setup(void) {};
+static inline void pmsav8_setup(void) {};
-#endif /* !CONFIG_ARM_MPU */
+#endif

 #endif /* __ASSEMBLY__ */
...
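Note: the PMSAv8 region model pairs a base register (PRBAR: base address, permissions, shareability, XN) with a limit register (PRLAR: limit address, MAIR attribute index, enable bit). A hedged sketch of how the macros above compose such a pair (illustrative only; it mirrors what pmsa-v8.c does later in this diff, and the function name is an assumption):

    /* Sketch: build a PRBAR/PRLAR pair for a kernel-only normal-memory
     * region. 'start' must be PMSAv8_MINALIGN-aligned; the limit is
     * inclusive, so the last byte address is rounded down to alignment. */
    static void make_pmsav8_region(u32 start, u32 end, u32 *bar, u32 *lar)
    {
        *bar = start | PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
        *lar = ((end - 1) & ~(PMSAv8_MINALIGN - 1)) |
               PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
    }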
@@ -36,6 +36,10 @@ extern struct processor {
      * Set up any processor specifics
      */
     void (*_proc_init)(void);
+    /*
+     * Check for processor bugs
+     */
+    void (*check_bugs)(void);
     /*
      * Disable any processor specifics
      */
...
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 #include <linux/reboot.h>
+#include <linux/percpu.h>

 extern void cpu_init(void);
@@ -15,6 +16,20 @@ void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 extern void (*arm_pm_idle)(void);

+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+typedef void (*harden_branch_predictor_fn_t)(void);
+DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+static inline void harden_branch_predictor(void)
+{
+    harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
+                                              smp_processor_id());
+    if (fn)
+        fn();
+}
+#else
+#define harden_branch_predictor() do { } while (0)
+#endif
+
 #define UDBG_UNDEFINED (1 << 0)
 #define UDBG_SYSCALL   (1 << 1)
 #define UDBG_BADABORT  (1 << 2)
...
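Note: harden_branch_predictor() simply indirects through a per-CPU function pointer, so each CPU can get the predictor-invalidation method that matches its part number. A hedged sketch of how a CPU-setup path might install a handler (hypothetical names; the real installation happens in proc-v7-bugs.c further down this diff):

    /* Hypothetical installer for the current CPU's hardening callback. */
    static void bpiall_hardening(void)
    {
        /* e.g. invalidate the branch predictor (BPIALL) */
    }

    static void install_hardening(void)
    {
        per_cpu(harden_branch_predictor_fn, smp_processor_id()) =
            bpiall_hardening;
    }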
@@ -152,7 +152,7 @@ extern int __get_user_64t_4(void *);
 #define __get_user_check(x, p)                                          \
     ({                                                                  \
         unsigned long __limit = current_thread_info()->addr_limit - 1;  \
-        register const typeof(*(p)) __user *__p asm("r0") = (p);        \
+        register typeof(*(p)) __user *__p asm("r0") = (p);              \
         register typeof(x) __r2 asm("r2");                              \
         register unsigned long __l asm("r1") = __limit;                 \
         register int __e asm("r0");                                     \
...
@@ -64,9 +64,17 @@
 #define MPU_CTRL_ENABLE      1
 #define MPU_CTRL_PRIVDEFENA  (1 << 2)

-#define MPU_RNR      0x98
-#define MPU_RBAR     0x9c
-#define MPU_RASR     0xa0
+#define PMSAv7_RNR   0x98
+#define PMSAv7_RBAR  0x9c
+#define PMSAv7_RASR  0xa0
+
+#define PMSAv8_RNR   0x98
+#define PMSAv8_RBAR  0x9c
+#define PMSAv8_RLAR  0xa0
+#define PMSAv8_RBAR_A(n) (PMSAv8_RBAR + 8*(n))
+#define PMSAv8_RLAR_A(n) (PMSAv8_RLAR + 8*(n))
+#define PMSAv8_MAIR0 0xc0
+#define PMSAv8_MAIR1 0xc4

 /* Cache opeartions */
 #define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */
...
@@ -31,6 +31,7 @@ else
 obj-y += entry-armv.o
 endif

+obj-$(CONFIG_MMU)           += bugs.o
 obj-$(CONFIG_CPU_IDLE)      += cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)   += dma.o
 obj-$(CONFIG_FIQ)           += fiq.o fiqasm.o
...
@@ -197,6 +197,8 @@ int main(void)
   DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar));
   DEFINE(MPU_RGN_DRSR,  offsetof(struct mpu_rgn, drsr));
   DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr));
+  DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
+  DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
 #endif
   return 0;
 }
...
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <asm/bugs.h>
#include <asm/proc-fns.h>

void check_other_bugs(void)
{
#ifdef MULTI_CPU
	if (processor.check_bugs)
		processor.check_bugs();
#endif
}

void __init check_bugs(void)
{
	check_writebuffer_bugs();
	check_other_bugs();
}
...
@@ -242,9 +242,7 @@ local_restart:
     tst     r10, #_TIF_SYSCALL_WORK     @ are we tracing syscalls?
     bne     __sys_trace

-    cmp     scno, #NR_syscalls          @ check upper syscall limit
-    badr    lr, ret_fast_syscall        @ return address
-    ldrcc   pc, [tbl, scno, lsl #2]     @ call sys_* routine
+    invoke_syscall tbl, scno, r10, ret_fast_syscall

     add     r1, sp, #S_OFF
 2:  cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -278,14 +276,8 @@ __sys_trace:
     mov     r1, scno
     add     r0, sp, #S_OFF
     bl      syscall_trace_enter
-
-    badr    lr, __sys_trace_return      @ return address
-    mov     scno, r0                    @ syscall number (possibly new)
-    add     r1, sp, #S_R0 + S_OFF       @ pointer to regs
-    cmp     scno, #NR_syscalls          @ check upper syscall limit
-    ldmccia r1, {r0 - r6}               @ have to reload r0 - r6
-    stmccia sp, {r4, r5}                @ and update the stack args
-    ldrcc   pc, [tbl, scno, lsl #2]     @ call sys_* routine
+    mov     scno, r0
+    invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
     cmp     scno, #-1                   @ skip the syscall?
     bne     2b
     add     sp, sp, #S_OFF              @ restore stack
@@ -363,6 +355,10 @@ sys_syscall:
     bic     scno, r0, #__NR_OABI_SYSCALL_BASE
     cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
     cmpne   scno, #NR_syscalls          @ check range
+#ifdef CONFIG_CPU_SPECTRE
+    movhs   scno, #0
+    csdb
+#endif
     stmloia sp, {r5, r6}                @ shuffle args
     movlo   r0, r1
     movlo   r1, r2
...
@@ -378,6 +378,31 @@
 #endif
     .endm

+    .macro  invoke_syscall, table, nr, tmp, ret, reload=0
+#ifdef CONFIG_CPU_SPECTRE
+    mov     \tmp, \nr
+    cmp     \tmp, #NR_syscalls          @ check upper syscall limit
+    movcs   \tmp, #0
+    csdb
+    badr    lr, \ret                    @ return address
+    .if \reload
+    add     r1, sp, #S_R0 + S_OFF       @ pointer to regs
+    ldmccia r1, {r0 - r6}               @ reload r0-r6
+    stmccia sp, {r4, r5}                @ update stack arguments
+    .endif
+    ldrcc   pc, [\table, \tmp, lsl #2]  @ call sys_* routine
+#else
+    cmp     \nr, #NR_syscalls           @ check upper syscall limit
+    badr    lr, \ret                    @ return address
+    .if \reload
+    add     r1, sp, #S_R0 + S_OFF       @ pointer to regs
+    ldmccia r1, {r0 - r6}               @ reload r0-r6
+    stmccia sp, {r4, r5}                @ update stack arguments
+    .endif
+    ldrcc   pc, [\table, \nr, lsl #2]   @ call sys_* routine
+#endif
+    .endm

 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
...
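Note: in C terms, the CONFIG_CPU_SPECTRE arm of invoke_syscall clamps the (possibly attacker-controlled) syscall number to 0 before it is ever used as a table index, issues csdb so speculation cannot run ahead with the unclamped value, and still takes the architectural out-of-range fallthrough. A hedged, purely illustrative rendering (the function and the table's C type are assumptions, not kernel code):

    /* Illustrative only: what the Spectre variant of invoke_syscall does. */
    static long invoke(unsigned long nr)
    {
        unsigned long idx = nr;

        if (idx >= NR_syscalls)
            idx = 0;                 /* movcs \tmp, #0 */
        csdb();                      /* no speculative use of the raw nr */

        if (nr < NR_syscalls)        /* ldrcc: call only when in range */
            return sys_call_table[idx]();
        return -ENOSYS;              /* fall through to the slow path */
    }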
(The diff for one file is collapsed in this view and not shown.)
@@ -31,6 +31,7 @@
 #include <linux/irq_work.h>

 #include <linux/atomic.h>
+#include <asm/bugs.h>
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
@@ -236,8 +237,6 @@ int __cpu_disable(void)
     flush_cache_louis();
     local_flush_tlb_all();

-    clear_tasks_mm_cpumask(cpu);
-
     return 0;
 }
@@ -255,6 +254,7 @@ void __cpu_die(unsigned int cpu)
     }
     pr_debug("CPU%u: shutdown\n", cpu);

+    clear_tasks_mm_cpumask(cpu);
     /*
      * platform_cpu_kill() is generally expected to do the powering off
      * and/or cutting of clocks to the dying CPU. Optionally, this may
@@ -405,6 +405,9 @@ asmlinkage void secondary_start_kernel(void)
      * before we continue - which happens after __cpu_up returns.
      */
     set_cpu_online(cpu, true);
+
+    check_other_bugs();
+
     complete(&cpu_running);
     local_irq_enable();
...
@@ -3,6 +3,7 @@
 #include <linux/slab.h>
 #include <linux/mm_types.h>

+#include <asm/bugs.h>
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/pgalloc.h>
@@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         cpu_switch_mm(mm->pgd, mm);
         local_flush_bp_all();
         local_flush_tlb_all();
+        check_other_bugs();
     }

     return ret;
...
@@ -13,6 +13,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/mpu.h>
 #include <asm/page.h>

 #include "vmlinux.lds.h"
@@ -148,6 +149,9 @@ SECTIONS
     __init_end = .;

     BSS_SECTION(0, 0, 8)
+#ifdef CONFIG_ARM_MPU
+    . = ALIGN(PMSAv8_MINALIGN);
+#endif
     _end = .;

     STABS_DEBUG
...
@@ -12,6 +12,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/mpu.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -54,6 +55,9 @@ SECTIONS
     . = ALIGN(1<<SECTION_SHIFT);
 #endif

+#ifdef CONFIG_ARM_MPU
+    . = ALIGN(PMSAv8_MINALIGN);
+#endif
     .text : {          /* Real text segment        */
         _stext = .;    /* Text and read-only data  */
             ARM_TEXT
@@ -143,6 +147,9 @@ SECTIONS
     _edata = .;

     BSS_SECTION(0, 0, 0)
+#ifdef CONFIG_ARM_MPU
+    . = ALIGN(PMSAv8_MINALIGN);
+#endif
     _end = .;

     STABS_DEBUG
...
@@ -27,24 +27,24 @@
 #define PROC_INFO                                              \
         . = ALIGN(4);                                          \
-        VMLINUX_SYMBOL(__proc_info_begin) = .;                 \
+        __proc_info_begin = .;                                 \
         *(.proc.info.init)                                     \
-        VMLINUX_SYMBOL(__proc_info_end) = .;
+        __proc_info_end = .;

 #define HYPERVISOR_TEXT                                        \
-        VMLINUX_SYMBOL(__hyp_text_start) = .;                  \
+        __hyp_text_start = .;                                  \
         *(.hyp.text)                                           \
-        VMLINUX_SYMBOL(__hyp_text_end) = .;
+        __hyp_text_end = .;

 #define IDMAP_TEXT                                             \
         ALIGN_FUNCTION();                                      \
-        VMLINUX_SYMBOL(__idmap_text_start) = .;                \
+        __idmap_text_start = .;                                \
         *(.idmap.text)                                         \
-        VMLINUX_SYMBOL(__idmap_text_end) = .;                  \
+        __idmap_text_end = .;                                  \
         . = ALIGN(PAGE_SIZE);                                  \
-        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;            \
+        __hyp_idmap_text_start = .;                            \
         *(.hyp.idmap.text)                                     \
-        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
+        __hyp_idmap_text_end = .;

 #define ARM_DISCARD                                            \
         *(.ARM.exidx.exit.text)                                \
...
@@ -16,6 +16,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */

+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -71,6 +72,90 @@ __kvm_hyp_vector:
     W(b)    hyp_irq
     W(b)    hyp_fiq

+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+    .align 5
+__kvm_hyp_vector_ic_inv:
+    .global __kvm_hyp_vector_ic_inv
+
+    /*
+     * We encode the exception entry in the bottom 3 bits of
+     * SP, and we have to guarantee to be 8 bytes aligned.
+     */
+    W(add)  sp, sp, #1  /* Reset          7 */
+    W(add)  sp, sp, #1  /* Undef          6 */
+    W(add)  sp, sp, #1  /* Syscall        5 */
+    W(add)  sp, sp, #1  /* Prefetch abort 4 */
+    W(add)  sp, sp, #1  /* Data abort     3 */
+    W(add)  sp, sp, #1  /* HVC            2 */
+    W(add)  sp, sp, #1  /* IRQ            1 */
+    W(nop)              /* FIQ            0 */
+
+    mcr     p15, 0, r0, c7, c5, 0   /* ICIALLU */
+    isb
+
+    b       decode_vectors
+
+    .align 5
+__kvm_hyp_vector_bp_inv:
+    .global __kvm_hyp_vector_bp_inv
+
+    /*
+     * We encode the exception entry in the bottom 3 bits of
+     * SP, and we have to guarantee to be 8 bytes aligned.
+     */
+    W(add)  sp, sp, #1  /* Reset          7 */
+    W(add)  sp, sp, #1  /* Undef          6 */
+    W(add)  sp, sp, #1  /* Syscall        5 */
+    W(add)  sp, sp, #1  /* Prefetch abort 4 */
+    W(add)  sp, sp, #1  /* Data abort     3 */
+    W(add)  sp, sp, #1  /* HVC            2 */
+    W(add)  sp, sp, #1  /* IRQ            1 */
+    W(nop)              /* FIQ            0 */
+
+    mcr     p15, 0, r0, c7, c5, 6   /* BPIALL */
+    isb
+
+decode_vectors:
+
+#ifdef CONFIG_THUMB2_KERNEL
+    /*
+     * Yet another silly hack: Use VPIDR as a temp register.
+     * Thumb2 is really a pain, as SP cannot be used with most
+     * of the bitwise instructions. The vect_br macro ensures
+     * things gets cleaned-up.
+     */
+    mcr     p15, 4, r0, c0, c0, 0   /* VPIDR */
+    mov     r0, sp
+    and     r0, r0, #7
+    sub     sp, sp, r0
+    push    {r1, r2}
+    mov     r1, r0
+    mrc     p15, 4, r0, c0, c0, 0   /* VPIDR */
+    mrc     p15, 0, r2, c0, c0, 0   /* MIDR  */
+    mcr     p15, 4, r2, c0, c0, 0   /* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(    eor     sp, sp, #\val   )
+ARM(    tst     sp, #7          )
+ARM(    eorne   sp, sp, #\val   )
+
+THUMB(  cmp     r1, #\val       )
+THUMB(  popeq   {r1, r2}        )
+
+    beq     \targ
+.endm
+
+    vect_br 0, hyp_fiq
+    vect_br 1, hyp_irq
+    vect_br 2, hyp_hvc
+    vect_br 3, hyp_dabt
+    vect_br 4, hyp_pabt
+    vect_br 5, hyp_svc
+    vect_br 6, hyp_undef
+    vect_br 7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
     .align
 \label: mov     r0, #\cause
@@ -118,7 +203,7 @@ hyp_hvc:
     lsr     r2, r2, #16
     and     r2, r2, #0xff
     cmp     r2, #0
-    bne     guest_trap          @ Guest called HVC
+    bne     guest_hvc_trap      @ Guest called HVC

     /*
      * Getting here means host called HVC, we shift parameters and branch
@@ -149,7 +234,14 @@ hyp_hvc:
     bx      ip

 1:
-    push    {lr}
+    /*
+     * Pushing r2 here is just a way of keeping the stack aligned to
+     * 8 bytes on any path that can trigger a HYP exception. Here,
+     * we may well be about to jump into the guest, and the guest
+     * exit would otherwise be badly decoded by our fancy
+     * "decode-exception-without-a-branch" code...
+     */
+    push    {r2, lr}

     mov     lr, r0
     mov     r0, r1
@@ -159,7 +251,21 @@ hyp_hvc:
 THUMB(  orr     lr, #1)
     blx     lr              @ Call the HYP function

-    pop     {lr}
+    pop     {r2, lr}
+    eret
+
+guest_hvc_trap:
+    movw    r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+    movt    r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+    ldr     r0, [sp]        @ Guest's r0
+    teq     r0, r2
+    bne     guest_trap
+    add     sp, sp, #12
+    @ Returns:
+    @ r0 = 0
+    @ r1 = HSR value (perfectly predictable)
+    @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
+    mov     r0, #0
     eret

 guest_trap:
...
@@ -415,6 +415,7 @@ config CPU_V7
     select CPU_CP15_MPU if !MMU
     select CPU_HAS_ASID if MMU
     select CPU_PABRT_V7
+    select CPU_SPECTRE if MMU
     select CPU_THUMB_CAPABLE
     select CPU_TLB_V7 if MMU
@@ -826,6 +827,28 @@ config CPU_BPREDICT_DISABLE
     help
       Say Y here to disable branch prediction. If unsure, say N.

+config CPU_SPECTRE
+    bool
+
+config HARDEN_BRANCH_PREDICTOR
+    bool "Harden the branch predictor against aliasing attacks" if EXPERT
+    depends on CPU_SPECTRE
+    default y
+    help
+      Speculation attacks against some high-performance processors rely
+      on being able to manipulate the branch predictor for a victim
+      context by executing aliasing branches in the attacker context.
+      Such attacks can be partially mitigated against by clearing
+      internal branch predictor state and limiting the prediction
+      logic in some situations.
+
+      This config option will take CPU-specific actions to harden
+      the branch predictor against aliasing attacks and may rely on
+      specific instruction sequences or control bits being set by
+      the system firmware.
+
+      If unsure, say Y.
+
 config TLS_REG_EMUL
     bool
     select NEED_KUSER_HELPERS
...
@@ -10,7 +10,7 @@ obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
 ifneq ($(CONFIG_MMU),y)
 obj-y                   += nommu.o
-obj-$(CONFIG_ARM_MPU)   += pmsa-v7.o
+obj-$(CONFIG_ARM_MPU)   += pmsa-v7.o pmsa-v8.o
 endif

 obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o
@@ -97,7 +97,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
 obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
 obj-$(CONFIG_CPU_V6)       += proc-v6.o
 obj-$(CONFIG_CPU_V6K)      += proc-v6.o
-obj-$(CONFIG_CPU_V7)       += proc-v7.o
+obj-$(CONFIG_CPU_V7)       += proc-v7.o proc-v7-bugs.o
 obj-$(CONFIG_CPU_V7M)      += proc-v7m.o

 AFLAGS_proc-v6.o :=-Wa,-march=armv6
...
@@ -845,7 +845,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
         unsigned long attrs)
 {
     int ret;
-    unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+    unsigned long nr_vma_pages = vma_pages(vma);
     unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
     unsigned long pfn = dma_to_pfn(dev, dma_addr);
     unsigned long off = vma->vm_pgoff;
...
@@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
     struct siginfo si;

+    if (addr > TASK_SIZE)
+        harden_branch_predictor();
+
 #ifdef CONFIG_DEBUG_USER
     if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
         ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
...
@@ -99,6 +99,38 @@ void __init arm_mm_memblock_reserve(void)
     memblock_reserve(0, 1);
 }

+static void __init adjust_lowmem_bounds_mpu(void)
+{
+    unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
+
+    switch (pmsa) {
+    case MMFR0_PMSAv7:
+        pmsav7_adjust_lowmem_bounds();
+        break;
+    case MMFR0_PMSAv8:
+        pmsav8_adjust_lowmem_bounds();
+        break;
+    default:
+        break;
+    }
+}
+
+static void __init mpu_setup(void)
+{
+    unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
+
+    switch (pmsa) {
+    case MMFR0_PMSAv7:
+        pmsav7_setup();
+        break;
+    case MMFR0_PMSAv8:
+        pmsav8_setup();
+        break;
+    default:
+        break;
+    }
+}
+
 void __init adjust_lowmem_bounds(void)
 {
     phys_addr_t end;
...
@@ -102,7 +102,7 @@ static inline u32 irbar_read(void)
 static inline void rgnr_write(u32 v)
 {
-    writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR);
+    writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
 }

 /* Data-side / unified region attributes */
@@ -110,28 +110,28 @@ static inline void rgnr_write(u32 v)
 /* Region access control register */
 static inline void dracr_write(u32 v)
 {
-    u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0);
+    u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);

-    writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR);
+    writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
 }

 /* Region size register */
 static inline void drsr_write(u32 v)
 {
-    u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16);
+    u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);

-    writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR);
+    writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
 }

 /* Region base address register */
 static inline void drbar_write(u32 v)
 {
-    writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR);
+    writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
 }

 static inline u32 drbar_read(void)
 {
-    return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR);
+    return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
 }

 /* ARMv7-M only supports a unified MPU, so I-side operations are nop */
@@ -143,11 +143,6 @@ static inline unsigned long irbar_read(void) {return 0;}

 #endif

-static int __init mpu_present(void)
-{
-    return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
-}
-
 static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
 {
     unsigned long subreg, bslots, sslots;
@@ -161,7 +156,7 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
     bdiff = base - abase;
     sdiff = p2size - asize;
-    subreg = p2size / MPU_NR_SUBREGS;
+    subreg = p2size / PMSAv7_NR_SUBREGS;

     if ((bdiff % subreg) || (sdiff % subreg))
         return false;
@@ -172,17 +167,17 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
     if (bslots || sslots) {
         int i;

-        if (subreg < MPU_MIN_SUBREG_SIZE)
+        if (subreg < PMSAv7_MIN_SUBREG_SIZE)
             return false;

-        if (bslots + sslots > MPU_NR_SUBREGS)
+        if (bslots + sslots > PMSAv7_NR_SUBREGS)
             return false;

         for (i = 0; i < bslots; i++)
             _set_bit(i, &region->subreg);

         for (i = 1; i <= sslots; i++)
-            _set_bit(MPU_NR_SUBREGS - i, &region->subreg);
+            _set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
     }

     region->base = abase;
@@ -233,7 +228,7 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
 }

 /* MPU initialisation functions */
-void __init adjust_lowmem_bounds_mpu(void)
+void __init pmsav7_adjust_lowmem_bounds(void)
 {
     phys_addr_t specified_mem_size = 0, total_mem_size = 0;
     struct memblock_region *reg;
@@ -243,10 +238,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
     unsigned int mem_max_regions;
     int num, i;

-    if (!mpu_present())
-        return;
-
-    /* Free-up MPU_PROBE_REGION */
+    /* Free-up PMSAv7_PROBE_REGION */
     mpu_min_region_order = __mpu_min_region_order();

     /* How many regions are supported */
@@ -301,12 +293,12 @@ void __init pmsav7_adjust_lowmem_bounds(void)
     num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);

     for (i = 0; i < num; i++) {
-        unsigned long subreg = mem[i].size / MPU_NR_SUBREGS;
+        unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;

         total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);

         pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
-                 &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg);
+                 &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
     }

     if (total_mem_size != specified_mem_size) {
@@ -349,7 +341,7 @@ static int __init __mpu_min_region_order(void)
     u32 drbar_result, irbar_result;

     /* We've kept a region free for this probing */
-    rgnr_write(MPU_PROBE_REGION);
+    rgnr_write(PMSAv7_PROBE_REGION);
     isb();
     /*
      * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
@@ -388,8 +380,8 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
         return -ENOMEM;

     /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
-    size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
-    size_data |= subregions << MPU_RSR_SD;
+    size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
+    size_data |= subregions << PMSAv7_RSR_SD;

     if (need_flush)
         flush_cache_all();
@@ -424,18 +416,15 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
 /*
  * Set up default MPU regions, doing nothing if there is no MPU
  */
-void __init mpu_setup(void)
+void __init pmsav7_setup(void)
 {
     int i, region = 0, err = 0;

-    if (!mpu_present())
-        return;
-
     /* Setup MPU (order is important) */

     /* Background */
     err |= mpu_setup_region(region++, 0, 32,
-                MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW,
+                PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
                 0, false);

 #ifdef CONFIG_XIP_KERNEL
@@ -448,13 +437,13 @@ void __init pmsav7_setup(void)
          * with BG region (which is uncachable), thus we need
          * to clean and invalidate cache.
          */
-        bool need_flush = region == MPU_RAM_REGION;
+        bool need_flush = region == PMSAv7_RAM_REGION;

         if (!xip[i].size)
             continue;

         err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
-                    MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL,
+                    PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
                     xip[i].subreg, need_flush);
     }
 #endif
@@ -465,14 +454,14 @@ void __init pmsav7_setup(void)
             continue;

         err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
-                    MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL,
+                    PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
                     mem[i].subreg, false);
     }

     /* Vectors */
 #ifndef CONFIG_CPU_V7M
     err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
-                MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL,
+                PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
                 0, false);
 #endif

     if (err) {
...
/*
* Based on linux/arch/arm/pmsa-v7.c
*
* ARM PMSAv8 supporting functions.
*/
#include <linux/memblock.h>
#include <linux/range.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include "mm.h"
#ifndef CONFIG_CPU_V7M
#define PRSEL __ACCESS_CP15(c6, 0, c2, 1)
#define PRBAR __ACCESS_CP15(c6, 0, c3, 0)
#define PRLAR __ACCESS_CP15(c6, 0, c3, 1)
static inline u32 prlar_read(void)
{
return read_sysreg(PRLAR);
}
static inline u32 prbar_read(void)
{
return read_sysreg(PRBAR);
}
static inline void prsel_write(u32 v)
{
write_sysreg(v, PRSEL);
}
static inline void prbar_write(u32 v)
{
write_sysreg(v, PRBAR);
}
static inline void prlar_write(u32 v)
{
write_sysreg(v, PRLAR);
}
#else
static inline u32 prlar_read(void)
{
return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR);
}
static inline u32 prbar_read(void)
{
return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR);
}
static inline void prsel_write(u32 v)
{
writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR);
}
static inline void prbar_write(u32 v)
{
writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR);
}
static inline void prlar_write(u32 v)
{
writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR);
}
#endif
static struct range __initdata io[MPU_MAX_REGIONS];
static struct range __initdata mem[MPU_MAX_REGIONS];
static unsigned int __initdata mpu_max_regions;
static __init bool is_region_fixed(int number)
{
switch (number) {
case PMSAv8_XIP_REGION:
case PMSAv8_KERNEL_REGION:
return true;
default:
return false;
}
}
void __init pmsav8_adjust_lowmem_bounds(void)
{
phys_addr_t mem_end;
struct memblock_region *reg;
bool first = true;
for_each_memblock(memory, reg) {
if (first) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
* Initially only use memory continuous from
* PHYS_OFFSET */
if (reg->base != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
mem_end = reg->base + reg->size;
first = false;
} else {
/*
* memblock auto merges contiguous blocks, remove
* all blocks afterwards in one go (we can't remove
* blocks separately while iterating)
*/
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
&mem_end, &reg->base);
memblock_remove(reg->base, 0 - reg->base);
break;
}
}
}
static int __init __mpu_max_regions(void)
{
static int max_regions;
u32 mpuir;
if (max_regions)
return max_regions;
mpuir = read_cpuid_mputype();
max_regions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
return max_regions;
}
static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar)
{
if (number > mpu_max_regions
|| number >= MPU_MAX_REGIONS)
return -ENOENT;
dsb();
prsel_write(number);
isb();
prbar_write(bar);
prlar_write(lar);
mpu_rgn_info.rgns[number].prbar = bar;
mpu_rgn_info.rgns[number].prlar = lar;
mpu_rgn_info.used++;
return 0;
}
static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start, phys_addr_t end)
{
u32 bar, lar;
if (is_region_fixed(number))
return -EINVAL;
bar = start;
lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
return __pmsav8_setup_region(number, bar, lar);
}
static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start, phys_addr_t end)
{
u32 bar, lar;
if (is_region_fixed(number))
return -EINVAL;
bar = start;
lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
return __pmsav8_setup_region(number, bar, lar);
}
static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start, phys_addr_t end)
{
u32 bar, lar;
if (!is_region_fixed(number))
return -EINVAL;
bar = start;
lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
prsel_write(number);
isb();
if (prbar_read() != bar || prlar_read() != lar)
return -EINVAL;
/* Reserved region was set up early, we just need a record for secondaries */
mpu_rgn_info.rgns[number].prbar = bar;
mpu_rgn_info.rgns[number].prlar = lar;
mpu_rgn_info.used++;
return 0;
}
#ifndef CONFIG_CPU_V7M
static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start, phys_addr_t end)
{
u32 bar, lar;
if (number == PMSAv8_KERNEL_REGION)
return -EINVAL;
bar = start;
lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
return __pmsav8_setup_region(number, bar, lar);
}
#endif
void __init pmsav8_setup(void)
{
int i, err = 0;
int region = PMSAv8_KERNEL_REGION;
/* How many regions are supported ? */
mpu_max_regions = __mpu_max_regions();
/* RAM: single chunk of memory */
add_range(mem, ARRAY_SIZE(mem), 0, memblock.memory.regions[0].base,
memblock.memory.regions[0].base + memblock.memory.regions[0].size);
/* IO: cover full 4G range */
add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff);
/* RAM and IO: exclude kernel */
subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));
subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END));
#ifdef CONFIG_XIP_KERNEL
/* RAM and IO: exclude xip */
subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
#endif
#ifndef CONFIG_CPU_V7M
/* RAM and IO: exclude vectors */
subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE);
subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE);
#endif
/* IO: exclude RAM */
for (i = 0; i < ARRAY_SIZE(mem); i++)
subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end);
/* Now program MPU */
#ifdef CONFIG_XIP_KERNEL
/* ROM */
err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
#endif
/* Kernel */
err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));
/* IO */
for (i = 0; i < ARRAY_SIZE(io); i++) {
if (!io[i].end)
continue;
err |= pmsav8_setup_io(region++, io[i].start, io[i].end);
}
/* RAM */
for (i = 0; i < ARRAY_SIZE(mem); i++) {
if (!mem[i].end)
continue;
err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end);
}
/* Vectors */
#ifndef CONFIG_CPU_V7M
err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE);
#endif
if (err)
pr_warn("MPU region initialization failure! %d", err);
else
pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n",
mpu_rgn_info.used, mpu_max_regions);
}
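Note: PMSAv8 regions carry only a 3-bit attribute index (PMSAv8_LAR_IDX above); the actual memory attributes live in MAIR0/MAIR1, eight bits per index. A hedged sketch of how the PMSAv8_MAIR() macro from mpu.h packs them (the attribute encodings 0x00 for Device-nGnRnE and 0xff for Normal write-back are standard architectural values; the surrounding function is hypothetical):

    /* Sketch: pack attributes for index 0 (device) and index 1 (normal). */
    static u32 make_mair0(void)
    {
        return PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) |
               PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL);
    }
...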
@@ -273,13 +273,14 @@
     mcr     p15, 0, ip, c7, c10, 4      @ data write barrier
     .endm

-.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
+.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
     .type   \name\()_processor_functions, #object
     .align 2
 ENTRY(\name\()_processor_functions)
     .word   \dabort
     .word   \pabort
     .word   cpu_\name\()_proc_init
+    .word   \bugs
     .word   cpu_\name\()_proc_fin
     .word   cpu_\name\()_reset
     .word   cpu_\name\()_do_idle
...
@@ -41,11 +41,6 @@
  *  even on Cortex-A8 revisions not affected by 430973.
  *  If IBE is not set, the flush BTAC/BTB won't do anything.
  */
-ENTRY(cpu_ca8_switch_mm)
-#ifdef CONFIG_MMU
-    mov     r2, #0
-    mcr     p15, 0, r2, c7, c5, 6       @ flush BTAC/BTB
-#endif
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
     mmid    r1, r1                      @ get mm->context.id
@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
 #endif
     bx      lr
 ENDPROC(cpu_v7_switch_mm)
-ENDPROC(cpu_ca8_switch_mm)

 /*
  *  cpu_v7_set_pte_ext(ptep, pte)
...
// SPDX-License-Identifier: GPL-2.0
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
#include <linux/psci.h>
#include <linux/smp.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/proc-fns.h>
#include <asm/system_misc.h>
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
static void harden_branch_predictor_bpiall(void)
{
write_sysreg(0, BPIALL);
}
static void harden_branch_predictor_iciallu(void)
{
write_sysreg(0, ICIALLU);
}
static void __maybe_unused call_smc_arch_workaround_1(void)
{
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void __maybe_unused call_hvc_arch_workaround_1(void)
{
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void cpu_v7_spectre_init(void)
{
const char *spectre_v2_method = NULL;
int cpu = smp_processor_id();
if (per_cpu(harden_branch_predictor_fn, cpu))
return;
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A8:
case ARM_CPU_PART_CORTEX_A9:
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A17:
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_bpiall;
spectre_v2_method = "BPIALL";
break;
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_iciallu;
spectre_v2_method = "ICIALLU";
break;
#ifdef CONFIG_ARM_PSCI
default:
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
break;
/* fallthrough */
/* Cortex A57/A72 require firmware workaround */
case ARM_CPU_PART_CORTEX_A57:
case ARM_CPU_PART_CORTEX_A72: {
struct arm_smccc_res res;
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
break;
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
call_hvc_arch_workaround_1;
processor.switch_mm = cpu_v7_hvc_switch_mm;
spectre_v2_method = "hypervisor";
break;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
call_smc_arch_workaround_1;
processor.switch_mm = cpu_v7_smc_switch_mm;
spectre_v2_method = "firmware";
break;
default:
break;
}
}
#endif
}
if (spectre_v2_method)
pr_info("CPU%u: Spectre v2: using %s workaround\n",
smp_processor_id(), spectre_v2_method);
return;
bl_error:
pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
cpu);
}
#else
static void cpu_v7_spectre_init(void)
{
}
#endif
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
u32 mask, const char *msg)
{
u32 aux_cr;
asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
if ((aux_cr & mask) != mask) {
if (!*warned)
pr_err("CPU%u: %s", smp_processor_id(), msg);
*warned = true;
return false;
}
return true;
}
static DEFINE_PER_CPU(bool, spectre_warned);
static bool check_spectre_auxcr(bool *warned, u32 bit)
{
return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
cpu_v7_check_auxcr_set(warned, bit,
"Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
}
void cpu_v7_ca8_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
cpu_v7_spectre_init();
}
void cpu_v7_ca15_ibe(void)
{
if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
cpu_v7_spectre_init();
}
void cpu_v7_bugs_init(void)
{
cpu_v7_spectre_init();
}
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
* *
* This is the "shell" of the ARMv7 processor support. * This is the "shell" of the ARMv7 processor support.
*/ */
#include <linux/arm-smccc.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/assembler.h> #include <asm/assembler.h>
...@@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area) ...@@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area)
ret lr ret lr
ENDPROC(cpu_v7_dcache_clean_area) ENDPROC(cpu_v7_dcache_clean_area)
#ifdef CONFIG_ARM_PSCI
.arch_extension sec
ENTRY(cpu_v7_smc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
ENDPROC(cpu_v7_smc_switch_mm)
.arch_extension virt
ENTRY(cpu_v7_hvc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
ENDPROC(cpu_v7_hvc_switch_mm)
#endif
ENTRY(cpu_v7_iciallu_switch_mm)
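	@ On Cortex-A15/Brahma-B15, BPIALL is effectively a no-op, so
	@ invalidate the whole icache (ICIALLU), which on these parts also
	@ invalidates the branch predictor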
mov r3, #0
mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
b cpu_v7_switch_mm
ENDPROC(cpu_v7_iciallu_switch_mm)
ENTRY(cpu_v7_bpiall_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
b cpu_v7_switch_mm
ENDPROC(cpu_v7_bpiall_switch_mm)
string cpu_v7_name, "ARMv7 Processor"
.align
...@@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume)
ENDPROC(cpu_v7_do_resume)
#endif
/*
* Cortex-A8
*/
globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca8_reset, cpu_v7_reset
globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
#endif
/*
* Cortex-A9 processor functions
*/
globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca9mp_reset, cpu_v7_reset
globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
.globl cpu_ca9mp_suspend_size
.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
#ifdef CONFIG_ARM_CPU_SUSPEND
...@@ -547,12 +554,79 @@ __v7_setup_stack:
__INITDATA
.weak cpu_v7_bugs_init
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
@ generic v7 bpiall on context switch
globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
#endif
define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
#else
#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
#endif
#ifndef CONFIG_ARM_LPAE
@ Cortex-A8 - always needs bpiall switch_mm implementation
globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca8_reset, cpu_v7_reset
globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
#endif
define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
@ Cortex-A9 - needs more registers preserved across suspend/resume
@ and bpiall switch_mm for hardening
globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca9mp_reset, cpu_v7_reset
globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
#else
globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
#endif
globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
#endif
@ Cortex-A15 - needs iciallu switch_mm for hardening
globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
globl_equ cpu_ca15_reset, cpu_v7_reset
globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
#else
globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
#endif
globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
#ifdef CONFIG_CPU_PJ4B #ifdef CONFIG_CPU_PJ4B
define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
#endif #endif
...@@ -669,7 +743,7 @@ __v7_ca7mp_proc_info:
__v7_ca12mp_proc_info:
.long 0x410fc0d0
.long 0xff0ffff0
__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
/*
...@@ -679,7 +753,7 @@ __v7_ca12mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
...@@ -689,7 +763,7 @@ __v7_ca15mp_proc_info:
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions, cache_fns = b15_cache_fns
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*
...@@ -699,9 +773,25 @@ __v7_b15mp_proc_info:
__v7_ca17mp_proc_info:
.long 0x410fc0e0
.long 0xff0ffff0
__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
/* ARM Ltd. Cortex A73 processor */
.type __v7_ca73_proc_info, #object
__v7_ca73_proc_info:
.long 0x410fd090
.long 0xff0ffff0
__v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca73_proc_info, . - __v7_ca73_proc_info
/* ARM Ltd. Cortex A75 processor */
.type __v7_ca75_proc_info, #object
__v7_ca75_proc_info:
.long 0x410fd0a0
.long 0xff0ffff0
__v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
.size __v7_ca75_proc_info, . - __v7_ca75_proc_info
/*
 * Qualcomm Inc. Krait processors.
 */
......
...@@ -197,6 +197,7 @@ struct bus_type amba_bustype = {
.pm = &amba_pm,
.force_dma = true,
};
EXPORT_SYMBOL_GPL(amba_bustype);
static int __init amba_init(void)
{
......
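/*
 * Not part of this diff -- why export amba_bustype: the export lets
 * modules walk or match devices on the AMBA bus. A hypothetical,
 * self-contained user:
 */
#include <linux/amba/bus.h>
#include <linux/device.h>

static int amba_dev_count_one(struct device *dev, void *data)
{
	(*(int *)data)++;	/* count every device on the bus */
	return 0;		/* keep iterating */
}

static int count_amba_devices(void)
{
	int n = 0;

	bus_for_each_dev(&amba_bustype, NULL, &n, amba_dev_count_one);
	return n;
}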