Commit 55a7b212 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:

 - RAS reporting via GHES/APEI (ACPI)

 - Indirect ftrace trampolines for modules

 - Improvements to kernel fault reporting

 - Page poisoning

 - Sigframe cleanups and preparation for SVE context

 - Core dump fixes

 - Sparse fixes (mainly relating to endianness)

 - xgene SoC PMU v3 driver

 - Misc cleanups and non-critical fixes

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (75 commits)
  arm64: fix endianness annotation for 'struct jit_ctx' and friends
  arm64: cpuinfo: constify attribute_group structures.
  arm64: ptrace: Fix incorrect get_user() use in compat_vfp_set()
  arm64: ptrace: Remove redundant overrun check from compat_vfp_set()
  arm64: ptrace: Avoid setting compat FP[SC]R to garbage if get_user fails
  arm64: fix endianness annotation for __apply_alternatives()/get_alt_insn()
  arm64: fix endianness annotation in get_kaslr_seed()
  arm64: add missing conversion to __wsum in ip_fast_csum()
  arm64: fix endianness annotation in acpi_parking_protocol.c
  arm64: use readq() instead of readl() to read 64bit entry_point
  arm64: fix endianness annotation for reloc_insn_movw() & reloc_insn_imm()
  arm64: fix endianness annotation for aarch64_insn_write()
  arm64: fix endianness annotation in aarch64_insn_read()
  arm64: fix endianness annotation in call_undef_hook()
  arm64: fix endianness annotation for debug-monitors.c
  ras: mark stub functions as 'inline'
  arm64: pass endianness info to sparse
  arm64: ftrace: fix !CONFIG_ARM64_MODULE_PLTS kernels
  arm64: signal: Allow expansion of the signal frame
  acpi: apei: check for pending errors when probing GHES entries
  ...
parents e5f76a2e 425e1ed7
@@ -187,6 +187,16 @@
 #define FSC_FAULT (0x04)
 #define FSC_ACCESS (0x08)
 #define FSC_PERM (0x0c)
+#define FSC_SEA (0x10)
+#define FSC_SEA_TTW0 (0x14)
+#define FSC_SEA_TTW1 (0x15)
+#define FSC_SEA_TTW2 (0x16)
+#define FSC_SEA_TTW3 (0x17)
+#define FSC_SECC (0x18)
+#define FSC_SECC_TTW0 (0x1c)
+#define FSC_SECC_TTW1 (0x1d)
+#define FSC_SECC_TTW2 (0x1e)
+#define FSC_SECC_TTW3 (0x1f)
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK (~0xf)
...
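
A note for orientation (not part of the diff): the new constants follow the ARM fault status code (FSC) encoding, where 0x14-0x17 are synchronous external aborts taken on a translation table walk at levels 0-3, and 0x18/0x1c-0x1f are the parity/ECC equivalents. A minimal sketch of how a handler might classify them, using a hypothetical helper name:

static bool fsc_is_external_abort(unsigned int fsc)
{
    /* Hypothetical helper, for illustration only. */
    return fsc == FSC_SEA ||                               /* plain SEA */
           (fsc >= FSC_SEA_TTW0 && fsc <= FSC_SEA_TTW3) || /* SEA on table walk */
           fsc == FSC_SECC ||                              /* parity/ECC error */
           (fsc >= FSC_SECC_TTW0 && fsc <= FSC_SECC_TTW3);
}
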
@@ -22,6 +22,11 @@ extern void (*arm_pm_idle)(void);
 extern unsigned int user_debug;
+static inline int handle_guest_sea(phys_addr_t addr, unsigned int esr)
+{
+    return -1;
+}
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_ARM_SYSTEM_MISC_H */
@@ -552,7 +552,7 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
     return 0;
 }
-static struct of_device_id armv6_pmu_of_device_ids[] = {
+static const struct of_device_id armv6_pmu_of_device_ids[] = {
     {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
     {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
     {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
...
@@ -3,6 +3,7 @@ config ARM64
     select ACPI_CCA_REQUIRED if ACPI
     select ACPI_GENERIC_GSI if ACPI
     select ACPI_GTDT if ACPI
+    select ACPI_IORT if ACPI
     select ACPI_REDUCED_HARDWARE_ONLY if ACPI
     select ACPI_MCFG if ACPI
     select ACPI_SPCR_TABLE if ACPI
@@ -19,7 +20,9 @@ config ARM64
     select ARCH_HAS_STRICT_KERNEL_RWX
     select ARCH_HAS_STRICT_MODULE_RWX
     select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+    select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA
     select ARCH_USE_CMPXCHG_LOCKREF
+    select ARCH_SUPPORTS_MEMORY_FAILURE
     select ARCH_SUPPORTS_ATOMIC_RMW
     select ARCH_SUPPORTS_NUMA_BALANCING
     select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
@@ -93,6 +96,7 @@ config ARM64
     select HAVE_IRQ_TIME_ACCOUNTING
     select HAVE_MEMBLOCK
     select HAVE_MEMBLOCK_NODE_MAP if NUMA
+    select HAVE_NMI if ACPI_APEI_SEA
     select HAVE_PATA_PLATFORM
     select HAVE_PERF_EVENTS
     select HAVE_PERF_REGS
@@ -245,6 +249,9 @@ config PGTABLE_LEVELS
 config ARCH_SUPPORTS_UPROBES
     def_bool y
+config ARCH_PROC_KCORE_TEXT
+    def_bool y
+
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
@@ -983,7 +990,7 @@ config RANDOMIZE_BASE
 config RANDOMIZE_MODULE_REGION_FULL
     bool "Randomize the module region independently from the core kernel"
-    depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
+    depends on RANDOMIZE_BASE
     default y
     help
       Randomizes the location of the module region without considering the
...
@@ -52,17 +52,19 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS += -mbig-endian
+CHECKFLAGS += -D__AARCH64EB__
 AS += -EB
 LD += -EB
 UTS_MACHINE := aarch64_be
 else
 KBUILD_CPPFLAGS += -mlittle-endian
+CHECKFLAGS += -D__AARCH64EL__
 AS += -EL
 LD += -EL
 UTS_MACHINE := aarch64
 endif
-CHECKFLAGS += -D__aarch64__
+CHECKFLAGS += -D__aarch64__ -m64
 ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
 KBUILD_CFLAGS_MODULE += -mcmodel=large
@@ -70,6 +72,9 @@ endif
 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
+ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
+KBUILD_LDFLAGS_MODULE += $(objtree)/arch/arm64/kernel/ftrace-mod.o
+endif
 endif
 # Default value
...
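
Why the CHECKFLAGS additions matter (an aside, not part of the diff): sparse does not inherit -mbig-endian/-mlittle-endian from the compiler, so without __AARCH64EB__/__AARCH64EL__ defined it cannot evaluate endianness-dependent code consistently with the __le32/__be32 annotations fixed elsewhere in this series. A minimal illustration of the kind of mis-annotation a `make C=1` run can then flag (hypothetical snippet, not from this diff):

static u32 read_le_field(const __le32 *p)
{
    return le32_to_cpu(*p);  /* correct: explicit conversion */
    /* "return *p;" would draw a sparse warning about a restricted
     * __le32 value being used as a plain u32. */
}
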
@@ -42,7 +42,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
     } while (--ihl);
     sum += ((sum >> 32) | (sum << 32));
-    return csum_fold(sum >> 32);
+    return csum_fold((__force u32)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
...
@@ -48,8 +48,6 @@ void arch_teardown_dma_ops(struct device *dev);
 /* do not use this function in a driver */
 static inline bool is_device_dma_coherent(struct device *dev)
 {
-    if (!dev)
-        return false;
     return dev->archdata.dma_coherent;
 }
...
@@ -142,6 +142,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
 ({ \
     clear_bit(TIF_32BIT, &current->mm->context.flags); \
     clear_thread_flag(TIF_32BIT); \
+    current->personality &= ~READ_IMPLIES_EXEC; \
 })
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
@@ -187,6 +188,11 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
     ((x)->e_flags & EF_ARM_EABI_MASK))
 #define compat_start_thread compat_start_thread
+/*
+ * Unlike the native SET_PERSONALITY macro, the compat version inherits
+ * READ_IMPLIES_EXEC across a fork() since this is the behaviour on
+ * arch/arm/.
+ */
 #define COMPAT_SET_PERSONALITY(ex) \
 ({ \
     set_bit(TIF_32BIT, &current->mm->context.flags); \
...
@@ -83,6 +83,7 @@
 #define ESR_ELx_WNR (UL(1) << 6)
 /* Shared ISS field definitions for Data/Instruction aborts */
+#define ESR_ELx_FnV (UL(1) << 10)
 #define ESR_ELx_EA (UL(1) << 9)
 #define ESR_ELx_S1PTW (UL(1) << 7)
...
@@ -48,16 +48,16 @@ do { \
 } while (0)
 static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
 {
     int op = (encoded_op >> 28) & 7;
     int cmp = (encoded_op >> 24) & 15;
-    int oparg = (encoded_op << 8) >> 20;
-    int cmparg = (encoded_op << 20) >> 20;
+    int oparg = (int)(encoded_op << 8) >> 20;
+    int cmparg = (int)(encoded_op << 20) >> 20;
     int oldval = 0, ret, tmp;
     if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-        oparg = 1 << oparg;
+        oparg = 1U << (oparg & 0x1f);
     if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
         return -EFAULT;
...
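
A worked example of why the (int) casts are needed (illustrative values, not part of the diff): oparg and cmparg are 12-bit signed fields packed into encoded_op, so recovering them requires an arithmetic right shift on a signed type.

#include <stdio.h>

int main(void)
{
    /* encoded_op layout: op[31:28] cmp[27:24] oparg[23:12] cmparg[11:0] */
    unsigned int encoded_op = 0x10fff000;  /* oparg field = 0xfff, i.e. -1 */
    int oparg = (int)(encoded_op << 8) >> 20;

    /* Prints -1: the signed cast makes the right shift sign-extend.
     * Without it, the unsigned shifts would yield 4095, and a later
     * "1 << oparg" would be undefined for out-of-range values, which
     * is what the added (oparg & 0x1f) mask guards against. */
    printf("%d\n", oparg);
    return 0;
}
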
@@ -204,6 +204,16 @@
 #define FSC_FAULT ESR_ELx_FSC_FAULT
 #define FSC_ACCESS ESR_ELx_FSC_ACCESS
 #define FSC_PERM ESR_ELx_FSC_PERM
+#define FSC_SEA ESR_ELx_FSC_EXTABT
+#define FSC_SEA_TTW0 (0x14)
+#define FSC_SEA_TTW1 (0x15)
+#define FSC_SEA_TTW2 (0x16)
+#define FSC_SEA_TTW3 (0x17)
+#define FSC_SECC (0x18)
+#define FSC_SECC_TTW0 (0x1c)
+#define FSC_SECC_TTW1 (0x1d)
+#define FSC_SECC_TTW2 (0x1e)
+#define FSC_SECC_TTW3 (0x1f)
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK (~UL(0xf))
...
@@ -30,6 +30,9 @@ struct mod_plt_sec {
 struct mod_arch_specific {
     struct mod_plt_sec core;
     struct mod_plt_sec init;
+
+    /* for CONFIG_DYNAMIC_FTRACE */
+    void *ftrace_trampoline;
 };
 #endif
...
@@ -441,7 +441,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 #define pud_none(pud) (!pud_val(pud))
 #define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
-#define pud_present(pud) (pud_val(pud))
+#define pud_present(pud) pte_present(pud_pte(pud))
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
...
@@ -104,6 +104,9 @@ struct thread_struct {
 #define task_user_tls(t) (&(t)->thread.tp_value)
 #endif
+/* Sync TPIDR_EL0 back to thread_struct for current */
+void tls_preserve_current_state(void);
+
 #define INIT_THREAD { }
 static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
...
@@ -30,5 +30,6 @@ struct stackframe {
 extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
 extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                 int (*fn)(struct stackframe *, void *), void *data);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 #endif /* __ASM_STACKTRACE_H */
@@ -40,7 +40,7 @@ void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
                int sig, int code, const char *name);
 struct mm_struct;
-extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void show_pte(unsigned long addr);
 extern void __show_regs(struct pt_regs *);
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
@@ -56,6 +56,8 @@ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
     __show_ratelimited; \
 })
+int handle_guest_sea(phys_addr_t addr, unsigned int esr);
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SYSTEM_MISC_H */
@@ -33,6 +33,26 @@ struct sigcontext {
     __u8 __reserved[4096] __attribute__((__aligned__(16)));
 };
+/*
+ * Allocation of __reserved[]:
+ * (Note: records do not necessarily occur in the order shown here.)
+ *
+ *	size		description
+ *
+ *	0x210		fpsimd_context
+ *	0x10		esr_context
+ *	0x20		extra_context (optional)
+ *	0x10		terminator (null _aarch64_ctx)
+ *
+ *	0xdb0		(reserved for future allocation)
+ *
+ * New records that can exceed this space need to be opt-in for userspace, so
+ * that an expanded signal frame is not generated unexpectedly. The mechanism
+ * for opting in will depend on the extension that generates each new record.
+ * The above table documents the maximum set and sizes of records that can be
+ * generated when userspace does not opt in for any such extension.
+ */
+
 /*
  * Header to be used at the beginning of structures extending the user
  * context. Such structures must be placed after the rt_sigframe on the stack
@@ -61,4 +81,39 @@ struct esr_context {
     __u64 esr;
 };
+/*
+ * extra_context: describes extra space in the signal frame for
+ * additional structures that don't fit in sigcontext.__reserved[].
+ *
+ * Note:
+ *
+ * 1) fpsimd_context, esr_context and extra_context must be placed in
+ * sigcontext.__reserved[] if present. They cannot be placed in the
+ * extra space. Any other record can be placed either in the extra
+ * space or in sigcontext.__reserved[], unless otherwise specified in
+ * this file.
+ *
+ * 2) There must not be more than one extra_context.
+ *
+ * 3) If extra_context is present, it must be followed immediately in
+ * sigcontext.__reserved[] by the terminating null _aarch64_ctx.
+ *
+ * 4) The extra space to which datap points must start at the first
+ * 16-byte aligned address immediately after the terminating null
+ * _aarch64_ctx that follows the extra_context structure in
+ * __reserved[]. The extra space may overrun the end of __reserved[],
+ * as indicated by a sufficiently large value for the size field.
+ *
+ * 5) The extra space must itself be terminated with a null
+ * _aarch64_ctx.
+ */
+#define EXTRA_MAGIC 0x45585401
+
+struct extra_context {
+    struct _aarch64_ctx head;
+    __u64 datap; /* 16-byte aligned pointer to extra space cast to __u64 */
+    __u32 size; /* size in bytes of the extra space */
+    __u32 __reserved[3];
+};
+
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
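
As a quick arithmetic check on the allocation table above (an aside, not part of the diff), the documented record sizes plus the reserved tail account exactly for the 4096-byte __reserved[] array:

#include <assert.h>

int main(void)
{
    /* Sizes copied from the table in the comment above. */
    unsigned int used = 0x210   /* fpsimd_context */
                      + 0x10    /* esr_context */
                      + 0x20    /* extra_context (optional) */
                      + 0x10;   /* terminator (null _aarch64_ctx) */

    assert(used + 0xdb0 == 4096);  /* 0x250 + 0xdb0 == 0x1000 */
    return 0;
}
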
@@ -62,3 +62,6 @@ extra-y += $(head-y) vmlinux.lds
 ifeq ($(CONFIG_DEBUG_EFI),y)
 AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
 endif
+
+# will be included by each individual module but not by the core kernel itself
+extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o
@@ -71,7 +71,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
 {
     struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
     struct parking_protocol_mailbox __iomem *mailbox;
-    __le32 cpu_id;
+    u32 cpu_id;
     /*
      * Map mailbox memory with attribute device nGnRE (ie ioremap -
@@ -123,9 +123,9 @@ static void acpi_parking_protocol_cpu_postboot(void)
     int cpu = smp_processor_id();
     struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
     struct parking_protocol_mailbox __iomem *mailbox = cpu_entry->mailbox;
-    __le64 entry_point;
+    u64 entry_point;
-    entry_point = readl_relaxed(&mailbox->entry_point);
+    entry_point = readq_relaxed(&mailbox->entry_point);
     /*
      * Check if firmware has cleared the entry_point as expected
      * by the protocol specification.
...
@@ -28,7 +28,7 @@
 #include <asm/sections.h>
 #include <linux/stop_machine.h>
-#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
+#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
 #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
@@ -60,7 +60,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
-static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
+static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
 {
     u32 insn;
@@ -109,7 +109,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
 {
     struct alt_instr *alt;
     struct alt_region *region = alt_region;
-    u32 *origptr, *replptr, *updptr;
+    __le32 *origptr, *replptr, *updptr;
     for (alt = region->begin; alt < region->end; alt++) {
         u32 insn;
@@ -124,7 +124,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
         origptr = ALT_ORIG_PTR(alt);
         replptr = ALT_REPL_PTR(alt);
-        updptr = use_linear_alias ? (u32 *)lm_alias(origptr) : origptr;
+        updptr = use_linear_alias ? lm_alias(origptr) : origptr;
         nr_inst = alt->alt_len / sizeof(insn);
         for (i = 0; i < nr_inst; i++) {
...
@@ -51,6 +51,25 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcaps);
+static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
+{
+    /* file-wide pr_fmt adds "CPU features: " prefix */
+    pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
+    return 0;
+}
+
+static struct notifier_block cpu_hwcaps_notifier = {
+    .notifier_call = dump_cpu_hwcaps
+};
+
+static int __init register_cpu_hwcaps_dumper(void)
+{
+    atomic_notifier_chain_register(&panic_notifier_list,
+                       &cpu_hwcaps_notifier);
+    return 0;
+}
+__initcall(register_cpu_hwcaps_dumper);
+
 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcap_keys);
@@ -639,8 +658,10 @@ void update_cpu_features(int cpu,
      * Mismatched CPU features are a recipe for disaster. Don't even
      * pretend to support them.
      */
-    WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
-            "Unsupported CPU feature variation.\n");
+    if (taint) {
+        pr_warn_once("Unsupported CPU feature variation detected.\n");
+        add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+    }
 }
 u64 read_sanitised_ftr_reg(u32 id)
...
@@ -227,7 +227,7 @@ static struct attribute *cpuregs_id_attrs[] = {
     NULL
 };
-static struct attribute_group cpuregs_attr_group = {
+static const struct attribute_group cpuregs_attr_group = {
     .attrs = cpuregs_id_attrs,
     .name = "identification"
 };
...
@@ -341,20 +341,22 @@ int aarch32_break_handler(struct pt_regs *regs)
     if (compat_thumb_mode(regs)) {
         /* get 16-bit Thumb instruction */
-        get_user(thumb_instr, (u16 __user *)pc);
-        thumb_instr = le16_to_cpu(thumb_instr);
+        __le16 instr;
+        get_user(instr, (__le16 __user *)pc);
+        thumb_instr = le16_to_cpu(instr);
         if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
             /* get second half of 32-bit Thumb-2 instruction */
-            get_user(thumb_instr, (u16 __user *)(pc + 2));
-            thumb_instr = le16_to_cpu(thumb_instr);
+            get_user(instr, (__le16 __user *)(pc + 2));
+            thumb_instr = le16_to_cpu(instr);
             bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
         } else {
             bp = thumb_instr == AARCH32_BREAK_THUMB;
         }
     } else {
         /* 32-bit ARM instruction */
-        get_user(arm_instr, (u32 __user *)pc);
-        arm_instr = le32_to_cpu(arm_instr);
+        __le32 instr;
+        get_user(instr, (__le32 __user *)pc);
+        arm_instr = le32_to_cpu(instr);
         bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
     }
...
/*
* Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.section ".text.ftrace_trampoline", "ax"
.align 3
0: .quad 0
__ftrace_trampoline:
ldr x16, 0b
br x16
ENDPROC(__ftrace_trampoline)
@@ -10,10 +10,12 @@
  */
 #include <linux/ftrace.h>
+#include <linux/module.h>
 #include <linux/swab.h>
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
 #include <asm/ftrace.h>
 #include <asm/insn.h>
@@ -70,6 +72,58 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
     unsigned long pc = rec->ip;
     u32 old, new;
+    long offset = (long)pc - (long)addr;
+
+    if (offset < -SZ_128M || offset >= SZ_128M) {
+#ifdef CONFIG_ARM64_MODULE_PLTS
+        unsigned long *trampoline;
+        struct module *mod;
+
+        /*
+         * On kernels that support module PLTs, the offset between the
+         * branch instruction and its target may legally exceed the
+         * range of an ordinary relative 'bl' opcode. In this case, we
+         * need to branch via a trampoline in the module.
+         *
+         * NOTE: __module_text_address() must be called with preemption
+         * disabled, but we can rely on ftrace_lock to ensure that 'mod'
+         * retains its validity throughout the remainder of this code.
+         */
+        preempt_disable();
+        mod = __module_text_address(pc);
+        preempt_enable();
+
+        if (WARN_ON(!mod))
+            return -EINVAL;
+
+        /*
+         * There is only one ftrace trampoline per module. For now,
+         * this is not a problem since on arm64, all dynamic ftrace
+         * invocations are routed via ftrace_caller(). This will need
+         * to be revisited if support for multiple ftrace entry points
+         * is added in the future, but for now, the pr_err() below
+         * deals with a theoretical issue only.
+         */
+        trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
+        if (trampoline[0] != addr) {
+            if (trampoline[0] != 0) {
+                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
+                return -EINVAL;
+            }
+
+            /* point the trampoline to our ftrace entry point */
+            module_disable_ro(mod);
+            trampoline[0] = addr;
+            module_enable_ro(mod, true);
+
+            /* update trampoline before patching in the branch */
+            smp_wmb();
+        }
+        addr = (unsigned long)&trampoline[1];
+#else /* CONFIG_ARM64_MODULE_PLTS */
+        return -EINVAL;
+#endif /* CONFIG_ARM64_MODULE_PLTS */
+    }
+
     old = aarch64_insn_gen_nop();
     new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
@@ -84,12 +138,55 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
             unsigned long addr)
 {
     unsigned long pc = rec->ip;
-    u32 old, new;
+    bool validate = true;
+    u32 old = 0, new;
+    long offset = (long)pc - (long)addr;
+
+    if (offset < -SZ_128M || offset >= SZ_128M) {
+#ifdef CONFIG_ARM64_MODULE_PLTS
+        u32 replaced;
+
+        /*
+         * 'mod' is only set at module load time, but if we end up
+         * dealing with an out-of-range condition, we can assume it
+         * is due to a module being loaded far away from the kernel.
+         */
+        if (!mod) {
+            preempt_disable();
+            mod = __module_text_address(pc);
+            preempt_enable();
+
+            if (WARN_ON(!mod))
+                return -EINVAL;
+        }
+
+        /*
+         * The instruction we are about to patch may be a branch and
+         * link instruction that was redirected via a PLT entry. In
+         * this case, the normal validation will fail, but we can at
+         * least check that we are dealing with a branch and link
+         * instruction that points into the right module.
+         */
+        if (aarch64_insn_read((void *)pc, &replaced))
+            return -EFAULT;
+
+        if (!aarch64_insn_is_bl(replaced) ||
+            !within_module(pc + aarch64_get_branch_offset(replaced),
+                   mod))
+            return -EINVAL;
+
+        validate = false;
+#else /* CONFIG_ARM64_MODULE_PLTS */
+        return -EINVAL;
+#endif /* CONFIG_ARM64_MODULE_PLTS */
+    } else {
+        old = aarch64_insn_gen_branch_imm(pc, addr,
+                          AARCH64_INSN_BRANCH_LINK);
+    }
-    old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
     new = aarch64_insn_gen_nop();
-    return ftrace_modify_code(pc, old, new, true);
+    return ftrace_modify_code(pc, old, new, validate);
 }
 void arch_ftrace_update_code(int command)
...
@@ -117,7 +117,7 @@ static void __kprobes patch_unmap(int fixmap)
 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
 {
     int ret;
-    u32 val;
+    __le32 val;
     ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
     if (!ret)
@@ -126,7 +126,7 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
     return ret;
 }
-static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
 {
     void *waddr = addr;
     unsigned long flags = 0;
@@ -145,8 +145,7 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
 int __kprobes aarch64_insn_write(void *addr, u32 insn)
 {
-    insn = cpu_to_le32(insn);
-    return __aarch64_insn_write(addr, insn);
+    return __aarch64_insn_write(addr, cpu_to_le32(insn));
 }
 static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
...
@@ -27,7 +27,7 @@ u16 __initdata memstart_offset_seed;
 static __init u64 get_kaslr_seed(void *fdt)
 {
     int node, len;
-    u64 *prop;
+    fdt64_t *prop;
     u64 ret;
     node = fdt_path_offset(fdt, "/chosen");
...
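
The hunk above only shows the declaration change, but the point of fdt64_t is that FDT properties are stored big-endian on every host. A hedged sketch of the consuming side (assuming libfdt's fdt_getprop_w() and fdt64_to_cpu(), which is how such a seed is normally read; the actual function body is not shown in this hunk):

    fdt64_t *prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
    u64 seed = 0;

    if (prop && len == sizeof(*prop))
        seed = fdt64_to_cpu(*prop);  /* big-endian property -> CPU order */
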
@@ -74,7 +74,7 @@ enum aarch64_reloc_op {
     RELOC_OP_PAGE,
 };
-static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
+static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
 {
     switch (reloc_op) {
     case RELOC_OP_ABS:
@@ -121,12 +121,12 @@ enum aarch64_insn_movw_imm_type {
     AARCH64_INSN_IMM_MOVKZ,
 };
-static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
+static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
                int lsb, enum aarch64_insn_movw_imm_type imm_type)
 {
     u64 imm;
     s64 sval;
-    u32 insn = le32_to_cpu(*(u32 *)place);
+    u32 insn = le32_to_cpu(*place);
     sval = do_reloc(op, place, val);
     imm = sval >> lsb;
@@ -154,7 +154,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
     /* Update the instruction with the new encoding. */
     insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
-    *(u32 *)place = cpu_to_le32(insn);
+    *place = cpu_to_le32(insn);
     if (imm > U16_MAX)
         return -ERANGE;
@@ -162,12 +162,12 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
     return 0;
 }
-static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
+static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
               int lsb, int len, enum aarch64_insn_imm_type imm_type)
 {
     u64 imm, imm_mask;
     s64 sval;
-    u32 insn = le32_to_cpu(*(u32 *)place);
+    u32 insn = le32_to_cpu(*place);
     /* Calculate the relocation value. */
     sval = do_reloc(op, place, val);
@@ -179,7 +179,7 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
     /* Update the instruction's immediate field. */
     insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
-    *(u32 *)place = cpu_to_le32(insn);
+    *place = cpu_to_le32(insn);
     /*
      * Extract the upper value bits (including the sign bit) and
@@ -420,8 +420,12 @@ int module_finalize(const Elf_Ehdr *hdr,
     for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
         if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
             apply_alternatives((void *)s->sh_addr, s->sh_size);
-            return 0;
         }
+#ifdef CONFIG_ARM64_MODULE_PLTS
+        if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
+            !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
+            me->arch.ftrace_trampoline = (void *)s->sh_addr;
+#endif
     }
     return 0;
...
@@ -108,7 +108,10 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
     if (!acpi_disabled) {
         struct pci_config_window *cfg = bridge->bus->sysdata;
         struct acpi_device *adev = to_acpi_device(cfg->parent);
+        struct device *bus_dev = &bridge->bus->dev;
+
         ACPI_COMPANION_SET(&bridge->dev, adev);
+        set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
     }
     return 0;
...
@@ -529,7 +529,7 @@ static struct attribute_group armv8_pmuv3_events_attr_group = {
     .is_visible = armv8pmu_event_attr_is_visible,
 };
-PMU_FORMAT_ATTR(event, "config:0-9");
+PMU_FORMAT_ATTR(event, "config:0-15");
 static struct attribute *armv8_pmuv3_format_attrs[] = {
     &format_attr_event.attr,
...
@@ -522,9 +522,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
         pr_err("current sp %lx does not match saved sp %lx\n",
                orig_sp, stack_addr);
         pr_err("Saved registers for jprobe %p\n", jp);
-        show_regs(saved_regs);
+        __show_regs(saved_regs);
         pr_err("Current registers\n");
-        show_regs(regs);
+        __show_regs(regs);
         BUG();
     }
     unpause_graph_tracing();
...
@@ -210,6 +210,7 @@ void __show_regs(struct pt_regs *regs)
 void show_regs(struct pt_regs * regs)
 {
     __show_regs(regs);
+    dump_backtrace(regs, NULL);
 }
 static void tls_thread_flush(void)
@@ -297,12 +298,16 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
     return 0;
 }
+void tls_preserve_current_state(void)
+{
+    *task_user_tls(current) = read_sysreg(tpidr_el0);
+}
+
 static void tls_thread_switch(struct task_struct *next)
 {
     unsigned long tpidr, tpidrro;
-    tpidr = read_sysreg(tpidr_el0);
-    *task_user_tls(current) = tpidr;
+    tls_preserve_current_state();
     tpidr = *task_user_tls(next);
     tpidrro = is_compat_thread(task_thread_info(next)) ?
...
@@ -623,6 +623,10 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 {
     struct user_fpsimd_state *uregs;
     uregs = &target->thread.fpsimd_state.user_fpsimd;
+
+    if (target == current)
+        fpsimd_preserve_current_state();
+
     return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
 }
@@ -648,6 +652,10 @@ static int tls_get(struct task_struct *target, const struct user_regset *regset,
            void *kbuf, void __user *ubuf)
 {
     unsigned long *tls = &target->thread.tp_value;
+
+    if (target == current)
+        tls_preserve_current_state();
+
     return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
 }
@@ -894,21 +902,27 @@ static int compat_vfp_get(struct task_struct *target,
 {
     struct user_fpsimd_state *uregs;
     compat_ulong_t fpscr;
-    int ret;
+    int ret, vregs_end_pos;
     uregs = &target->thread.fpsimd_state.user_fpsimd;
+
+    if (target == current)
+        fpsimd_preserve_current_state();
+
     /*
      * The VFP registers are packed into the fpsimd_state, so they all sit
      * nicely together for us. We just need to create the fpscr separately.
      */
-    ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
-                  VFP_STATE_SIZE - sizeof(compat_ulong_t));
+    vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
+    ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
+                  0, vregs_end_pos);
     if (count && !ret) {
         fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
             (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
-        ret = put_user(fpscr, (compat_ulong_t *)ubuf);
+
+        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
+                      vregs_end_pos, VFP_STATE_SIZE);
     }
     return ret;
@@ -921,21 +935,22 @@ static int compat_vfp_set(struct task_struct *target,
 {
     struct user_fpsimd_state *uregs;
     compat_ulong_t fpscr;
-    int ret;
+    int ret, vregs_end_pos;
-    if (pos + count > VFP_STATE_SIZE)
-        return -EIO;
     uregs = &target->thread.fpsimd_state.user_fpsimd;
+    vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
     ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
-                 VFP_STATE_SIZE - sizeof(compat_ulong_t));
+                 vregs_end_pos);
     if (count && !ret) {
-        ret = get_user(fpscr, (compat_ulong_t *)ubuf);
+        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
+                     vregs_end_pos, VFP_STATE_SIZE);
+        if (!ret) {
             uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
             uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
         }
+    }
     fpsimd_flush_task_state(target);
     return ret;
...
@@ -194,6 +194,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
     }
     name = of_flat_dt_get_machine_name();
+    if (!name)
+        return;
+
     pr_info("Machine model: %s\n", name);
     dump_stack_set_arch_desc("%s (DT)", name);
 }
...
@@ -210,6 +210,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
     put_task_stack(tsk);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 void save_stack_trace(struct stack_trace *trace)
 {
...
@@ -140,7 +140,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
     }
 }
-static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
     struct stackframe frame;
     unsigned long irq_stack_ptr;
@@ -344,22 +344,24 @@ static int call_undef_hook(struct pt_regs *regs)
     if (compat_thumb_mode(regs)) {
         /* 16-bit Thumb instruction */
-        if (get_user(instr, (u16 __user *)pc))
+        __le16 instr_le;
+        if (get_user(instr_le, (__le16 __user *)pc))
             goto exit;
-        instr = le16_to_cpu(instr);
+        instr = le16_to_cpu(instr_le);
         if (aarch32_insn_is_wide(instr)) {
             u32 instr2;
-            if (get_user(instr2, (u16 __user *)(pc + 2)))
+            if (get_user(instr_le, (__le16 __user *)(pc + 2)))
                 goto exit;
-            instr2 = le16_to_cpu(instr2);
+            instr2 = le16_to_cpu(instr_le);
             instr = (instr << 16) | instr2;
         }
     } else {
         /* 32-bit ARM instruction */
-        if (get_user(instr, (u32 __user *)pc))
+        __le32 instr_le;
+        if (get_user(instr_le, (__le32 __user *)pc))
             goto exit;
-        instr = le32_to_cpu(instr);
+        instr = le32_to_cpu(instr_le);
     }
     raw_spin_lock_irqsave(&undef_lock, flags);
@@ -728,8 +730,6 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
         break;
     case BUG_TRAP_TYPE_WARN:
-        /* Ideally, report_bug() should backtrace for us... but no. */
-        dump_backtrace(regs, NULL);
         break;
     default:
...
@@ -37,7 +37,7 @@
 #include <asm/vdso.h>
 #include <asm/vdso_datapage.h>
-extern char vdso_start, vdso_end;
+extern char vdso_start[], vdso_end[];
 static unsigned long vdso_pages __ro_after_init;
 /*
@@ -125,14 +125,14 @@ static int __init vdso_init(void)
     struct page **vdso_pagelist;
     unsigned long pfn;
-    if (memcmp(&vdso_start, "\177ELF", 4)) {
+    if (memcmp(vdso_start, "\177ELF", 4)) {
         pr_err("vDSO is not a valid ELF object!\n");
         return -EINVAL;
     }
-    vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+    vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
     pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
-        vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
+        vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
     /* Allocate the vDSO pagelist, plus a page for the data. */
     vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -145,7 +145,7 @@ static int __init vdso_init(void)
     /* Grab the vDSO code pages. */
-    pfn = sym_to_pfn(&vdso_start);
+    pfn = sym_to_pfn(vdso_start);
     for (i = 0; i < vdso_pages; i++)
         vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
...
@@ -95,11 +95,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
                   dma_addr_t *dma_handle, gfp_t flags,
                   unsigned long attrs)
 {
-    if (dev == NULL) {
-        WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
-        return NULL;
-    }
-
     if (IS_ENABLED(CONFIG_ZONE_DMA) &&
         dev->coherent_dma_mask <= DMA_BIT_MASK(32))
         flags |= GFP_DMA;
@@ -128,10 +123,6 @@ static void __dma_free_coherent(struct device *dev, size_t size,
     bool freed;
     phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-    if (dev == NULL) {
-        WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
-        return;
-    }
-
     freed = dma_release_from_contiguous(dev,
                     phys_to_page(paddr),
...
@@ -136,36 +136,27 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
     pgd_t *pgd;
     pud_t *pud;
-    pmd_t *pmd = NULL;
-    pte_t *pte = NULL;
+    pmd_t *pmd;
     pgd = pgd_offset(mm, addr);
     pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
     if (!pgd_present(*pgd))
         return NULL;
     pud = pud_offset(pgd, addr);
-    if (!pud_present(*pud))
+    if (pud_none(*pud))
         return NULL;
+    /* swap or huge page */
-    if (pud_huge(*pud))
+    if (!pud_present(*pud) || pud_huge(*pud))
         return (pte_t *)pud;
+    /* table; check the next level */
     pmd = pmd_offset(pud, addr);
-    if (!pmd_present(*pmd))
+    if (pmd_none(*pmd))
         return NULL;
+    if (!pmd_present(*pmd) || pmd_huge(*pmd))
-    if (pte_cont(pmd_pte(*pmd))) {
-        pmd = pmd_offset(
-            pud, (addr & CONT_PMD_MASK));
         return (pte_t *)pmd;
-    }
-    if (pmd_huge(*pmd))
-        return (pte_t *)pmd;
-    pte = pte_offset_kernel(pmd, addr);
-    if (pte_present(*pte) && pte_cont(*pte)) {
-        pte = pte_offset_kernel(
-            pmd, (addr & CONT_PTE_MASK));
-        return pte;
-    }
     return NULL;
 }
...
@@ -18,6 +18,7 @@
 #include <linux/elf.h>
 #include <linux/fs.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/export.h>
@@ -103,12 +104,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  */
 int valid_phys_addr_range(phys_addr_t addr, size_t size)
 {
-    if (addr < PHYS_OFFSET)
-        return 0;
-    if (addr + size > __pa(high_memory - 1) + 1)
-        return 0;
-
-    return 1;
+    /*
+     * Check whether addr is covered by a memory region without the
+     * MEMBLOCK_NOMAP attribute, and whether that region covers the
+     * entire range. In theory, this could lead to false negatives
+     * if the range is covered by distinct but adjacent memory regions
+     * that only differ in other attributes. However, few of such
+     * attributes have been defined, and it is debatable whether it
+     * follows that /dev/mem read() calls should be able to traverse
+     * such boundaries.
+     */
+    return memblock_is_region_memory(addr, size) &&
+           memblock_is_map_memory(addr);
 }
 /*
...
@@ -31,6 +31,7 @@
 #include <linux/fs.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/vmalloc.h>
 #include <asm/barrier.h>
 #include <asm/cputype.h>
...
@@ -70,7 +70,7 @@ struct jit_ctx {
     int idx;
     int epilogue_offset;
     int *offset;
-    u32 *image;
+    __le32 *image;
     u32 stack_size;
 };
@@ -131,7 +131,7 @@ static inline int bpf2a64_offset(int bpf_to, int bpf_from,
 static void jit_fill_hole(void *area, unsigned int size)
 {
-    u32 *ptr;
+    __le32 *ptr;
     /* We are guaranteed to have aligned memory. */
     for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
         *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
@@ -874,7 +874,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
     /* 2. Now, the actual pass. */
-    ctx.image = (u32 *)image_ptr;
+    ctx.image = (__le32 *)image_ptr;
     ctx.idx = 0;
     build_prologue(&ctx);
...
@@ -39,6 +39,21 @@ config ACPI_APEI_PCIEAER
       PCIe AER errors may be reported via APEI firmware first mode.
       Turn on this option to enable the corresponding support.
+config ACPI_APEI_SEA
+    bool "APEI Synchronous External Abort logging/recovering support"
+    depends on ARM64 && ACPI_APEI_GHES
+    default y
+    help
+      This option should be enabled if the system supports
+      firmware first handling of SEA (Synchronous External Abort).
+      SEA happens with certain faults of data abort or instruction
+      abort synchronous exceptions on ARMv8 systems. If a system
+      supports firmware first handling of SEA, the platform analyzes
+      and handles hardware error notifications from SEA, and it may then
+      form a HW error record for the OS to parse and handle. This
+      option allows the OS to look for such hardware error record, and
+      take appropriate action.
+
 config ACPI_APEI_MEMORY_FAILURE
     bool "APEI memory error recovering support"
     depends on ACPI_APEI && MEMORY_FAILURE
...
@@ -52,6 +52,7 @@ static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
     [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
     [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
     [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
+    [ACPI_HEST_TYPE_GENERIC_ERROR_V2] = sizeof(struct acpi_hest_generic_v2),
 };
 static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
@@ -141,7 +142,8 @@ static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void
 {
     int *count = data;
-    if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR)
+    if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR ||
+        hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2)
         (*count)++;
     return 0;
 }
@@ -152,7 +154,8 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
     struct ghes_arr *ghes_arr = data;
     int rc, i;
-    if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
+    if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR &&
+        hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR_V2)
         return 0;
     if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
...
@@ -234,21 +234,6 @@ static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
     return NULL;
 }
-static acpi_status
-iort_match_type_callback(struct acpi_iort_node *node, void *context)
-{
-    return AE_OK;
-}
-
-bool iort_node_match(u8 type)
-{
-    struct acpi_iort_node *node;
-
-    node = iort_scan_node(type, iort_match_type_callback, NULL);
-
-    return node != NULL;
-}
-
 static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
                         void *context)
 {
...
@@ -17,6 +17,8 @@ config DEVMEM
 config DEVKMEM
     bool "/dev/kmem virtual device support"
+    # On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write
+    depends on !ARM64
     help
       Say Y here if you want to support the /dev/kmem device. The
       /dev/kmem device is rarely used, but can be used for certain
...
...@@ -32,6 +32,10 @@ ...@@ -32,6 +32,10 @@
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/aer.h> #include <linux/aer.h>
#include <linux/printk.h>
#include <linux/bcd.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
#define INDENT_SP " " #define INDENT_SP " "
...@@ -107,12 +111,15 @@ void cper_print_bits(const char *pfx, unsigned int bits, ...@@ -107,12 +111,15 @@ void cper_print_bits(const char *pfx, unsigned int bits,
static const char * const proc_type_strs[] = { static const char * const proc_type_strs[] = {
"IA32/X64", "IA32/X64",
"IA64", "IA64",
"ARM",
}; };
static const char * const proc_isa_strs[] = { static const char * const proc_isa_strs[] = {
"IA32", "IA32",
"IA64", "IA64",
"X64", "X64",
"ARM A32/T32",
"ARM A64",
}; };
static const char * const proc_error_type_strs[] = { static const char * const proc_error_type_strs[] = {
...@@ -181,6 +188,122 @@ static void cper_print_proc_generic(const char *pfx, ...@@ -181,6 +188,122 @@ static void cper_print_proc_generic(const char *pfx,
printk("%s""IP: 0x%016llx\n", pfx, proc->ip); printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
} }
#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
static const char * const arm_reg_ctx_strs[] = {
"AArch32 general purpose registers",
"AArch32 EL1 context registers",
"AArch32 EL2 context registers",
"AArch32 secure context registers",
"AArch64 general purpose registers",
"AArch64 EL1 context registers",
"AArch64 EL2 context registers",
"AArch64 EL3 context registers",
"Misc. system register structure",
};
static void cper_print_proc_arm(const char *pfx,
const struct cper_sec_proc_arm *proc)
{
int i, len, max_ctx_type;
struct cper_arm_err_info *err_info;
struct cper_arm_ctx_info *ctx_info;
char newpfx[64];
printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
len = proc->section_length - (sizeof(*proc) +
proc->err_info_num * (sizeof(*err_info)));
if (len < 0) {
printk("%ssection length: %d\n", pfx, proc->section_length);
printk("%ssection length is too small\n", pfx);
printk("%sfirmware-generated error record is incorrect\n", pfx);
printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
return;
}
if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
pfx, proc->mpidr);
if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
printk("%serror affinity level: %d\n", pfx,
proc->affinity_level);
if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
printk("%srunning state: 0x%x\n", pfx, proc->running_state);
printk("%sPower State Coordination Interface state: %d\n",
pfx, proc->psci_state);
}
snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
err_info = (struct cper_arm_err_info *)(proc + 1);
for (i = 0; i < proc->err_info_num; i++) {
printk("%sError info structure %d:\n", pfx, i);
printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
printk("%sfirst error captured\n", newpfx);
if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
printk("%slast error captured\n", newpfx);
if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
printk("%spropagated error captured\n",
newpfx);
if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
printk("%soverflow occurred, error info is incomplete\n",
newpfx);
}
printk("%serror_type: %d, %s\n", newpfx, err_info->type,
err_info->type < ARRAY_SIZE(proc_error_type_strs) ?
proc_error_type_strs[err_info->type] : "unknown");
if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO)
printk("%serror_info: 0x%016llx\n", newpfx,
err_info->error_info);
if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
printk("%svirtual fault address: 0x%016llx\n",
newpfx, err_info->virt_fault_addr);
if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
printk("%sphysical fault address: 0x%016llx\n",
newpfx, err_info->physical_fault_addr);
err_info += 1;
}
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
int size = sizeof(*ctx_info) + ctx_info->size;
printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {
printk("%ssection length is too small\n", newpfx);
printk("%sfirmware-generated error record is incorrect\n", pfx);
return;
}
if (ctx_info->type > max_ctx_type) {
printk("%sInvalid context type: %d (max: %d)\n",
newpfx, ctx_info->type, max_ctx_type);
return;
}
printk("%sregister context type: %s\n", newpfx,
arm_reg_ctx_strs[ctx_info->type]);
print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
(ctx_info + 1), ctx_info->size, 0);
len -= size;
ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
}
if (len > 0) {
printk("%sVendor specific error info has %u bytes:\n", pfx,
len);
print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
len, true);
}
}
#endif
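For reference, cper_print_proc_arm() above walks a variable-length record: a fixed section header, then err_info_num error-info structures, then context-info structures (each an 8-byte header plus ctx_info->size bytes of register payload), with whatever remains counted as vendor-specific data. A standalone sketch of that length bookkeeping, using the packed sizes UEFI 2.6 section N.2.4.4 gives for these structures and a purely hypothetical record shape:

#include <stdio.h>

#define SEC_HDR_LEN   40   /* packed sizeof(struct cper_sec_proc_arm) */
#define ERR_INFO_LEN  32   /* packed sizeof(struct cper_arm_err_info) */
#define CTX_HDR_LEN    8   /* packed sizeof(struct cper_arm_ctx_info) */

int main(void)
{
        /* hypothetical record: two error infos, one context structure
         * with 256 bytes of registers, 16 bytes of vendor data */
        int section_length = SEC_HDR_LEN + 2 * ERR_INFO_LEN +
                             CTX_HDR_LEN + 256 + 16;
        int len = section_length - (SEC_HDR_LEN + 2 * ERR_INFO_LEN);

        if (len < 0) {
                printf("section length is too small\n");
                return 1;
        }
        len -= CTX_HDR_LEN + 256;   /* consume the one context structure */
        printf("Vendor specific error info has %d bytes\n", len);
        return 0;                   /* prints 16 */
}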
static const char * const mem_err_type_strs[] = { static const char * const mem_err_type_strs[] = {
"unknown", "unknown",
"no error", "no error",
...@@ -386,13 +509,38 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, ...@@ -386,13 +509,38 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
pfx, pcie->bridge.secondary_status, pcie->bridge.control); pfx, pcie->bridge.secondary_status, pcie->bridge.control);
} }
static void cper_estatus_print_section( static void cper_print_tstamp(const char *pfx,
const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no) struct acpi_hest_generic_data_v300 *gdata)
{
__u8 hour, min, sec, day, mon, year, century, *timestamp;
if (gdata->validation_bits & ACPI_HEST_GEN_VALID_TIMESTAMP) {
timestamp = (__u8 *)&(gdata->time_stamp);
sec = bcd2bin(timestamp[0]);
min = bcd2bin(timestamp[1]);
hour = bcd2bin(timestamp[2]);
day = bcd2bin(timestamp[4]);
mon = bcd2bin(timestamp[5]);
year = bcd2bin(timestamp[6]);
century = bcd2bin(timestamp[7]);
printk("%s%ststamp: %02d%02d-%02d-%02d %02d:%02d:%02d\n", pfx,
(timestamp[3] & 0x1 ? "precise " : "imprecise "),
century, year, mon, day, hour, min, sec);
}
}
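The v300 timestamp decoded above is eight BCD-coded bytes, with bit 0 of byte 3 flagging whether the stamp is precise. A self-contained sketch of the same decode against a made-up sample value:

#include <stdio.h>

/* same conversion the kernel's bcd2bin() from <linux/bcd.h> performs */
static unsigned int bcd2bin(unsigned char v)
{
        return (v >> 4) * 10 + (v & 0x0f);
}

int main(void)
{
        /* sec, min, hour, flags, day, mon, year, century -- all BCD */
        unsigned char ts[8] = { 0x30, 0x15, 0x09, 0x01,
                                0x04, 0x07, 0x17, 0x20 };

        printf("%ststamp: %02u%02u-%02u-%02u %02u:%02u:%02u\n",
               (ts[3] & 0x1) ? "precise " : "imprecise ",
               bcd2bin(ts[7]), bcd2bin(ts[6]), bcd2bin(ts[5]), bcd2bin(ts[4]),
               bcd2bin(ts[2]), bcd2bin(ts[1]), bcd2bin(ts[0]));
        return 0;   /* prints: precise tstamp: 2017-07-04 09:15:30 */
}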
static void
cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
int sec_no)
{ {
uuid_le *sec_type = (uuid_le *)gdata->section_type; uuid_le *sec_type = (uuid_le *)gdata->section_type;
__u16 severity; __u16 severity;
char newpfx[64]; char newpfx[64];
if (acpi_hest_get_version(gdata) >= 3)
cper_print_tstamp(pfx, (struct acpi_hest_generic_data_v300 *)gdata);
severity = gdata->error_severity; severity = gdata->error_severity;
printk("%s""Error %d, type: %s\n", pfx, sec_no, printk("%s""Error %d, type: %s\n", pfx, sec_no,
cper_severity_str(severity)); cper_severity_str(severity));
...@@ -403,14 +551,16 @@ static void cper_estatus_print_section( ...@@ -403,14 +551,16 @@ static void cper_estatus_print_section(
snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP); snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) { if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1); struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
printk("%s""section_type: general processor error\n", newpfx); printk("%s""section_type: general processor error\n", newpfx);
if (gdata->error_data_length >= sizeof(*proc_err)) if (gdata->error_data_length >= sizeof(*proc_err))
cper_print_proc_generic(newpfx, proc_err); cper_print_proc_generic(newpfx, proc_err);
else else
goto err_section_too_small; goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
printk("%s""section_type: memory error\n", newpfx); printk("%s""section_type: memory error\n", newpfx);
if (gdata->error_data_length >= if (gdata->error_data_length >=
sizeof(struct cper_sec_mem_err_old)) sizeof(struct cper_sec_mem_err_old))
...@@ -419,14 +569,32 @@ static void cper_estatus_print_section( ...@@ -419,14 +569,32 @@ static void cper_estatus_print_section(
else else
goto err_section_too_small; goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
struct cper_sec_pcie *pcie = (void *)(gdata + 1); struct cper_sec_pcie *pcie = acpi_hest_get_payload(gdata);
printk("%s""section_type: PCIe error\n", newpfx); printk("%s""section_type: PCIe error\n", newpfx);
if (gdata->error_data_length >= sizeof(*pcie)) if (gdata->error_data_length >= sizeof(*pcie))
cper_print_pcie(newpfx, pcie, gdata); cper_print_pcie(newpfx, pcie, gdata);
else else
goto err_section_too_small; goto err_section_too_small;
} else #if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
printk("%s""section type: unknown, %pUl\n", newpfx, sec_type); } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: ARM processor error\n", newpfx);
if (gdata->error_data_length >= sizeof(*arm_err))
cper_print_proc_arm(newpfx, arm_err);
else
goto err_section_too_small;
#endif
} else {
const void *err = acpi_hest_get_payload(gdata);
printk("%ssection type: unknown, %pUl\n", newpfx, sec_type);
printk("%ssection length: %#x\n", newpfx,
gdata->error_data_length);
print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, err,
gdata->error_data_length, true);
}
return; return;
...@@ -438,7 +606,7 @@ void cper_estatus_print(const char *pfx, ...@@ -438,7 +606,7 @@ void cper_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus) const struct acpi_hest_generic_status *estatus)
{ {
struct acpi_hest_generic_data *gdata; struct acpi_hest_generic_data *gdata;
unsigned int data_len, gedata_len; unsigned int data_len;
int sec_no = 0; int sec_no = 0;
char newpfx[64]; char newpfx[64];
__u16 severity; __u16 severity;
...@@ -452,11 +620,11 @@ void cper_estatus_print(const char *pfx, ...@@ -452,11 +620,11 @@ void cper_estatus_print(const char *pfx,
data_len = estatus->data_length; data_len = estatus->data_length;
gdata = (struct acpi_hest_generic_data *)(estatus + 1); gdata = (struct acpi_hest_generic_data *)(estatus + 1);
snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP); snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length; while (data_len >= acpi_hest_get_size(gdata)) {
cper_estatus_print_section(newpfx, gdata, sec_no); cper_estatus_print_section(newpfx, gdata, sec_no);
data_len -= gedata_len + sizeof(*gdata); data_len -= acpi_hest_get_record_size(gdata);
gdata = (void *)(gdata + 1) + gedata_len; gdata = acpi_hest_get_next(gdata);
sec_no++; sec_no++;
} }
} }
...@@ -486,12 +654,14 @@ int cper_estatus_check(const struct acpi_hest_generic_status *estatus) ...@@ -486,12 +654,14 @@ int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
return rc; return rc;
data_len = estatus->data_length; data_len = estatus->data_length;
gdata = (struct acpi_hest_generic_data *)(estatus + 1); gdata = (struct acpi_hest_generic_data *)(estatus + 1);
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length; while (data_len >= acpi_hest_get_size(gdata)) {
if (gedata_len > data_len - sizeof(*gdata)) gedata_len = acpi_hest_get_error_length(gdata);
if (gedata_len > data_len - acpi_hest_get_size(gdata))
return -EINVAL; return -EINVAL;
data_len -= gedata_len + sizeof(*gdata);
gdata = (void *)(gdata + 1) + gedata_len; data_len -= acpi_hest_get_record_size(gdata);
gdata = acpi_hest_get_next(gdata);
} }
if (data_len) if (data_len)
return -EINVAL; return -EINVAL;
......
...@@ -39,7 +39,6 @@ config ARM_GIC_V3_ITS ...@@ -39,7 +39,6 @@ config ARM_GIC_V3_ITS
bool bool
depends on PCI depends on PCI
depends on PCI_MSI depends on PCI_MSI
select ACPI_IORT if ACPI
config ARM_NVIC config ARM_NVIC
bool bool
......
...@@ -3,9 +3,10 @@ ...@@ -3,9 +3,10 @@
# #
menu "Performance monitor support" menu "Performance monitor support"
depends on PERF_EVENTS
config ARM_PMU config ARM_PMU
depends on PERF_EVENTS && (ARM || ARM64) depends on ARM || ARM64
bool "ARM PMU framework" bool "ARM PMU framework"
default y default y
help help
...@@ -18,7 +19,7 @@ config ARM_PMU_ACPI ...@@ -18,7 +19,7 @@ config ARM_PMU_ACPI
config QCOM_L2_PMU config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU" bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI depends on ARCH_QCOM && ARM64 && ACPI
help help
Provides support for the L2 cache performance monitor unit (PMU) Provides support for the L2 cache performance monitor unit (PMU)
in Qualcomm Technologies processors. in Qualcomm Technologies processors.
...@@ -27,7 +28,7 @@ config QCOM_L2_PMU ...@@ -27,7 +28,7 @@ config QCOM_L2_PMU
config QCOM_L3_PMU config QCOM_L3_PMU
bool "Qualcomm Technologies L3-cache PMU" bool "Qualcomm Technologies L3-cache PMU"
depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI depends on ARCH_QCOM && ARM64 && ACPI
select QCOM_IRQ_COMBINER select QCOM_IRQ_COMBINER
help help
Provides support for the L3 cache performance monitor unit (PMU) Provides support for the L3 cache performance monitor unit (PMU)
...@@ -36,7 +37,7 @@ config QCOM_L3_PMU ...@@ -36,7 +37,7 @@ config QCOM_L3_PMU
monitoring L3 cache events. monitoring L3 cache events.
config XGENE_PMU config XGENE_PMU
depends on PERF_EVENTS && ARCH_XGENE depends on ARCH_XGENE
bool "APM X-Gene SoC PMU" bool "APM X-Gene SoC PMU"
default n default n
help help
......
This diff is collapsed.
...@@ -7,11 +7,24 @@ ...@@ -7,11 +7,24 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/ras.h> #include <linux/ras.h>
#include <linux/uuid.h>
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras #define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h> #include <ras/ras_event.h>
void log_non_standard_event(const uuid_le *sec_type, const uuid_le *fru_id,
const char *fru_text, const u8 sev, const u8 *err,
const u32 len)
{
trace_non_standard_event(sec_type, fru_id, fru_text, sev, err, len);
}
void log_arm_hw_error(struct cper_sec_proc_arm *err)
{
trace_arm_event(err);
}
static int __init ras_init(void) static int __init ras_init(void)
{ {
int rc = 0; int rc = 0;
...@@ -27,7 +40,8 @@ subsys_initcall(ras_init); ...@@ -27,7 +40,8 @@ subsys_initcall(ras_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(extlog_mem_event); EXPORT_TRACEPOINT_SYMBOL_GPL(extlog_mem_event);
#endif #endif
EXPORT_TRACEPOINT_SYMBOL_GPL(mc_event); EXPORT_TRACEPOINT_SYMBOL_GPL(mc_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(non_standard_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(arm_event);
static int __init parse_ras_param(char *str) static int __init parse_ras_param(char *str)
{ {
......
...@@ -504,7 +504,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) ...@@ -504,7 +504,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
if (&m->list == &kclist_head) { if (&m->list == &kclist_head) {
if (clear_user(buffer, tsz)) if (clear_user(buffer, tsz))
return -EFAULT; return -EFAULT;
} else if (is_vmalloc_or_module_addr((void *)start)) { } else if (m->type == KCORE_VMALLOC) {
vread(buf, (char *)start, tsz); vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */ /* we have to zero-fill user buffer even if no read */
if (copy_to_user(buffer, buf, tsz)) if (copy_to_user(buffer, buf, tsz))
......
#ifndef GHES_H
#define GHES_H
#include <acpi/apei.h> #include <acpi/apei.h>
#include <acpi/hed.h> #include <acpi/hed.h>
...@@ -13,7 +16,10 @@ ...@@ -13,7 +16,10 @@
#define GHES_EXITING 0x0002 #define GHES_EXITING 0x0002
struct ghes { struct ghes {
union {
struct acpi_hest_generic *generic; struct acpi_hest_generic *generic;
struct acpi_hest_generic_v2 *generic_v2;
};
struct acpi_hest_generic_status *estatus; struct acpi_hest_generic_status *estatus;
u64 buffer_paddr; u64 buffer_paddr;
unsigned long flags; unsigned long flags;
...@@ -70,3 +76,43 @@ static inline void ghes_edac_unregister(struct ghes *ghes) ...@@ -70,3 +76,43 @@ static inline void ghes_edac_unregister(struct ghes *ghes)
{ {
} }
#endif #endif
static inline int acpi_hest_get_version(struct acpi_hest_generic_data *gdata)
{
return gdata->revision >> 8;
}
static inline void *acpi_hest_get_payload(struct acpi_hest_generic_data *gdata)
{
if (acpi_hest_get_version(gdata) >= 3)
return (void *)(((struct acpi_hest_generic_data_v300 *)(gdata)) + 1);
return gdata + 1;
}
static inline int acpi_hest_get_error_length(struct acpi_hest_generic_data *gdata)
{
return ((struct acpi_hest_generic_data *)(gdata))->error_data_length;
}
static inline int acpi_hest_get_size(struct acpi_hest_generic_data *gdata)
{
if (acpi_hest_get_version(gdata) >= 3)
return sizeof(struct acpi_hest_generic_data_v300);
return sizeof(struct acpi_hest_generic_data);
}
static inline int acpi_hest_get_record_size(struct acpi_hest_generic_data *gdata)
{
return (acpi_hest_get_size(gdata) + acpi_hest_get_error_length(gdata));
}
static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata)
{
return (void *)(gdata) + acpi_hest_get_record_size(gdata);
}
int ghes_notify_sea(void);
#endif /* GHES_H */
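These helpers exist because a v3 generic data entry appends a 64-bit timestamp to the common header, so the payload offset and the stride to the next record both depend on the high byte of the revision field. A trimmed userspace model of the walk they enable (the struct mirrors the acpi_hest_generic_data layout as I read it; the buffer contents are hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* common header; the v300 variant appends a u64 timestamp after it */
struct gdata {
        uint8_t  section_type[16];
        uint32_t error_severity;
        uint16_t revision;       /* high byte is the structure version */
        uint8_t  validation_bits;
        uint8_t  flags;
        uint32_t error_data_length;
        uint8_t  fru_id[16];
        uint8_t  fru_text[20];
} __attribute__((packed));

static int version(const struct gdata *g) { return g->revision >> 8; }

static size_t hdr_size(const struct gdata *g)
{
        return version(g) >= 3 ? sizeof(*g) + sizeof(uint64_t) : sizeof(*g);
}

int main(void)
{
        uint8_t buf[128] = { 0 };
        struct gdata g = { .revision = 0x300, .error_data_length = 40 };

        memcpy(buf, &g, sizeof(g));
        const struct gdata *p = (const struct gdata *)buf;

        /* payload starts after the version-dependent header, and the
         * next record after header size + error_data_length */
        printf("v%d: header %zu bytes, payload at +%zu, record %zu bytes\n",
               version(p), hdr_size(p), hdr_size(p),
               hdr_size(p) + p->error_data_length);
        return 0;   /* v3: header 72 bytes, payload at +72, record 112 bytes */
}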
...@@ -31,7 +31,6 @@ void iort_deregister_domain_token(int trans_id); ...@@ -31,7 +31,6 @@ void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id); struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT #ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void); void acpi_iort_init(void);
bool iort_node_match(u8 type);
u32 iort_msi_map_rid(struct device *dev, u32 req_id); u32 iort_msi_map_rid(struct device *dev, u32 req_id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
void acpi_configure_pmsi_domain(struct device *dev); void acpi_configure_pmsi_domain(struct device *dev);
...@@ -41,7 +40,6 @@ void iort_set_dma_mask(struct device *dev); ...@@ -41,7 +40,6 @@ void iort_set_dma_mask(struct device *dev);
const struct iommu_ops *iort_iommu_configure(struct device *dev); const struct iommu_ops *iort_iommu_configure(struct device *dev);
#else #else
static inline void acpi_iort_init(void) { } static inline void acpi_iort_init(void) { }
static inline bool iort_node_match(u8 type) { return false; }
static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{ return req_id; } { return req_id; }
static inline struct irq_domain *iort_get_device_domain(struct device *dev, static inline struct irq_domain *iort_get_device_domain(struct device *dev,
......
...@@ -180,6 +180,10 @@ enum { ...@@ -180,6 +180,10 @@ enum {
#define CPER_SEC_PROC_IPF \ #define CPER_SEC_PROC_IPF \
UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \ UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
0x80, 0xC7, 0x3C, 0x88, 0x81) 0x80, 0xC7, 0x3C, 0x88, 0x81)
/* Processor Specific: ARM */
#define CPER_SEC_PROC_ARM \
UUID_LE(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \
0x1D, 0x5D, 0x46, 0xB0)
/* Platform Memory */ /* Platform Memory */
#define CPER_SEC_PLATFORM_MEM \ #define CPER_SEC_PLATFORM_MEM \
UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
...@@ -255,6 +259,22 @@ enum { ...@@ -255,6 +259,22 @@ enum {
#define CPER_PCIE_SLOT_SHIFT 3 #define CPER_PCIE_SLOT_SHIFT 3
#define CPER_ARM_VALID_MPIDR BIT(0)
#define CPER_ARM_VALID_AFFINITY_LEVEL BIT(1)
#define CPER_ARM_VALID_RUNNING_STATE BIT(2)
#define CPER_ARM_VALID_VENDOR_INFO BIT(3)
#define CPER_ARM_INFO_VALID_MULTI_ERR BIT(0)
#define CPER_ARM_INFO_VALID_FLAGS BIT(1)
#define CPER_ARM_INFO_VALID_ERR_INFO BIT(2)
#define CPER_ARM_INFO_VALID_VIRT_ADDR BIT(3)
#define CPER_ARM_INFO_VALID_PHYSICAL_ADDR BIT(4)
#define CPER_ARM_INFO_FLAGS_FIRST BIT(0)
#define CPER_ARM_INFO_FLAGS_LAST BIT(1)
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
/* /*
* All tables and structs must be byte-packed to match CPER * All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS * specification, since the tables are provided by the system BIOS
...@@ -340,6 +360,40 @@ struct cper_ia_proc_ctx { ...@@ -340,6 +360,40 @@ struct cper_ia_proc_ctx {
__u64 mm_reg_addr; __u64 mm_reg_addr;
}; };
/* ARM Processor Error Section */
struct cper_sec_proc_arm {
__u32 validation_bits;
__u16 err_info_num; /* Number of Processor Error Info */
__u16 context_info_num; /* Number of Processor Context Info Records */
__u32 section_length;
__u8 affinity_level;
__u8 reserved[3]; /* must be zero */
__u64 mpidr;
__u64 midr;
__u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */
__u32 psci_state;
};
/* ARM Processor Error Information Structure */
struct cper_arm_err_info {
__u8 version;
__u8 length;
__u16 validation_bits;
__u8 type;
__u16 multiple_error;
__u8 flags;
__u64 error_info;
__u64 virt_fault_addr;
__u64 physical_fault_addr;
};
/* ARM Processor Context Information Structure */
struct cper_arm_ctx_info {
__u16 version;
__u16 type;
__u32 size;
};
/* Old Memory Error Section UEFI 2.1, 2.2 */ /* Old Memory Error Section UEFI 2.1, 2.2 */
struct cper_sec_mem_err_old { struct cper_sec_mem_err_old {
__u64 validation_bits; __u64 validation_bits;
......
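Because these structures arrive straight from firmware, their in-memory layout must match the CPER spec byte-for-byte, hence the byte-packing rule cper.h states above. A quick compile-time sanity check of the new ARM structures as a userspace sketch (packing applied explicitly; the expected sizes are the 40/32/8 bytes UEFI 2.6 section N.2.4.4 prescribes):

#include <assert.h>
#include <stdint.h>

struct cper_sec_proc_arm {
        uint32_t validation_bits;
        uint16_t err_info_num;
        uint16_t context_info_num;
        uint32_t section_length;
        uint8_t  affinity_level;
        uint8_t  reserved[3];
        uint64_t mpidr;
        uint64_t midr;
        uint32_t running_state;
        uint32_t psci_state;
} __attribute__((packed));

struct cper_arm_err_info {
        uint8_t  version;
        uint8_t  length;
        uint16_t validation_bits;
        uint8_t  type;
        uint16_t multiple_error;
        uint8_t  flags;
        uint64_t error_info;
        uint64_t virt_fault_addr;
        uint64_t physical_fault_addr;
} __attribute__((packed));

struct cper_arm_ctx_info {
        uint16_t version;
        uint16_t type;
        uint32_t size;
} __attribute__((packed));

static_assert(sizeof(struct cper_sec_proc_arm) == 40, "ARM section header");
static_assert(sizeof(struct cper_arm_err_info) == 32, "ARM error info");
static_assert(sizeof(struct cper_arm_ctx_info) == 8, "ARM context header");

int main(void) { return 0; }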
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
#define __RAS_H__ #define __RAS_H__
#include <asm/errno.h> #include <asm/errno.h>
#include <linux/uuid.h>
#include <linux/cper.h>
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
int ras_userspace_consumers(void); int ras_userspace_consumers(void);
...@@ -22,4 +24,19 @@ static inline void __init cec_init(void) { } ...@@ -22,4 +24,19 @@ static inline void __init cec_init(void) { }
static inline int cec_add_elem(u64 pfn) { return -ENODEV; } static inline int cec_add_elem(u64 pfn) { return -ENODEV; }
#endif #endif
#ifdef CONFIG_RAS
void log_non_standard_event(const guid_t *sec_type,
const guid_t *fru_id, const char *fru_text,
const u8 sev, const u8 *err, const u32 len);
void log_arm_hw_error(struct cper_sec_proc_arm *err);
#else
static inline void
log_non_standard_event(const guid_t *sec_type,
const guid_t *fru_id, const char *fru_text,
const u8 sev, const u8 *err, const u32 len)
{ return; }
static inline void
log_arm_hw_error(struct cper_sec_proc_arm *err) { return; }
#endif
#endif /* __RAS_H__ */ #endif /* __RAS_H__ */
...@@ -18,8 +18,10 @@ ...@@ -18,8 +18,10 @@
#include <uapi/linux/uuid.h> #include <uapi/linux/uuid.h>
#define UUID_SIZE 16
typedef struct { typedef struct {
__u8 b[16]; __u8 b[UUID_SIZE];
} uuid_t; } uuid_t;
#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ #define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
......
...@@ -161,6 +161,96 @@ TRACE_EVENT(mc_event, ...@@ -161,6 +161,96 @@ TRACE_EVENT(mc_event,
__get_str(driver_detail)) __get_str(driver_detail))
); );
/*
* ARM Processor Events Report
*
 * This event is generated when hardware detects that an ARM processor
 * error has occurred. See UEFI 2.6 spec, section N.2.4.4.
*/
TRACE_EVENT(arm_event,
TP_PROTO(const struct cper_sec_proc_arm *proc),
TP_ARGS(proc),
TP_STRUCT__entry(
__field(u64, mpidr)
__field(u64, midr)
__field(u32, running_state)
__field(u32, psci_state)
__field(u8, affinity)
),
TP_fast_assign(
if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
__entry->affinity = proc->affinity_level;
else
__entry->affinity = ~0;
if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
__entry->mpidr = proc->mpidr;
else
__entry->mpidr = 0ULL;
__entry->midr = proc->midr;
if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
__entry->running_state = proc->running_state;
__entry->psci_state = proc->psci_state;
} else {
__entry->running_state = ~0;
__entry->psci_state = ~0;
}
),
TP_printk("affinity level: %d; MPIDR: %016llx; MIDR: %016llx; "
"running state: %d; PSCI state: %d",
__entry->affinity, __entry->mpidr, __entry->midr,
__entry->running_state, __entry->psci_state)
);
/*
* Non-Standard Section Report
*
 * This event is generated when hardware detects a hardware error
 * whose section either is non-standard, as defined in the UEFI spec
 * appendix "Common Platform Error Record", or has no dedicated
 * TRACE_EVENT of its own.
 */
TRACE_EVENT(non_standard_event,
TP_PROTO(const uuid_le *sec_type,
const uuid_le *fru_id,
const char *fru_text,
const u8 sev,
const u8 *err,
const u32 len),
TP_ARGS(sec_type, fru_id, fru_text, sev, err, len),
TP_STRUCT__entry(
__array(char, sec_type, UUID_SIZE)
__array(char, fru_id, UUID_SIZE)
__string(fru_text, fru_text)
__field(u8, sev)
__field(u32, len)
__dynamic_array(u8, buf, len)
),
TP_fast_assign(
memcpy(__entry->sec_type, sec_type, UUID_SIZE);
memcpy(__entry->fru_id, fru_id, UUID_SIZE);
__assign_str(fru_text, fru_text);
__entry->sev = sev;
__entry->len = len;
memcpy(__get_dynamic_array(buf), err, len);
),
TP_printk("severity: %d; sec type:%pU; FRU: %pU %s; data len:%d; raw data:%s",
__entry->sev, __entry->sec_type,
__entry->fru_id, __get_str(fru_text),
__entry->len,
__print_hex(__get_dynamic_array(buf), __entry->len))
);
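Callers reach this event through the log_non_standard_event() wrapper added in drivers/ras/ras.c above. A kernel-style sketch of how a GHES handler might forward a section it has no dedicated decoder for (the function name, the all-zero FRU id, and the sev parameter are illustrative assumptions, not code from this series):

/* hedged sketch: hand an unrecognized CPER section to the tracepoint */
static void ghes_report_unknown_section(struct acpi_hest_generic_data *gdata,
                                        int sev)
{
        static const uuid_le null_fru_id;       /* no validated FRU info */
        uuid_le *sec_type = (uuid_le *)gdata->section_type;
        void *err = acpi_hest_get_payload(gdata);

        log_non_standard_event(sec_type, &null_fru_id, "", sev, err,
                               gdata->error_data_length);
}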
/* /*
* PCIe AER Trace event * PCIe AER Trace event
* *
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <asm/kvm_asm.h> #include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h> #include <asm/kvm_emulate.h>
#include <asm/virt.h> #include <asm/virt.h>
#include <asm/system_misc.h>
#include "trace.h" #include "trace.h"
...@@ -1430,6 +1431,25 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) ...@@ -1430,6 +1431,25 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
kvm_set_pfn_accessed(pfn); kvm_set_pfn_accessed(pfn);
} }
static bool is_abort_sea(unsigned long fault_status)
{
switch (fault_status) {
case FSC_SEA:
case FSC_SEA_TTW0:
case FSC_SEA_TTW1:
case FSC_SEA_TTW2:
case FSC_SEA_TTW3:
case FSC_SECC:
case FSC_SECC_TTW0:
case FSC_SECC_TTW1:
case FSC_SECC_TTW2:
case FSC_SECC_TTW3:
return true;
default:
return false;
}
}
/** /**
* kvm_handle_guest_abort - handles all 2nd stage aborts * kvm_handle_guest_abort - handles all 2nd stage aborts
* @vcpu: the VCPU pointer * @vcpu: the VCPU pointer
...@@ -1452,19 +1472,29 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -1452,19 +1472,29 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
gfn_t gfn; gfn_t gfn;
int ret, idx; int ret, idx;
fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
/*
* The host kernel will handle the synchronous external abort. There
* is no need to pass the error into the guest.
*/
if (is_abort_sea(fault_status)) {
if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
return 1;
}
is_iabt = kvm_vcpu_trap_is_iabt(vcpu); is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) { if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
kvm_inject_vabt(vcpu); kvm_inject_vabt(vcpu);
return 1; return 1;
} }
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
kvm_vcpu_get_hfar(vcpu), fault_ipa); kvm_vcpu_get_hfar(vcpu), fault_ipa);
/* Check the stage-2 fault is trans. fault or write fault */ /* Check the stage-2 fault is trans. fault or write fault */
fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
if (fault_status != FSC_FAULT && fault_status != FSC_PERM && if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
fault_status != FSC_ACCESS) { fault_status != FSC_ACCESS) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
......