Commit 9dd00138 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - improve ARM implementation of pfn_valid()

 - various sparse fixes

 - spelling fixes

 - add further ARMv8 debug architecture versions

 - clang fix for decompressor

 - update to generic vDSO

 - remove Brahma-B53 from spectre hardening

 - initialise broadcast hrtimer device

 - use correct nm executable in decompressor

 - remove old mcount et al.

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (26 commits)
  ARM: 8940/1: ftrace: remove mcount(),ftrace_caller_old() and ftrace_call_old()
  ARM: 8939/1: kbuild: use correct nm executable
  ARM: 8938/1: kernel: initialize broadcast hrtimer based clock event device
  ARM: 8937/1: spectre-v2: remove Brahma-B53 from hardening
  ARM: 8933/1: replace Sun/Solaris style flag on section directive
  ARM: 8932/1: Add clock_gettime64 entry point
  ARM: 8931/1: Add clock_getres entry point
  ARM: 8930/1: Add support for generic vDSO
  ARM: 8929/1: use APSR_nzcv instead of r15 as mrc operand
  ARM: 8927/1: ARM/hw_breakpoint: add more ARMv8 debug architecture versions support
  ARM: 8918/2: only build return_address() if needed
  ARM: 8928/1: ARM_ERRATA_775420: Spelling s/date/data/
  ARM: 8925/1: tcm: include <asm/tcm.h> for missing declarations
  ARM: 8924/1: tcm: make dtcm_end and itcm_end static
  ARM: 8923/1: mm: include <asm/vga.h> for vga_base
  ARM: 8922/1: parse_dt_topology() rate is pointer to __be32
  ARM: 8920/1: share get_signal_page from signal.c to process.c
  ARM: 8919/1: make unexported functions static
  ARM: 8917/1: mm: include <asm/set_memory.h>
  ARM: 8916/1: mm: make set_section_perms() static
  ...
parents 2309d076 1a70cf0e
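
The generic vDSO update pulled in here exports two new user-visible entry points, __vdso_clock_gettime64 and __vdso_clock_getres (see the vdso.lds.S hunk further down). As a rough userspace illustration, assuming a typical glibc-based ARM userland, the calls below are serviced from the vDSO without trapping into the kernel once this series is applied; running the program under strace should show no clock_gettime or gettimeofday syscalls on the fast path.

/* Userspace sketch (not part of the patch): with CONFIG_VDSO enabled and an
 * architected timer present, glibc resolves clock_gettime() and
 * gettimeofday() through the vDSO entry points added by this series. */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("CLOCK_MONOTONIC: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);

	if (gettimeofday(&tv, NULL) == 0)
		printf("gettimeofday:    %lld.%06ld\n",
		       (long long)tv.tv_sec, (long)tv.tv_usec);

	return 0;
}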
...@@ -1018,7 +1018,7 @@ config ARM_ERRATA_775420
 	depends on CPU_V7
 	help
 	  This option enables the workaround for the 775420 Cortex-A9 (r2p2,
-	  r2p6,r2p8,r2p10,r3p0) erratum. In case a date cache maintenance
+	  r2p6,r2p8,r2p10,r3p0) erratum. In case a data cache maintenance
 	  operation aborts with MMU exception, it might cause the processor
 	  to deadlock. This workaround puts DSB before executing ISB if
 	  an abort may occur on cache maintenance.
......
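
The help text above summarizes the 775420 workaround as issuing a DSB before the ISB that follows a cache maintenance operation which may abort. A minimal sketch of that barrier ordering as GCC inline assembly, assuming an ARMv7 toolchain; the actual workaround lives in the arch/arm cache maintenance code, this only illustrates the instruction ordering the option buys:

/* Illustration only: the 775420 workaround amounts to ordering a DSB
 * ahead of the ISB when the preceding cache maintenance operation can
 * abort, so the core cannot deadlock.  Assumes an ARMv7 target. */
static inline void cache_maintenance_barriers(void)
{
	asm volatile("dsb" : : : "memory");	/* complete/drain the maintenance op */
	asm volatile("isb" : : : "memory");	/* then resynchronize the pipeline */
}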
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
* size immediately following the kernel, we could build this into * size immediately following the kernel, we could build this into
* a binary blob, and concatenate the zImage using the cat command. * a binary blob, and concatenate the zImage using the cat command.
*/ */
.section .start,#alloc,#execinstr .section .start, "ax"
.type _start, #function .type _start, #function
.globl _start .globl _start
......
...@@ -121,7 +121,7 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj) ...@@ -121,7 +121,7 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
asflags-y := -DZIMAGE asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol. # Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
-e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
...@@ -165,7 +165,7 @@ $(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S ...@@ -165,7 +165,7 @@ $(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S
# The .data section is already discarded by the linker script so no need # The .data section is already discarded by the linker script so no need
# to bother about it here. # to bother about it here.
check_for_bad_syms = \ check_for_bad_syms = \
bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ bad_syms=$$($(NM) $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
[ -z "$$bad_syms" ] || \ [ -z "$$bad_syms" ] || \
( echo "following symbols must have non local/private scope:" >&2; \ ( echo "following symbols must have non local/private scope:" >&2; \
echo "$$bad_syms" >&2; false ) echo "$$bad_syms" >&2; false )
......
...@@ -19,7 +19,7 @@ static int node_offset(void *fdt, const char *node_path) ...@@ -19,7 +19,7 @@ static int node_offset(void *fdt, const char *node_path)
} }
static int setprop(void *fdt, const char *node_path, const char *property, static int setprop(void *fdt, const char *node_path, const char *property,
uint32_t *val_array, int size) void *val_array, int size)
{ {
int offset = node_offset(fdt, node_path); int offset = node_offset(fdt, node_path);
if (offset < 0) if (offset < 0)
...@@ -60,7 +60,7 @@ static uint32_t get_cell_size(const void *fdt) ...@@ -60,7 +60,7 @@ static uint32_t get_cell_size(const void *fdt)
{ {
int len; int len;
uint32_t cell_size = 1; uint32_t cell_size = 1;
const uint32_t *size_len = getprop(fdt, "/", "#size-cells", &len); const __be32 *size_len = getprop(fdt, "/", "#size-cells", &len);
if (size_len) if (size_len)
cell_size = fdt32_to_cpu(*size_len); cell_size = fdt32_to_cpu(*size_len);
...@@ -129,7 +129,7 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) ...@@ -129,7 +129,7 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
struct tag *atag = atag_list; struct tag *atag = atag_list;
/* In the case of 64 bits memory size, need to reserve 2 cells for /* In the case of 64 bits memory size, need to reserve 2 cells for
* address and size for each bank */ * address and size for each bank */
uint32_t mem_reg_property[2 * 2 * NR_BANKS]; __be32 mem_reg_property[2 * 2 * NR_BANKS];
int memcount = 0; int memcount = 0;
int ret, memsize; int ret, memsize;
...@@ -138,7 +138,7 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) ...@@ -138,7 +138,7 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
return 1; return 1;
/* if we get a DTB here we're done already */ /* if we get a DTB here we're done already */
if (*(u32 *)atag_list == fdt32_to_cpu(FDT_MAGIC)) if (*(__be32 *)atag_list == cpu_to_fdt32(FDT_MAGIC))
return 0; return 0;
/* validate the ATAG */ /* validate the ATAG */
...@@ -177,8 +177,8 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) ...@@ -177,8 +177,8 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
/* if memsize is 2, that means that /* if memsize is 2, that means that
* each data needs 2 cells of 32 bits, * each data needs 2 cells of 32 bits,
* so the data are 64 bits */ * so the data are 64 bits */
uint64_t *mem_reg_prop64 = __be64 *mem_reg_prop64 =
(uint64_t *)mem_reg_property; (__be64 *)mem_reg_property;
mem_reg_prop64[memcount++] = mem_reg_prop64[memcount++] =
cpu_to_fdt64(atag->u.mem.start); cpu_to_fdt64(atag->u.mem.start);
mem_reg_prop64[memcount++] = mem_reg_prop64[memcount++] =
......
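
The atags_to_fdt.c hunk above is one of the sparse fixes: FDT cells are always big-endian, so the property buffers are now typed __be32/__be64 and filled through cpu_to_fdt32()/cpu_to_fdt64() instead of being treated as native u32/u64. A small userspace analogue, using htonl() in place of cpu_to_fdt32(), shows what the annotation guards against on a little-endian CPU:

/* Userspace analogue of the __be32 fix, assuming a little-endian host.
 * Device-tree cells are stored big-endian; writing a native u32 straight
 * into the blob would come out byte-swapped on little-endian ARM. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htonl(): host -> big-endian, like cpu_to_fdt32() */

int main(void)
{
	uint32_t native = 0x80000000;		/* e.g. a RAM base address */
	uint32_t be_cell = htonl(native);	/* what actually belongs in the FDT */
	const uint8_t *raw = (const uint8_t *)&be_cell;

	/* Big-endian cell layout is the same on every host: 80 00 00 00 */
	printf("FDT cell bytes: %02x %02x %02x %02x\n",
	       raw[0], raw[1], raw[2], raw[3]);
	return 0;
}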
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* Author: Nicolas Pitre * Author: Nicolas Pitre
*/ */
.section ".start", #alloc, #execinstr .section ".start", "ax"
mrc p15, 0, r0, c1, c0, 0 @ read control reg mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #(1 << 7) @ enable big endian mode orr r0, r0, #(1 << 7) @ enable big endian mode
......
...@@ -140,7 +140,7 @@ ...@@ -140,7 +140,7 @@
#endif #endif
.endm .endm
.section ".start", #alloc, #execinstr .section ".start", "ax"
/* /*
* sort out different calling conventions * sort out different calling conventions
*/ */
...@@ -1273,7 +1273,7 @@ iflush: ...@@ -1273,7 +1273,7 @@ iflush:
__armv5tej_mmu_cache_flush: __armv5tej_mmu_cache_flush:
tst r4, #1 tst r4, #1
movne pc, lr movne pc, lr
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate D cache
bne 1b bne 1b
mcr p15, 0, r0, c7, c5, 0 @ flush I cache mcr p15, 0, r0, c7, c5, 0 @ flush I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
.section .piggydata,#alloc .section .piggydata, "a"
.globl input_data .globl input_data
input_data: input_data:
.incbin "arch/arm/boot/compressed/piggy_data" .incbin "arch/arm/boot/compressed/piggy_data"
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern void mcount(void);
extern void __gnu_mcount_nc(void); extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
...@@ -23,9 +22,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) ...@@ -23,9 +22,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
/* With Thumb-2, the recorded addresses have the lsb set */ /* With Thumb-2, the recorded addresses have the lsb set */
return addr & ~1; return addr & ~1;
} }
extern void ftrace_caller_old(void);
extern void ftrace_call_old(void);
#endif #endif
#endif #endif
......
...@@ -53,6 +53,9 @@ static inline void decode_ctrl_reg(u32 reg, ...@@ -53,6 +53,9 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_MM 4 #define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5 #define ARM_DEBUG_ARCH_V7_1 5
#define ARM_DEBUG_ARCH_V8 6 #define ARM_DEBUG_ARCH_V8 6
#define ARM_DEBUG_ARCH_V8_1 7
#define ARM_DEBUG_ARCH_V8_2 8
#define ARM_DEBUG_ARCH_V8_4 9
/* Breakpoint */ /* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0 #define ARM_BREAKPOINT_EXECUTE 0
......
...@@ -27,5 +27,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) ...@@ -27,5 +27,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14; return channel ? 15 : 14;
} }
extern void pcibios_report_status(unsigned int status_mask, int warn);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 ARM Limited
*/
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
#include <asm/cp15.h>
#include <asm/unistd.h>
#include <uapi/linux/time.h>
#define VDSO_HAS_CLOCK_GETRES 1
extern struct vdso_data *__get_datapage(void);
static __always_inline int gettimeofday_fallback(
struct __kernel_old_timeval *_tv,
struct timezone *_tz)
{
register struct timezone *tz asm("r1") = _tz;
register struct __kernel_old_timeval *tv asm("r0") = _tv;
register long ret asm ("r0");
register long nr asm("r7") = __NR_gettimeofday;
asm volatile(
" swi #0\n"
: "=r" (ret)
: "r" (tv), "r" (tz), "r" (nr)
: "memory");
return ret;
}
static __always_inline long clock_gettime_fallback(
clockid_t _clkid,
struct __kernel_timespec *_ts)
{
register struct __kernel_timespec *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
register long nr asm("r7") = __NR_clock_gettime64;
asm volatile(
" swi #0\n"
: "=r" (ret)
: "r" (clkid), "r" (ts), "r" (nr)
: "memory");
return ret;
}
static __always_inline int clock_getres_fallback(
clockid_t _clkid,
struct __kernel_timespec *_ts)
{
register struct __kernel_timespec *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
register long nr asm("r7") = __NR_clock_getres_time64;
asm volatile(
" swi #0\n"
: "=r" (ret)
: "r" (clkid), "r" (ts), "r" (nr)
: "memory");
return ret;
}
static __always_inline u64 __arch_get_hw_counter(int clock_mode)
{
#ifdef CONFIG_ARM_ARCH_TIMER
u64 cycle_now;
isb();
cycle_now = read_sysreg(CNTVCT);
return cycle_now;
#else
return -EINVAL; /* use fallback */
#endif
}
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
return __get_datapage();
}
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
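
The three fallback helpers above hard-code the ARM EABI syscall convention: the syscall number goes in r7, arguments in r0 and r1, the trap is "swi #0", and the result comes back in r0. A standalone userspace sketch of the same convention, assuming an ARM EABI toolchain and using __NR_getpid purely as a convenient demonstration syscall:

/* Sketch of the EABI syscall convention used by the fallback paths above:
 * number in r7, arguments in r0.., trap via "swi #0", result in r0. */
#include <stdio.h>
#include <asm/unistd.h>		/* __NR_getpid */

static long arm_syscall0(long nr)
{
	register long r7 asm("r7") = nr;
	register long r0 asm("r0");

	asm volatile("swi #0"
		     : "=r" (r0)
		     : "r" (r7)
		     : "memory");
	return r0;
}

int main(void)
{
	printf("getpid() via swi #0: %ld\n", arm_syscall0(__NR_getpid));
	return 0;
}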
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
#ifndef __ASSEMBLY__
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <asm/cacheflush.h>
extern struct vdso_data *vdso_data;
extern bool cntvct_ok;
static __always_inline
bool tk_is_cntvct(const struct timekeeper *tk)
{
if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
return false;
if (!tk->tkr_mono.clock->archdata.vdso_direct)
return false;
return true;
}
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
static __always_inline
struct vdso_data *__arm_get_k_vdso_data(void)
{
return vdso_data;
}
#define __arch_get_k_vdso_data __arm_get_k_vdso_data
static __always_inline
int __arm_update_vdso_data(void)
{
return !cntvct_ok;
}
#define __arch_update_vdso_data __arm_update_vdso_data
static __always_inline
int __arm_get_clock_mode(struct timekeeper *tk)
{
u32 __tk_is_cntvct = tk_is_cntvct(tk);
return __tk_is_cntvct;
}
#define __arch_get_clock_mode __arm_get_clock_mode
static __always_inline
int __arm_use_vsyscall(struct vdso_data *vdata)
{
return vdata[CS_HRES_COARSE].clock_mode;
}
#define __arch_use_vsyscall __arm_use_vsyscall
static __always_inline
void __arm_sync_vdso_data(struct vdso_data *vdata)
{
flush_dcache_page(virt_to_page(vdata));
}
#define __arch_sync_vdso_data __arm_sync_vdso_data
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_VSYSCALL_H */
...@@ -11,34 +11,11 @@ ...@@ -11,34 +11,11 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <vdso/datapage.h>
#include <asm/page.h> #include <asm/page.h>
/* Try to be cache-friendly on systems that don't implement the
* generic timer: fit the unconditionally updated fields in the first
* 32 bytes.
*/
struct vdso_data {
u32 seq_count; /* sequence count - odd during updates */
u16 tk_is_cntvct; /* fall back to syscall if false */
u16 cs_shift; /* clocksource shift */
u32 xtime_coarse_sec; /* coarse time */
u32 xtime_coarse_nsec;
u32 wtm_clock_sec; /* wall to monotonic offset */
u32 wtm_clock_nsec;
u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
u32 cs_mult; /* clocksource multiplier */
u64 cs_cycle_last; /* last cycle value */
u64 cs_mask; /* clocksource mask */
u64 xtime_clock_snsec; /* CLOCK_REALTIME sub-ns base */
u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
u32 tz_dsttime;
};
union vdso_data_store { union vdso_data_store {
struct vdso_data data; struct vdso_data data[CS_BASES];
u8 page[PAGE_SIZE]; u8 page[PAGE_SIZE];
}; };
......
...@@ -17,10 +17,14 @@ CFLAGS_REMOVE_return_address.o = -pg ...@@ -17,10 +17,14 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists. # Object file lists.
obj-y := elf.o entry-common.o irq.o opcodes.o \ obj-y := elf.o entry-common.o irq.o opcodes.o \
process.o ptrace.o reboot.o return_address.o \ process.o ptrace.o reboot.o \
setup.o signal.o sigreturn_codes.o \ setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o stacktrace.o sys_arm.o time.o traps.o
ifneq ($(CONFIG_ARM_UNWIND),y)
obj-$(CONFIG_FRAME_POINTER) += return_address.o
endif
obj-$(CONFIG_ATAGS) += atags_parse.o obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/arch_timer.h>
#include <clocksource/arm_arch_timer.h> #include <clocksource/arm_arch_timer.h>
......
...@@ -246,6 +246,9 @@ static int enable_monitor_mode(void) ...@@ -246,6 +246,9 @@ static int enable_monitor_mode(void)
case ARM_DEBUG_ARCH_V7_ECP14: case ARM_DEBUG_ARCH_V7_ECP14:
case ARM_DEBUG_ARCH_V7_1: case ARM_DEBUG_ARCH_V7_1:
case ARM_DEBUG_ARCH_V8: case ARM_DEBUG_ARCH_V8:
case ARM_DEBUG_ARCH_V8_1:
case ARM_DEBUG_ARCH_V8_2:
case ARM_DEBUG_ARCH_V8_4:
ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN)); ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
isb(); isb();
break; break;
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/sort.h> #include <linux/sort.h>
#include <linux/moduleloader.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/opcodes.h> #include <asm/opcodes.h>
......
...@@ -36,6 +36,8 @@ ...@@ -36,6 +36,8 @@
#include <asm/tls.h> #include <asm/tls.h>
#include <asm/vdso.h> #include <asm/vdso.h>
#include "signal.h"
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h> #include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly; unsigned long __stack_chk_guard __read_mostly;
......
...@@ -51,7 +51,7 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) ...@@ -51,7 +51,7 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int psci_cpu_disable(unsigned int cpu) static int psci_cpu_disable(unsigned int cpu)
{ {
/* Fail early if we don't have CPU_OFF support */ /* Fail early if we don't have CPU_OFF support */
if (!psci_ops.cpu_off) if (!psci_ops.cpu_off)
...@@ -64,7 +64,7 @@ int psci_cpu_disable(unsigned int cpu) ...@@ -64,7 +64,7 @@ int psci_cpu_disable(unsigned int cpu)
return 0; return 0;
} }
void psci_cpu_die(unsigned int cpu) static void psci_cpu_die(unsigned int cpu)
{ {
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN << u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
PSCI_0_2_POWER_STATE_TYPE_SHIFT; PSCI_0_2_POWER_STATE_TYPE_SHIFT;
...@@ -76,7 +76,7 @@ void psci_cpu_die(unsigned int cpu) ...@@ -76,7 +76,7 @@ void psci_cpu_die(unsigned int cpu)
panic("psci: cpu %d failed to shutdown\n", cpu); panic("psci: cpu %d failed to shutdown\n", cpu);
} }
int psci_cpu_kill(unsigned int cpu) static int psci_cpu_kill(unsigned int cpu)
{ {
int err, i; int err, i;
......
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
*/ */
#include <linux/export.h> #include <linux/export.h>
#include <linux/ftrace.h> #include <linux/ftrace.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h> #include <linux/sched.h>
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
...@@ -53,6 +51,4 @@ void *return_address(unsigned int level) ...@@ -53,6 +51,4 @@ void *return_address(unsigned int level)
return NULL; return NULL;
} }
#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
EXPORT_SYMBOL_GPL(return_address); EXPORT_SYMBOL_GPL(return_address);
...@@ -9,3 +9,5 @@ struct rt_sigframe { ...@@ -9,3 +9,5 @@ struct rt_sigframe {
struct siginfo info; struct siginfo info;
struct sigframe sig; struct sigframe sig;
}; };
extern struct page *get_signal_page(void);
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/system_info.h> #include <asm/system_info.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/tcm.h>
#define TCMTR_FORMAT_MASK 0xe0000000U #define TCMTR_FORMAT_MASK 0xe0000000U
...@@ -30,8 +31,8 @@ extern char __itcm_start, __sitcm_text, __eitcm_text; ...@@ -30,8 +31,8 @@ extern char __itcm_start, __sitcm_text, __eitcm_text;
extern char __dtcm_start, __sdtcm_data, __edtcm_data; extern char __dtcm_start, __sdtcm_data, __edtcm_data;
/* These will be increased as we run */ /* These will be increased as we run */
u32 dtcm_end = DTCM_OFFSET; static u32 dtcm_end = DTCM_OFFSET;
u32 itcm_end = ITCM_OFFSET; static u32 itcm_end = ITCM_OFFSET;
/* /*
* TCM memory resources * TCM memory resources
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
* reading the RTC at bootup, etc... * reading the RTC at bootup, etc...
*/ */
#include <linux/clk-provider.h> #include <linux/clk-provider.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/export.h> #include <linux/export.h>
...@@ -107,5 +108,6 @@ void __init time_init(void) ...@@ -107,5 +108,6 @@ void __init time_init(void)
of_clk_init(NULL); of_clk_init(NULL);
#endif #endif
timer_probe(); timer_probe();
tick_setup_hrtimer_broadcast();
} }
} }
...@@ -95,7 +95,7 @@ static void __init parse_dt_topology(void) ...@@ -95,7 +95,7 @@ static void __init parse_dt_topology(void)
GFP_NOWAIT); GFP_NOWAIT);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
const u32 *rate; const __be32 *rate;
int len; int len;
/* too early to use cpu->of_node */ /* too early to use cpu->of_node */
......
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
#include <asm/vdso.h> #include <asm/vdso.h>
#include <asm/vdso_datapage.h> #include <asm/vdso_datapage.h>
#include <clocksource/arm_arch_timer.h> #include <clocksource/arm_arch_timer.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#define MAX_SYMNAME 64 #define MAX_SYMNAME 64
...@@ -37,7 +39,7 @@ unsigned int vdso_total_pages __ro_after_init; ...@@ -37,7 +39,7 @@ unsigned int vdso_total_pages __ro_after_init;
* The VDSO data page. * The VDSO data page.
*/ */
static union vdso_data_store vdso_data_store __page_aligned_data; static union vdso_data_store vdso_data_store __page_aligned_data;
static struct vdso_data *vdso_data = &vdso_data_store.data; struct vdso_data *vdso_data = vdso_data_store.data;
static struct page *vdso_data_page __ro_after_init; static struct page *vdso_data_page __ro_after_init;
static const struct vm_special_mapping vdso_data_mapping = { static const struct vm_special_mapping vdso_data_mapping = {
...@@ -77,7 +79,7 @@ struct elfinfo { ...@@ -77,7 +79,7 @@ struct elfinfo {
/* Cached result of boot-time check for whether the arch timer exists, /* Cached result of boot-time check for whether the arch timer exists,
* and if so, whether the virtual counter is useable. * and if so, whether the virtual counter is useable.
*/ */
static bool cntvct_ok __ro_after_init; bool cntvct_ok __ro_after_init;
static bool __init cntvct_functional(void) static bool __init cntvct_functional(void)
{ {
...@@ -262,84 +264,3 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr) ...@@ -262,84 +264,3 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
mm->context.vdso = addr; mm->context.vdso = addr;
} }
static void vdso_write_begin(struct vdso_data *vdata)
{
++vdso_data->seq_count;
smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
}
static void vdso_write_end(struct vdso_data *vdata)
{
smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
++vdso_data->seq_count;
}
static bool tk_is_cntvct(const struct timekeeper *tk)
{
if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
return false;
if (!tk->tkr_mono.clock->archdata.vdso_direct)
return false;
return true;
}
/**
* update_vsyscall - update the vdso data page
*
* Increment the sequence counter, making it odd, indicating to
* userspace that an update is in progress. Update the fields used
* for coarse clocks and, if the architected system timer is in use,
* the fields used for high precision clocks. Increment the sequence
* counter again, making it even, indicating to userspace that the
* update is finished.
*
* Userspace is expected to sample seq_count before reading any other
* fields from the data page. If seq_count is odd, userspace is
* expected to wait until it becomes even. After copying data from
* the page, userspace must sample seq_count again; if it has changed
* from its previous value, userspace must retry the whole sequence.
*
* Calls to update_vsyscall are serialized by the timekeeping core.
*/
void update_vsyscall(struct timekeeper *tk)
{
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
/* The entry points have been zeroed, so there is no
* point in updating the data page.
*/
return;
}
vdso_write_begin(vdso_data);
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
if (vdso_data->tk_is_cntvct) {
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
vdso_data->cs_mult = tk->tkr_mono.mult;
vdso_data->cs_shift = tk->tkr_mono.shift;
vdso_data->cs_mask = tk->tkr_mono.mask;
}
vdso_write_end(vdso_data);
flush_dcache_page(virt_to_page(vdso_data));
}
void update_vsyscall_tz(void)
{
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
flush_dcache_page(virt_to_page(vdso_data));
}
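
The block removed above (the open-coded update_vsyscall()) documented the seqcount protocol the old ARM vDSO used: the writer makes seq_count odd while updating and even when finished, and readers retry if the count was odd or changed across the read. The generic vDSO code now provides the same discipline; a standalone sketch of that reader/writer pairing, assuming C11 atomics in place of the kernel's smp_wmb()/smp_rmb() helpers:

/* Standalone sketch of the seqcount pattern described in the removed
 * comment block, with C11 atomics standing in for smp_wmb()/smp_rmb().
 * It illustrates the retry logic only; the kernel additionally relies on
 * READ_ONCE()-style accesses for the data fields. */
#include <stdatomic.h>
#include <stdio.h>

struct snapshot {
	atomic_uint seq;	/* odd while an update is in progress */
	long sec;
	long nsec;
};

static void writer_update(struct snapshot *s, long sec, long nsec)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_relaxed);	/* seq -> odd */
	atomic_thread_fence(memory_order_release);			/* data writes stay after */
	s->sec = sec;
	s->nsec = nsec;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);	/* seq -> even */
}

static void reader_read(const struct snapshot *s, long *sec, long *nsec)
{
	unsigned int start;

	do {
		do {
			start = atomic_load_explicit(&s->seq, memory_order_acquire);
		} while (start & 1);				/* writer mid-update: wait */
		*sec = s->sec;
		*nsec = s->nsec;
		atomic_thread_fence(memory_order_acquire);	/* keep reads before re-check */
	} while (atomic_load_explicit(&s->seq, memory_order_relaxed) != start);
}

int main(void)
{
	static struct snapshot snap;
	long sec = 0, nsec = 0;

	writer_update(&snap, 12, 345);
	reader_read(&snap, &sec, &nsec);
	printf("%ld.%09ld\n", sec, nsec);
	return 0;
}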
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
PCI_STATUS_PARITY) << 16) PCI_STATUS_PARITY) << 16)
extern int setup_arm_irq(int, struct irqaction *); extern int setup_arm_irq(int, struct irqaction *);
extern void pcibios_report_status(u_int status_mask, int warn);
static unsigned long static unsigned long
dc21285_base_address(struct pci_bus *bus, unsigned int devfn) dc21285_base_address(struct pci_bus *bus, unsigned int devfn)
......
...@@ -896,7 +896,10 @@ config VDSO ...@@ -896,7 +896,10 @@ config VDSO
bool "Enable VDSO for acceleration of some system calls" bool "Enable VDSO for acceleration of some system calls"
depends on AEABI && MMU && CPU_V7 depends on AEABI && MMU && CPU_V7
default y if ARM_ARCH_TIMER default y if ARM_ARCH_TIMER
select HAVE_GENERIC_VDSO
select GENERIC_TIME_VSYSCALL select GENERIC_TIME_VSYSCALL
select GENERIC_VDSO_32
select GENERIC_GETTIMEOFDAY
help help
Place in the process address space an ELF shared object Place in the process address space an ELF shared object
providing fast implementations of gettimeofday and providing fast implementations of gettimeofday and
......
...@@ -1559,7 +1559,7 @@ static int arm_coherent_iommu_mmap_attrs(struct device *dev, ...@@ -1559,7 +1559,7 @@ static int arm_coherent_iommu_mmap_attrs(struct device *dev,
* free a page as defined by the above mapping. * free a page as defined by the above mapping.
* Must not be called with IRQs disabled. * Must not be called with IRQs disabled.
*/ */
void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs, int coherent_flag) dma_addr_t handle, unsigned long attrs, int coherent_flag)
{ {
struct page **pages; struct page **pages;
...@@ -1583,13 +1583,14 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, ...@@ -1583,13 +1583,14 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
__iommu_free_buffer(dev, pages, size, attrs); __iommu_free_buffer(dev, pages, size, attrs);
} }
void arm_iommu_free_attrs(struct device *dev, size_t size, static void arm_iommu_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle, unsigned long attrs) void *cpu_addr, dma_addr_t handle,
unsigned long attrs)
{ {
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
} }
void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle, unsigned long attrs) void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{ {
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
...@@ -1713,7 +1714,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -1713,7 +1714,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* possible) and tagged with the appropriate dma address and length. They are * possible) and tagged with the appropriate dma address and length. They are
* obtained via sg_dma_{address,length}. * obtained via sg_dma_{address,length}.
*/ */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs) int nents, enum dma_data_direction dir, unsigned long attrs)
{ {
return __iommu_map_sg(dev, sg, nents, dir, attrs, true); return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
...@@ -1731,7 +1732,7 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -1731,7 +1732,7 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
* tagged with the appropriate dma address and length. They are obtained via * tagged with the appropriate dma address and length. They are obtained via
* sg_dma_{address,length}. * sg_dma_{address,length}.
*/ */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs) int nents, enum dma_data_direction dir, unsigned long attrs)
{ {
return __iommu_map_sg(dev, sg, nents, dir, attrs, false); return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
...@@ -1764,8 +1765,8 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, ...@@ -1764,8 +1765,8 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
* Unmap a set of streaming mode DMA translations. Again, CPU access * Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single(). * rules concerning calls here are the same as for dma_unmap_single().
*/ */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, static void arm_coherent_iommu_unmap_sg(struct device *dev,
int nents, enum dma_data_direction dir, struct scatterlist *sg, int nents, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
__iommu_unmap_sg(dev, sg, nents, dir, attrs, true); __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
...@@ -1781,7 +1782,8 @@ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, ...@@ -1781,7 +1782,8 @@ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
* Unmap a set of streaming mode DMA translations. Again, CPU access * Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single(). * rules concerning calls here are the same as for dma_unmap_single().
*/ */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, static void arm_iommu_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir, enum dma_data_direction dir,
unsigned long attrs) unsigned long attrs)
{ {
...@@ -1795,7 +1797,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -1795,7 +1797,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
* @nents: number of buffers to map (returned from dma_map_sg) * @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/ */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, static void arm_iommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nents, enum dma_data_direction dir) int nents, enum dma_data_direction dir)
{ {
struct scatterlist *s; struct scatterlist *s;
...@@ -1813,7 +1816,8 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, ...@@ -1813,7 +1816,8 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
* @nents: number of buffers to map (returned from dma_map_sg) * @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/ */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, static void arm_iommu_sync_sg_for_device(struct device *dev,
struct scatterlist *sg,
int nents, enum dma_data_direction dir) int nents, enum dma_data_direction dir)
{ {
struct scatterlist *s; struct scatterlist *s;
...@@ -2015,7 +2019,7 @@ static void arm_iommu_sync_single_for_device(struct device *dev, ...@@ -2015,7 +2019,7 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir); __dma_page_cpu_to_dev(page, offset, size, dir);
} }
const struct dma_map_ops iommu_ops = { static const struct dma_map_ops iommu_ops = {
.alloc = arm_iommu_alloc_attrs, .alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs, .free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs, .mmap = arm_iommu_mmap_attrs,
...@@ -2037,7 +2041,7 @@ const struct dma_map_ops iommu_ops = { ...@@ -2037,7 +2041,7 @@ const struct dma_map_ops iommu_ops = {
.dma_supported = arm_dma_supported, .dma_supported = arm_dma_supported,
}; };
const struct dma_map_ops iommu_coherent_ops = { static const struct dma_map_ops iommu_coherent_ops = {
.alloc = arm_coherent_iommu_alloc_attrs, .alloc = arm_coherent_iommu_alloc_attrs,
.free = arm_coherent_iommu_free_attrs, .free = arm_coherent_iommu_free_attrs,
.mmap = arm_coherent_iommu_mmap_attrs, .mmap = arm_coherent_iommu_mmap_attrs,
......
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h> #include <asm/system_info.h>
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
...@@ -180,7 +181,7 @@ int pfn_valid(unsigned long pfn) ...@@ -180,7 +181,7 @@ int pfn_valid(unsigned long pfn)
if (__phys_to_pfn(addr) != pfn) if (__phys_to_pfn(addr) != pfn)
return 0; return 0;
return memblock_is_map_memory(__pfn_to_phys(pfn)); return memblock_is_map_memory(addr);
} }
EXPORT_SYMBOL(pfn_valid); EXPORT_SYMBOL(pfn_valid);
#endif #endif
...@@ -593,7 +594,7 @@ static inline bool arch_has_strict_perms(void) ...@@ -593,7 +594,7 @@ static inline bool arch_has_strict_perms(void)
return !!(get_cr() & CR_XP); return !!(get_cr() & CR_XP);
} }
void set_section_perms(struct section_perm *perms, int n, bool set, static void set_section_perms(struct section_perm *perms, int n, bool set,
struct mm_struct *mm) struct mm_struct *mm)
{ {
size_t i; size_t i;
......
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/io.h> #include <linux/io.h>
#include <asm/vga.h>
unsigned long vga_base; unsigned long vga_base;
EXPORT_SYMBOL(vga_base); EXPORT_SYMBOL(vga_base);
......
...@@ -491,7 +491,7 @@ cpu_arm1020_name: ...@@ -491,7 +491,7 @@ cpu_arm1020_name:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm1020_proc_info,#object .type __arm1020_proc_info,#object
__arm1020_proc_info: __arm1020_proc_info:
......
...@@ -449,7 +449,7 @@ arm1020e_crval: ...@@ -449,7 +449,7 @@ arm1020e_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm1020e_proc_info,#object .type __arm1020e_proc_info,#object
__arm1020e_proc_info: __arm1020e_proc_info:
......
...@@ -443,7 +443,7 @@ arm1022_crval: ...@@ -443,7 +443,7 @@ arm1022_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm1022_proc_info,#object .type __arm1022_proc_info,#object
__arm1022_proc_info: __arm1022_proc_info:
......
...@@ -138,7 +138,7 @@ ENTRY(arm1026_flush_kern_cache_all) ...@@ -138,7 +138,7 @@ ENTRY(arm1026_flush_kern_cache_all)
mov ip, #0 mov ip, #0
__flush_whole_cache: __flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test, clean, invalidate
bne 1b bne 1b
#endif #endif
tst r2, #VM_EXEC tst r2, #VM_EXEC
...@@ -363,7 +363,7 @@ ENTRY(cpu_arm1026_switch_mm) ...@@ -363,7 +363,7 @@ ENTRY(cpu_arm1026_switch_mm)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
mov r1, #0 mov r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test, clean, invalidate
bne 1b bne 1b
#endif #endif
#ifndef CONFIG_CPU_ICACHE_DISABLE #ifndef CONFIG_CPU_ICACHE_DISABLE
...@@ -437,7 +437,7 @@ arm1026_crval: ...@@ -437,7 +437,7 @@ arm1026_crval:
string cpu_arm1026_name, "ARM1026EJ-S" string cpu_arm1026_name, "ARM1026EJ-S"
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm1026_proc_info,#object .type __arm1026_proc_info,#object
__arm1026_proc_info: __arm1026_proc_info:
......
...@@ -172,7 +172,7 @@ arm720_crval: ...@@ -172,7 +172,7 @@ arm720_crval:
* See <asm/procinfo.h> for a definition of this structure. * See <asm/procinfo.h> for a definition of this structure.
*/ */
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req .macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req
.type __\name\()_proc_info,#object .type __\name\()_proc_info,#object
......
...@@ -128,7 +128,7 @@ __arm740_setup: ...@@ -128,7 +128,7 @@ __arm740_setup:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm740_proc_info,#object .type __arm740_proc_info,#object
__arm740_proc_info: __arm740_proc_info:
.long 0x41807400 .long 0x41807400
......
...@@ -72,7 +72,7 @@ __arm7tdmi_setup: ...@@ -72,7 +72,7 @@ __arm7tdmi_setup:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ .macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
extra_hwcaps=0 extra_hwcaps=0
......
...@@ -434,7 +434,7 @@ arm920_crval: ...@@ -434,7 +434,7 @@ arm920_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm920_proc_info,#object .type __arm920_proc_info,#object
__arm920_proc_info: __arm920_proc_info:
......
...@@ -412,7 +412,7 @@ arm922_crval: ...@@ -412,7 +412,7 @@ arm922_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm922_proc_info,#object .type __arm922_proc_info,#object
__arm922_proc_info: __arm922_proc_info:
......
...@@ -477,7 +477,7 @@ arm925_crval: ...@@ -477,7 +477,7 @@ arm925_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache .macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
.type __\name\()_proc_info,#object .type __\name\()_proc_info,#object
......
...@@ -131,7 +131,7 @@ __flush_whole_cache: ...@@ -131,7 +131,7 @@ __flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else #else
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate
bne 1b bne 1b
#endif #endif
tst r2, #VM_EXEC tst r2, #VM_EXEC
...@@ -358,7 +358,7 @@ ENTRY(cpu_arm926_switch_mm) ...@@ -358,7 +358,7 @@ ENTRY(cpu_arm926_switch_mm)
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else #else
@ && 'Clean & Invalidate whole DCache' @ && 'Clean & Invalidate whole DCache'
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate
bne 1b bne 1b
#endif #endif
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
...@@ -460,7 +460,7 @@ arm926_crval: ...@@ -460,7 +460,7 @@ arm926_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm926_proc_info,#object .type __arm926_proc_info,#object
__arm926_proc_info: __arm926_proc_info:
......
...@@ -340,7 +340,7 @@ __arm940_setup: ...@@ -340,7 +340,7 @@ __arm940_setup:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm940_proc_info,#object .type __arm940_proc_info,#object
__arm940_proc_info: __arm940_proc_info:
......
...@@ -395,7 +395,7 @@ __arm946_setup: ...@@ -395,7 +395,7 @@ __arm946_setup:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __arm946_proc_info,#object .type __arm946_proc_info,#object
__arm946_proc_info: __arm946_proc_info:
.long 0x41009460 .long 0x41009460
......
...@@ -66,7 +66,7 @@ __arm9tdmi_setup: ...@@ -66,7 +66,7 @@ __arm9tdmi_setup:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req .macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
.type __\name\()_proc_info, #object .type __\name\()_proc_info, #object
......
...@@ -185,7 +185,7 @@ fa526_cr1_set: ...@@ -185,7 +185,7 @@ fa526_cr1_set:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __fa526_proc_info,#object .type __fa526_proc_info,#object
__fa526_proc_info: __fa526_proc_info:
......
...@@ -571,7 +571,7 @@ feroceon_crval: ...@@ -571,7 +571,7 @@ feroceon_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req .macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
.type __\name\()_proc_info,#object .type __\name\()_proc_info,#object
......
...@@ -416,7 +416,7 @@ mohawk_crval: ...@@ -416,7 +416,7 @@ mohawk_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __88sv331x_proc_info,#object .type __88sv331x_proc_info,#object
__88sv331x_proc_info: __88sv331x_proc_info:
......
...@@ -196,7 +196,7 @@ sa110_crval: ...@@ -196,7 +196,7 @@ sa110_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.type __sa110_proc_info,#object .type __sa110_proc_info,#object
__sa110_proc_info: __sa110_proc_info:
......
...@@ -239,7 +239,7 @@ sa1100_crval: ...@@ -239,7 +239,7 @@ sa1100_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req .macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
.type __\name\()_proc_info,#object .type __\name\()_proc_info,#object
......
...@@ -261,7 +261,7 @@ v6_crval: ...@@ -261,7 +261,7 @@ v6_crval:
string cpu_elf_name, "v6" string cpu_elf_name, "v6"
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
/* /*
* Match any ARMv6 processor core. * Match any ARMv6 processor core.
......
...@@ -64,6 +64,9 @@ static void cpu_v7_spectre_init(void) ...@@ -64,6 +64,9 @@ static void cpu_v7_spectre_init(void)
break; break;
#ifdef CONFIG_ARM_PSCI #ifdef CONFIG_ARM_PSCI
case ARM_CPU_PART_BRAHMA_B53:
/* Requires no workaround */
break;
default: default:
/* Other ARM CPUs require no workaround */ /* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
......
...@@ -644,7 +644,7 @@ __v7_setup_stack: ...@@ -644,7 +644,7 @@ __v7_setup_stack:
string cpu_elf_name, "v7" string cpu_elf_name, "v7"
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
/* /*
* Standard v7 proc info content * Standard v7 proc info content
......
...@@ -93,7 +93,7 @@ ENTRY(cpu_cm7_proc_fin) ...@@ -93,7 +93,7 @@ ENTRY(cpu_cm7_proc_fin)
ret lr ret lr
ENDPROC(cpu_cm7_proc_fin) ENDPROC(cpu_cm7_proc_fin)
.section ".init.text", #alloc, #execinstr .section ".init.text", "ax"
__v7m_cm7_setup: __v7m_cm7_setup:
mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP) mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
...@@ -177,7 +177,7 @@ ENDPROC(__v7m_setup) ...@@ -177,7 +177,7 @@ ENDPROC(__v7m_setup)
string cpu_elf_name "v7m" string cpu_elf_name "v7m"
string cpu_v7m_name "ARMv7-M" string cpu_v7m_name "ARMv7-M"
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0, proc_fns = v7m_processor_functions .macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0, proc_fns = v7m_processor_functions
.long 0 /* proc_info_list.__cpu_mm_mmu_flags */ .long 0 /* proc_info_list.__cpu_mm_mmu_flags */
......
...@@ -496,7 +496,7 @@ xsc3_crval: ...@@ -496,7 +496,7 @@ xsc3_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req .macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
.type __\name\()_proc_info,#object .type __\name\()_proc_info,#object
......
...@@ -610,7 +610,7 @@ xscale_crval: ...@@ -610,7 +610,7 @@ xscale_crval:
.align .align
.section ".proc.info.init", #alloc .section ".proc.info.init", "a"
.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
.type __\name\()_proc_info,#object .type __\name\()_proc_info,#object
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
# the inclusion of generic Makefile.
ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
include $(srctree)/lib/vdso/Makefile
hostprogs-y := vdsomunge hostprogs-y := vdsomunge
obj-vdso := vgettimeofday.o datapage.o obj-vdso := vgettimeofday.o datapage.o note.o
# Build rules # Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
...@@ -24,7 +30,11 @@ CFLAGS_REMOVE_vdso.o = -pg ...@@ -24,7 +30,11 @@ CFLAGS_REMOVE_vdso.o = -pg
# Force -O2 to avoid libgcc dependencies # Force -O2 to avoid libgcc dependencies
CFLAGS_REMOVE_vgettimeofday.o = -pg -Os CFLAGS_REMOVE_vgettimeofday.o = -pg -Os
ifeq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o = -O2 CFLAGS_vgettimeofday.o = -O2
else
CFLAGS_vgettimeofday.o = -O2 -include $(c-gettimeofday-y)
endif
# Disable gcov profiling for VDSO code # Disable gcov profiling for VDSO code
GCOV_PROFILE := n GCOV_PROFILE := n
...@@ -37,7 +47,7 @@ $(obj)/vdso.o : $(obj)/vdso.so ...@@ -37,7 +47,7 @@ $(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file # Link rule for the .so file
$(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE $(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,ld) $(call if_changed,vdsold_and_vdso_check)
$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE
$(call if_changed,vdsomunge) $(call if_changed,vdsomunge)
...@@ -47,6 +57,10 @@ $(obj)/%.so: OBJCOPYFLAGS := -S ...@@ -47,6 +57,10 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy) $(call if_changed,objcopy)
# Actual build commands
quiet_cmd_vdsold_and_vdso_check = LD $@
cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
quiet_cmd_vdsomunge = MUNGE $@ quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@ cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012-2018 ARM Limited
*
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
* Here we can supply some information useful to userland.
*/
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
#include <linux/build-salt.h>
ELFNOTE32("Linux", 0, LINUX_VERSION_CODE);
BUILD_SALT;
...@@ -71,6 +71,8 @@ VERSION ...@@ -71,6 +71,8 @@ VERSION
global: global:
__vdso_clock_gettime; __vdso_clock_gettime;
__vdso_gettimeofday; __vdso_gettimeofday;
__vdso_clock_getres;
__vdso_clock_gettime64;
local: *; local: *;
}; };
} }
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
/* /*
* ARM userspace implementations of gettimeofday() and similar.
*
* Copyright 2015 Mentor Graphics Corporation. * Copyright 2015 Mentor Graphics Corporation.
*/ */
#include <linux/compiler.h>
#include <linux/hrtimer.h>
#include <linux/time.h> #include <linux/time.h>
#include <asm/barrier.h> #include <linux/types.h>
#include <asm/bug.h>
#include <asm/cp15.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/vdso_datapage.h>
#ifndef CONFIG_AEABI
#error This code depends on AEABI system call conventions
#endif
extern struct vdso_data *__get_datapage(void);
static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
{
u32 seq;
repeat:
seq = READ_ONCE(vdata->seq_count);
if (seq & 1) {
cpu_relax();
goto repeat;
}
return seq;
}
static notrace u32 vdso_read_begin(const struct vdso_data *vdata)
{
u32 seq;
seq = __vdso_read_begin(vdata);
smp_rmb(); /* Pairs with smp_wmb in vdso_write_end */
return seq;
}
static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start) int __vdso_clock_gettime(clockid_t clock,
struct old_timespec32 *ts)
{ {
smp_rmb(); /* Pairs with smp_wmb in vdso_write_begin */ return __cvdso_clock_gettime32(clock, ts);
return vdata->seq_count != start;
} }
static notrace long clock_gettime_fallback(clockid_t _clkid, int __vdso_clock_gettime64(clockid_t clock,
struct timespec *_ts) struct __kernel_timespec *ts)
{ {
register struct timespec *ts asm("r1") = _ts; return __cvdso_clock_gettime(clock, ts);
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
register long nr asm("r7") = __NR_clock_gettime;
asm volatile(
" swi #0\n"
: "=r" (ret)
: "r" (clkid), "r" (ts), "r" (nr)
: "memory");
return ret;
} }
static notrace int do_realtime_coarse(struct timespec *ts, int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
struct vdso_data *vdata) struct timezone *tz)
{ {
u32 seq; return __cvdso_gettimeofday(tv, tz);
do {
seq = vdso_read_begin(vdata);
ts->tv_sec = vdata->xtime_coarse_sec;
ts->tv_nsec = vdata->xtime_coarse_nsec;
} while (vdso_read_retry(vdata, seq));
return 0;
} }
static notrace int do_monotonic_coarse(struct timespec *ts, int __vdso_clock_getres(clockid_t clock_id,
struct vdso_data *vdata) struct old_timespec32 *res)
{ {
struct timespec tomono; return __cvdso_clock_getres_time32(clock_id, res);
u32 seq;
do {
seq = vdso_read_begin(vdata);
ts->tv_sec = vdata->xtime_coarse_sec;
ts->tv_nsec = vdata->xtime_coarse_nsec;
tomono.tv_sec = vdata->wtm_clock_sec;
tomono.tv_nsec = vdata->wtm_clock_nsec;
} while (vdso_read_retry(vdata, seq));
ts->tv_sec += tomono.tv_sec;
timespec_add_ns(ts, tomono.tv_nsec);
return 0;
}
#ifdef CONFIG_ARM_ARCH_TIMER
static notrace u64 get_ns(struct vdso_data *vdata)
{
u64 cycle_delta;
u64 cycle_now;
u64 nsec;
isb();
cycle_now = read_sysreg(CNTVCT);
cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
nsec = (cycle_delta * vdata->cs_mult) + vdata->xtime_clock_snsec;
nsec >>= vdata->cs_shift;
return nsec;
}
static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
{
u64 nsecs;
u32 seq;
do {
seq = vdso_read_begin(vdata);
if (!vdata->tk_is_cntvct)
return -1;
ts->tv_sec = vdata->xtime_clock_sec;
nsecs = get_ns(vdata);
} while (vdso_read_retry(vdata, seq));
ts->tv_nsec = 0;
timespec_add_ns(ts, nsecs);
return 0;
}
static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
{
struct timespec tomono;
u64 nsecs;
u32 seq;
do {
seq = vdso_read_begin(vdata);
if (!vdata->tk_is_cntvct)
return -1;
ts->tv_sec = vdata->xtime_clock_sec;
nsecs = get_ns(vdata);
tomono.tv_sec = vdata->wtm_clock_sec;
tomono.tv_nsec = vdata->wtm_clock_nsec;
} while (vdso_read_retry(vdata, seq));
ts->tv_sec += tomono.tv_sec;
ts->tv_nsec = 0;
timespec_add_ns(ts, nsecs + tomono.tv_nsec);
return 0;
}
#else /* CONFIG_ARM_ARCH_TIMER */
static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
{
return -1;
}
static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
{
return -1;
}
#endif /* CONFIG_ARM_ARCH_TIMER */
notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
{
struct vdso_data *vdata;
int ret = -1;
vdata = __get_datapage();
switch (clkid) {
case CLOCK_REALTIME_COARSE:
ret = do_realtime_coarse(ts, vdata);
break;
case CLOCK_MONOTONIC_COARSE:
ret = do_monotonic_coarse(ts, vdata);
break;
case CLOCK_REALTIME:
ret = do_realtime(ts, vdata);
break;
case CLOCK_MONOTONIC:
ret = do_monotonic(ts, vdata);
break;
default:
break;
}
if (ret)
ret = clock_gettime_fallback(clkid, ts);
return ret;
}
static notrace long gettimeofday_fallback(struct timeval *_tv,
struct timezone *_tz)
{
register struct timezone *tz asm("r1") = _tz;
register struct timeval *tv asm("r0") = _tv;
register long ret asm ("r0");
register long nr asm("r7") = __NR_gettimeofday;
asm volatile(
" swi #0\n"
: "=r" (ret)
: "r" (tv), "r" (tz), "r" (nr)
: "memory");
return ret;
}
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
struct timespec ts;
struct vdso_data *vdata;
int ret;
vdata = __get_datapage();
ret = do_realtime(&ts, vdata);
if (ret)
return gettimeofday_fallback(tv, tz);
if (tv) {
tv->tv_sec = ts.tv_sec;
tv->tv_usec = ts.tv_nsec / 1000;
}
if (tz) {
tz->tz_minuteswest = vdata->tz_minuteswest;
tz->tz_dsttime = vdata->tz_dsttime;
}
return ret;
} }
/* Avoid unresolved references emitted by GCC */ /* Avoid unresolved references emitted by GCC */
......