Commit 6325e940 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:
 - eBPF JIT compiler for arm64
 - CPU suspend backend for PSCI (firmware interface) with standard idle
   states defined in DT (generic idle driver to be merged via a
   different tree)
 - Support for CONFIG_DEBUG_SET_MODULE_RONX
 - Support for unmapped cpu-release-addr (outside kernel linear mapping)
 - set_arch_dma_coherent_ops() implemented and bus notifiers removed
 - EFI_STUB improvements when base of DRAM is occupied
 - Fixes for typos in KGDB macros
 - Clean-up to (partially) allow kernel building with LLVM
 - Other clean-ups (extern keyword, phys_addr_t usage)

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (51 commits)
  arm64: Remove unneeded extern keyword
  ARM64: make of_device_ids const
  arm64: Use phys_addr_t type for physical address
  aarch64: filter $x from kallsyms
  arm64: Use DMA_ERROR_CODE to denote failed allocation
  arm64: Fix typos in KGDB macros
  arm64: insn: Add return statements after BUG_ON()
  arm64: debug: don't re-enable debug exceptions on return from el1_dbg
  Revert "arm64: dmi: Add SMBIOS/DMI support"
  arm64: Implement set_arch_dma_coherent_ops() to replace bus notifiers
  of: amba: use of_dma_configure for AMBA devices
  arm64: dmi: Add SMBIOS/DMI support
  arm64: Correct ftrace calls to aarch64_insn_gen_branch_imm()
  arm64:mm: initialize max_mapnr using function set_max_mapnr
  setup: Move unmask of async interrupts after possible earlycon setup
  arm64: LLVMLinux: Fix inline arm64 assembly for use with clang
  arm64: pageattr: Correctly adjust unaligned start addresses
  net: bpf: arm64: fix module memory leak when JIT image build fails
  arm64: add PSCI CPU_SUSPEND based cpu_suspend support
  arm64: kernel: introduce cpu_init_idle CPU operation
  ...
parents 536fd93d 0a6479b0
......@@ -219,6 +219,12 @@ nodes to be present and contain the properties described below.
Value type: <phandle>
Definition: Specifies the ACC[2] node associated with this CPU.
- cpu-idle-states
Usage: Optional
Value type: <prop-encoded-array>
Definition:
# List of phandles to idle state nodes supported
by this cpu [3].
Example 1 (dual-cluster big.LITTLE system 32-bit):
......@@ -415,3 +421,5 @@ cpus {
--
[1] arm/msm/qcom,saw2.txt
[2] arm/msm/qcom,kpss-acc.txt
[3] ARM Linux kernel documentation - idle states bindings
Documentation/devicetree/bindings/arm/idle-states.txt
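The kernel side walks this phandle list once per cpu node. A minimal sketch in C, assuming a cpu_node already obtained via of_cpu_device_node_get() (the full version, with error handling, is cpu_psci_cpu_init_idle() further down in this diff):

        int count = 0;
        struct device_node *state_node;

        /* Count the idle states advertised for this cpu */
        while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
                                              count))) {
                count++;
                of_node_put(state_node);
        }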
......@@ -50,6 +50,16 @@ Main node optional properties:
- migrate : Function ID for MIGRATE operation
Device tree nodes that require usage of the PSCI CPU_SUSPEND function (i.e. idle
state nodes, as per bindings in [1]) must specify the following properties:
- arm,psci-suspend-param
Usage: Required for state nodes[1] if the corresponding
idle-states node entry-method property is set
to "psci".
Value type: <u32>
Definition: power_state parameter to pass to the PSCI
suspend call.
Example:
......@@ -64,7 +74,6 @@ Case 1: PSCI v0.1 only.
migrate = <0x95c10003>;
};
Case 2: PSCI v0.2 only
psci {
......@@ -88,3 +97,6 @@ Case 3: PSCI v0.2 and PSCI v0.1.
...
};
[1] Kernel documentation - ARM idle states bindings
Documentation/devicetree/bindings/arm/idle-states.txt
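For each idle-state node referenced through cpu-idle-states, the PSCI back-end then pulls the parameter out with a plain u32 property read. A hedged sketch, mirroring cpu_psci_cpu_init_idle() later in this diff (cpu_node and the index 0 are illustrative):

        u32 param;
        struct device_node *state_node;

        /* First idle state referenced by this cpu's cpu-idle-states */
        state_node = of_parse_phandle(cpu_node, "cpu-idle-states", 0);
        if (!state_node)
                return -ENODEV;
        if (of_property_read_u32(state_node, "arm,psci-suspend-param", &param))
                pr_warn("%s missing arm,psci-suspend-param\n",
                        state_node->full_name);
        of_node_put(state_node);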
......@@ -462,9 +462,9 @@ JIT compiler
------------
The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC,
ARM, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT compiler
is transparently invoked for each attached filter from user space or for
internal kernel users if it has been previously enabled by root:
ARM, ARM64, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT
compiler is transparently invoked for each attached filter from user space
or for internal kernel users if it has been previously enabled by root:
echo 1 > /proc/sys/net/core/bpf_jit_enable
......
......@@ -35,6 +35,7 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_BUGVERBOSE
......@@ -252,11 +253,11 @@ config SCHED_SMT
places. If unsure say N here.
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
int "Maximum number of CPUs (2-64)"
range 2 64
depends on SMP
# These have to remain sorted largest to smallest
default "8"
default "64"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
......
......@@ -43,4 +43,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
of TEXT_OFFSET and platforms must not require a specific
value.
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
depends on MODULES
help
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
of module data. Such protection may interfere with run-time code
patching and dynamic kernel tracing - and it might also protect
against certain classes of kernel exploits.
If in doubt, say "N".
endmenu
......@@ -47,6 +47,7 @@ endif
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
core-$(CONFIG_NET) += arch/arm64/net/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
......
......@@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
#endif
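These four calls are what the new DEBUG_SET_MODULE_RONX option builds on. A sketch of the intended usage on a hypothetical module region (the helper name, addresses and page counts are made up for illustration):

        /* Hypothetical helper mirroring what DEBUG_SET_MODULE_RONX implies */
        static int protect_module(unsigned long text, int text_pages,
                                  unsigned long data, int data_pages)
        {
                int ret;

                ret = set_memory_ro(text, text_pages);  /* text: read-only */
                if (ret)
                        return ret;
                return set_memory_nx(data, data_pages); /* data: no-execute */
        }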
......@@ -39,6 +39,26 @@
extern unsigned long __icache_flags;
#define CCSIDR_EL1_LINESIZE_MASK 0x7
#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
#define CCSIDR_EL1_NUMSETS_SHIFT 13
#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
#define CCSIDR_EL1_NUMSETS(x) \
(((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
extern u64 __attribute_const__ icache_get_ccsidr(void);
static inline int icache_get_linesize(void)
{
return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
}
static inline int icache_get_numsets(void)
{
return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
}
/*
* Whilst the D-side always behaves as PIPT on AArch64, aliasing is
* permitted in the I-cache.
......
......@@ -28,6 +28,8 @@ struct device_node;
* enable-method property.
* @cpu_init: Reads any data necessary for a specific enable-method from the
* devicetree, for a given cpu node and proposed logical id.
* @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
* devicetree, for a given cpu node and proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
......@@ -47,6 +49,7 @@ struct device_node;
struct cpu_operations {
const char *name;
int (*cpu_init)(struct device_node *, unsigned int);
int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
......@@ -61,7 +64,7 @@ struct cpu_operations {
};
extern const struct cpu_operations *cpu_ops[NR_CPUS];
extern int __init cpu_read_ops(struct device_node *dn, int cpu);
extern void __init cpu_read_bootcpu_ops(void);
int __init cpu_read_ops(struct device_node *dn, int cpu);
void __init cpu_read_bootcpu_ops(void);
#endif /* ifndef __ASM_CPU_OPS_H */
#ifndef __ASM_CPUIDLE_H
#define __ASM_CPUIDLE_H
#ifdef CONFIG_CPU_IDLE
extern int cpu_init_idle(unsigned int cpu);
#else
static inline int cpu_init_idle(unsigned int cpu)
{
return -EOPNOTSUPP;
}
#endif
#endif
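The intended caller is the generic DT idle driver mentioned in the pull summary (merged via a different tree). A hedged sketch of a probe-time loop using the hook:

        int cpu, ret;

        for_each_possible_cpu(cpu) {
                ret = cpu_init_idle(cpu);  /* parse this cpu's idle states */
                if (ret)
                        return ret;  /* e.g. -EOPNOTSUPP without CONFIG_CPU_IDLE */
        }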
......@@ -48,11 +48,13 @@
/*
* #imm16 values used for BRK instruction generation
* Allowed values for kgdb are 0x400 - 0x7ff
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
*/
#define KGDB_DYN_DGB_BRK_IMM 0x400
#define KDBG_COMPILED_DBG_BRK_IMM 0x401
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
/*
* BRK instruction encoding
......@@ -60,25 +62,31 @@
*/
#define AARCH64_BREAK_MON 0xd4200000
/*
* BRK instruction for provoking a fault on purpose
* Unlike kgdb, an #imm16 value with an unallocated handler is used for faulting.
*/
#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
/*
* Extract byte from BRK instruction
*/
#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
/*
* Extract byte from BRK #imm16
*/
#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
(((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
#define KGDB_DYN_DGB_BRK_BYTE(x) \
(KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
#define KGDB_DYN_DBG_BRK_BYTE(x) \
(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0)
#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1)
#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2)
#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3)
#define CACHE_FLUSH_IS_SAFE 1
......
......@@ -52,6 +52,13 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
return 0;
}
#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
......
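The caller side lives in the of_dma_configure() changes near the end of this diff: devices whose DT node carries a "dma-coherent" property get flipped to the coherent ops. Roughly (a sketch, not the verbatim helper):

        if (of_property_read_bool(dev->of_node, "dma-coherent"))
                set_arch_dma_coherent_ops(dev); /* -> coherent_swiotlb_dma_ops */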
......@@ -243,7 +243,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
* (PHYS_OFFSET and PHYS_MASK taken into account).
*/
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(unsigned long addr, size_t size);
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
......
......@@ -29,7 +29,7 @@
static inline void arch_kgdb_breakpoint(void)
{
asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM));
}
extern void kgdb_handle_bus_error(void);
......
......@@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off)
static inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
register unsigned long *sp asm ("sp");
/*
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
asm("mrs %0, tpidr_el1" : "=r" (off) :
"Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
......
......@@ -149,46 +149,51 @@ extern struct page *empty_zero_page;
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
pte_val(pte) &= ~PTE_WRITE;
pte_val(pte) &= ~pgprot_val(prot);
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
pte_val(pte) |= PTE_WRITE;
pte_val(pte) |= pgprot_val(prot);
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}
static inline pte_t pte_mkwrite(pte_t pte)
{
return set_pte_bit(pte, __pgprot(PTE_WRITE));
}
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~PTE_DIRTY;
return pte;
return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= PTE_DIRTY;
return pte;
return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}
static inline pte_t pte_mkold(pte_t pte)
{
pte_val(pte) &= ~PTE_AF;
return pte;
return clear_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= PTE_AF;
return pte;
return set_pte_bit(pte, __pgprot(PTE_AF));
}
static inline pte_t pte_mkspecial(pte_t pte)
{
pte_val(pte) |= PTE_SPECIAL;
return pte;
return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}
static inline void set_pte(pte_t *ptep, pte_t pte)
......
......@@ -32,6 +32,8 @@ extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
void cpu_soft_restart(phys_addr_t cpu_reset,
unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
......
......@@ -21,6 +21,7 @@ struct sleep_save_sp {
phys_addr_t save_ptr_stash_phys;
};
extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
extern int cpu_suspend(unsigned long);
......
......@@ -68,6 +68,11 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/*
* how to get the current stack pointer from C
*/
register unsigned long current_stack_pointer asm ("sp");
/*
* how to get the thread information struct from C
*/
......@@ -75,8 +80,8 @@ static inline struct thread_info *current_thread_info(void) __attribute_const__;
static inline struct thread_info *current_thread_info(void)
{
register unsigned long sp asm ("sp");
return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
return (struct thread_info *)
(current_stack_pointer & ~(THREAD_SIZE - 1));
}
#define thread_saved_pc(tsk) \
......
......@@ -26,6 +26,7 @@ arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
......
/*
* ARM64 CPU idle arch support
*
* Copyright (C) 2014 ARM Ltd.
* Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
int cpu_init_idle(unsigned int cpu)
{
int ret = -EOPNOTSUPP;
struct device_node *cpu_node = of_cpu_device_node_get(cpu);
if (!cpu_node)
return -ENODEV;
if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
of_node_put(cpu_node);
return ret;
}
......@@ -20,8 +20,10 @@
#include <asm/cputype.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>
......@@ -47,8 +49,18 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
unsigned int cpu = smp_processor_id();
u32 l1ip = CTR_L1IP(info->reg_ctr);
if (l1ip != ICACHE_POLICY_PIPT)
set_bit(ICACHEF_ALIASING, &__icache_flags);
if (l1ip != ICACHE_POLICY_PIPT) {
/*
* VIPT caches are non-aliasing if the VA always equals the PA
* in all bit positions that are covered by the index. This is
* the case if the size of a way (# of sets * line size) does
* not exceed PAGE_SIZE.
*/
u32 waysize = icache_get_numsets() * icache_get_linesize();
if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
set_bit(ICACHEF_ALIASING, &__icache_flags);
}
if (l1ip == ICACHE_POLICY_AIVIVT)
set_bit(ICACHEF_AIVIVT, &__icache_flags);
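/*
 * Worked example of the check above, with hypothetical but typical
 * geometry (64-byte lines, 256 sets, 4K pages):
 *   linesize = 16 << 2   =  64 bytes   (CCSIDR LineSize field = 2)
 *   numsets  = 255 + 1   = 256 sets    (CCSIDR NumSets field = 255)
 *   waysize  = 256 * 64  = 16KB > 4KB PAGE_SIZE
 * The index then covers bits above the page offset, so a VIPT
 * I-cache of this size must be treated as aliasing.
 */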
......@@ -190,3 +202,15 @@ void __init cpuinfo_store_boot_cpu(void)
boot_cpu_data = *info;
}
u64 __attribute_const__ icache_get_ccsidr(void)
{
u64 ccsidr;
WARN_ON(preemptible());
/* Select L1 I-cache and read its size ID register */
asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1"
: "=r"(ccsidr) : "r"(1L));
return ccsidr;
}
......@@ -28,20 +28,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
kernel_size = _edata - _text;
if (*image_addr != (dram_base + TEXT_OFFSET)) {
kernel_memsize = kernel_size + (_end - _edata);
status = efi_relocate_kernel(sys_table, image_addr,
kernel_size, kernel_memsize,
dram_base + TEXT_OFFSET,
PAGE_SIZE);
status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
SZ_2M, reserve_addr);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to relocate kernel\n");
return status;
}
if (*image_addr != (dram_base + TEXT_OFFSET)) {
pr_efi_err(sys_table, "Failed to alloc kernel memory\n");
efi_free(sys_table, kernel_memsize, *image_addr);
return EFI_LOAD_ERROR;
}
*image_size = kernel_memsize;
memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
kernel_size);
*image_addr = *reserve_addr + TEXT_OFFSET;
*reserve_size = kernel_memsize + TEXT_OFFSET;
}
......
......@@ -324,7 +324,6 @@ el1_dbg:
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
enable_dbg
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
......
......@@ -58,7 +58,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
u32 new;
pc = (unsigned long)&ftrace_call;
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
AARCH64_INSN_BRANCH_LINK);
return ftrace_modify_code(pc, 0, new, false);
}
......@@ -72,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
u32 old, new;
old = aarch64_insn_gen_nop();
new = aarch64_insn_gen_branch_imm(pc, addr, true);
new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
return ftrace_modify_code(pc, old, new, true);
}
......@@ -86,7 +87,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long pc = rec->ip;
u32 old, new;
old = aarch64_insn_gen_branch_imm(pc, addr, true);
old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, true);
......@@ -154,7 +155,8 @@ static int ftrace_modify_graph_caller(bool enable)
u32 branch, nop;
branch = aarch64_insn_gen_branch_imm(pc,
(unsigned long)ftrace_graph_caller, false);
(unsigned long)ftrace_graph_caller,
AARCH64_INSN_BRANCH_LINK);
nop = aarch64_insn_gen_nop();
if (enable)
......
......@@ -151,7 +151,7 @@ optional_header:
.short 0x20b // PE32+ format
.byte 0x02 // MajorLinkerVersion
.byte 0x14 // MinorLinkerVersion
.long _edata - stext // SizeOfCode
.long _end - stext // SizeOfCode
.long 0 // SizeOfInitializedData
.long 0 // SizeOfUninitializedData
.long efi_stub_entry - efi_head // AddressOfEntryPoint
......@@ -169,7 +169,7 @@ extra_header_fields:
.short 0 // MinorSubsystemVersion
.long 0 // Win32VersionValue
.long _edata - efi_head // SizeOfImage
.long _end - efi_head // SizeOfImage
// Everything before the kernel image is considered part of the header
.long stext - efi_head // SizeOfHeaders
......@@ -216,7 +216,7 @@ section_table:
.byte 0
.byte 0
.byte 0 // end of 0 padding of section name
.long _edata - stext // VirtualSize
.long _end - stext // VirtualSize
.long stext - efi_head // VirtualAddress
.long _edata - stext // SizeOfRawData
.long stext - efi_head // PointerToRawData
......
......@@ -235,13 +235,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
.esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
.esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
.fn = kgdb_brk_fn
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.esr_mask = 0xffffffff,
.esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
.esr_val = DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
};
......
......@@ -1276,7 +1276,7 @@ arch_initcall(cpu_pmu_reset);
/*
* PMU platform driver and devicetree bindings.
*/
static struct of_device_id armpmu_of_device_ids[] = {
static const struct of_device_id armpmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3"},
{},
};
......
......@@ -57,36 +57,10 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
static void setup_restart(void)
{
/*
* Tell the mm system that we are going to reboot -
* we may need it to insert some 1:1 mappings so that
* soft boot works.
*/
setup_mm_for_reboot();
/* Clean and invalidate caches */
flush_cache_all();
/* Turn D-cache off */
cpu_cache_off();
/* Push out any further dirty data, and ensure cache is empty */
flush_cache_all();
}
void soft_restart(unsigned long addr)
{
typedef void (*phys_reset_t)(unsigned long);
phys_reset_t phys_reset;
setup_restart();
/* Switch to the identity mapping */
phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
phys_reset(addr);
setup_mm_for_reboot();
cpu_soft_restart(virt_to_phys(cpu_reset), addr);
/* Should never get here */
BUG();
}
......
......@@ -21,6 +21,7 @@
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <uapi/linux/psci.h>
#include <asm/compiler.h>
......@@ -28,6 +29,7 @@
#include <asm/errno.h>
#include <asm/psci.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>
#define PSCI_POWER_STATE_TYPE_STANDBY 0
......@@ -65,6 +67,8 @@ enum psci_function {
PSCI_FN_MAX,
};
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
static u32 psci_function_id[PSCI_FN_MAX];
static int psci_to_linux_errno(int errno)
......@@ -93,6 +97,18 @@ static u32 psci_power_state_pack(struct psci_power_state state)
& PSCI_0_2_POWER_STATE_AFFL_MASK);
}
static void psci_power_state_unpack(u32 power_state,
struct psci_power_state *state)
{
state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
PSCI_0_2_POWER_STATE_ID_SHIFT;
state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
state->affinity_level =
(power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
PSCI_0_2_POWER_STATE_AFFL_SHIFT;
}
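/*
 * Worked example with a hypothetical power_state of 0x01010000:
 *   id             = 0x01010000 & 0xffff      = 0
 *   type           = (0x01010000 >> 16) & 0x1 = 1 (POWERDOWN)
 *   affinity_level = (0x01010000 >> 24) & 0x3 = 1 (cluster level)
 */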
/*
* The following two functions are invoked via the invoke_psci_fn pointer
* and will not be inlined, allowing us to piggyback on the AAPCS.
......@@ -199,6 +215,63 @@ static int psci_migrate_info_type(void)
return err;
}
static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
unsigned int cpu)
{
int i, ret, count = 0;
struct psci_power_state *psci_states;
struct device_node *state_node;
/*
* If the PSCI cpu_suspend function hook has not been initialized
* idle states must not be enabled, so bail out
*/
if (!psci_ops.cpu_suspend)
return -EOPNOTSUPP;
/* Count idle states */
while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
count))) {
count++;
of_node_put(state_node);
}
if (!count)
return -ENODEV;
psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
if (!psci_states)
return -ENOMEM;
for (i = 0; i < count; i++) {
u32 psci_power_state;
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
ret = of_property_read_u32(state_node,
"arm,psci-suspend-param",
&psci_power_state);
if (ret) {
pr_warn(" * %s missing arm,psci-suspend-param property\n",
state_node->full_name);
of_node_put(state_node);
goto free_mem;
}
of_node_put(state_node);
pr_debug("psci-power-state %#x index %d\n", psci_power_state,
i);
psci_power_state_unpack(psci_power_state, &psci_states[i]);
}
/* Idle states parsed correctly, initialize per-cpu pointer */
per_cpu(psci_power_state, cpu) = psci_states;
return 0;
free_mem:
kfree(psci_states);
return ret;
}
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
......@@ -436,8 +509,39 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
#endif
#endif
static int psci_suspend_finisher(unsigned long index)
{
struct psci_power_state *state = __get_cpu_var(psci_power_state);
return psci_ops.cpu_suspend(state[index - 1],
virt_to_phys(cpu_resume));
}
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
{
int ret;
struct psci_power_state *state = __get_cpu_var(psci_power_state);
/*
* idle state index 0 corresponds to wfi, should never be called
* from the cpu_suspend operations
*/
if (WARN_ON_ONCE(!index))
return -EINVAL;
if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
ret = psci_ops.cpu_suspend(state[index - 1], 0);
else
ret = __cpu_suspend(index, psci_suspend_finisher);
return ret;
}
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
#ifdef CONFIG_CPU_IDLE
.cpu_init_idle = cpu_psci_cpu_init_idle,
.cpu_suspend = cpu_psci_cpu_suspend,
#endif
#ifdef CONFIG_SMP
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
......
......@@ -36,13 +36,12 @@ void *return_address(unsigned int level)
{
struct return_address_data data;
struct stackframe frame;
register unsigned long current_sp asm ("sp");
data.level = level + 2;
data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
walk_stackframe(&frame, save_return_addr, &data);
......
......@@ -365,11 +365,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
/*
* Unmask asynchronous aborts early to catch possible system errors.
*/
local_async_enable();
setup_processor();
setup_machine_fdt(__fdt_pointer);
......@@ -385,6 +380,12 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
/*
* Unmask asynchronous aborts after bringing up possible earlycon.
* (Report possible System Errors once we can report this occurred)
*/
local_async_enable();
efi_init();
arm64_memblock_init();
......
......@@ -49,28 +49,39 @@
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
.endm
/*
* Save CPU state for a suspend. This saves callee registers, and allocates
* space on the kernel stack to save the CPU specific registers + some
* other data for resume.
* Save CPU state for a suspend and execute the suspend finisher.
* On success it will return 0 through cpu_resume - ie through a CPU
* soft/hard reboot from the reset vector.
* On failure it returns the suspend finisher return value or force
* -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
* is not allowed to return, if it does this must be considered failure).
* It saves callee registers, and allocates space on the kernel stack
* to save the CPU specific registers + some other data for resume.
*
* x0 = suspend finisher argument
* x1 = suspend finisher function pointer
*/
ENTRY(__cpu_suspend)
ENTRY(__cpu_suspend_enter)
stp x29, lr, [sp, #-96]!
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
/*
* Stash suspend finisher and its argument in x20 and x19
*/
mov x19, x0
mov x20, x1
mov x2, sp
sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
mov x1, sp
mov x0, sp
/*
* x1 now points to struct cpu_suspend_ctx allocated on the stack
* x0 now points to struct cpu_suspend_ctx allocated on the stack
*/
str x2, [x1, #CPU_CTX_SP]
ldr x2, =sleep_save_sp
ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
str x2, [x0, #CPU_CTX_SP]
ldr x1, =sleep_save_sp
ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
#ifdef CONFIG_SMP
mrs x7, mpidr_el1
ldr x9, =mpidr_hash
......@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend)
ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
add x2, x2, x8, lsl #3
add x1, x1, x8, lsl #3
#endif
bl __cpu_suspend_finisher
bl __cpu_suspend_save
/*
* Grab suspend finisher in x20 and its argument in x19
*/
mov x0, x19
mov x1, x20
/*
* We are ready for power down, fire off the suspend finisher
* in x1, with argument in x0
*/
blr x1
/*
* Never gets here, unless suspend fails.
* Never gets here, unless suspend finisher fails.
* Successful cpu_suspend should return from cpu_resume, returning
* through this code path is considered an error
* If the return value is set to 0 force x0 = -EOPNOTSUPP
......@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend)
ldp x27, x28, [sp, #80]
ldp x29, lr, [sp], #96
ret
ENDPROC(__cpu_suspend)
ENDPROC(__cpu_suspend_enter)
.ltorg
/*
......
......@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
......@@ -65,12 +66,21 @@ static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
void **release_addr;
__le64 __iomem *release_addr;
if (!cpu_release_addr[cpu])
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
/*
* The cpu-release-addr may or may not be inside the linear mapping.
* As ioremap_cache will either give us a new mapping or reuse the
* existing linear mapping, we can use it to cover both cases. In
* either case the memory will be MT_NORMAL.
*/
release_addr = ioremap_cache(cpu_release_addr[cpu],
sizeof(*release_addr));
if (!release_addr)
return -ENOMEM;
/*
* We write the release address as LE regardless of the native
......@@ -79,15 +89,17 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
writeq_relaxed(__pa(secondary_holding_pen), release_addr);
__flush_dcache_area((__force void *)release_addr,
sizeof(*release_addr));
/*
* Send an event to wake up the secondary CPU.
*/
sev();
iounmap(release_addr);
return 0;
}
......
......@@ -111,10 +111,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
register unsigned long current_sp asm("sp");
data.no_sched_functions = 0;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
......
......@@ -9,22 +9,19 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
extern int __cpu_suspend(unsigned long);
extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
* This is called by __cpu_suspend() to save the state, and do whatever
* This is called by __cpu_suspend_enter() to save the state, and do whatever
* flushing is required to ensure that when the CPU goes to sleep we have
* the necessary data available when the caches are not searched.
*
* @arg: Argument to pass to suspend operations
* @ptr: CPU context virtual address
* @save_ptr: address of the location where the context physical address
* must be saved
* ptr: CPU context virtual address
* save_ptr: address of the location where the context physical address
* must be saved
*/
int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
phys_addr_t *save_ptr)
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
phys_addr_t *save_ptr)
{
int cpu = smp_processor_id();
*save_ptr = virt_to_phys(ptr);
cpu_do_suspend(ptr);
......@@ -35,8 +32,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
*/
__flush_dcache_area(ptr, sizeof(*ptr));
__flush_dcache_area(save_ptr, sizeof(*save_ptr));
return cpu_ops[cpu]->cpu_suspend(arg);
}
/*
......@@ -56,15 +51,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
}
/**
* cpu_suspend
* cpu_suspend() - function to enter a low-power state
* @arg: argument to pass to CPU suspend operations
*
* @arg: argument to pass to the finisher function
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
* operations back-end error code otherwise.
*/
int cpu_suspend(unsigned long arg)
{
struct mm_struct *mm = current->active_mm;
int ret, cpu = smp_processor_id();
unsigned long flags;
int cpu = smp_processor_id();
/*
* If cpu_ops have not been registered or suspend
......@@ -72,6 +67,21 @@ int cpu_suspend(unsigned long arg)
*/
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
return -EOPNOTSUPP;
return cpu_ops[cpu]->cpu_suspend(arg);
}
/*
* __cpu_suspend
*
* arg: argument to pass to the finisher function
* fn: finisher function pointer
*
*/
int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
struct mm_struct *mm = current->active_mm;
int ret;
unsigned long flags;
/*
* From this point debug exceptions are disabled to prevent
......@@ -86,7 +96,7 @@ int cpu_suspend(unsigned long arg)
* page tables, so that the thread address space is properly
* set-up on function return.
*/
ret = __cpu_suspend(arg);
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
......@@ -95,7 +105,7 @@ int cpu_suspend(unsigned long arg)
* Restore per-cpu offset before any kernel
* subsystem relying on it has a chance to run.
*/
set_my_cpu_offset(per_cpu_offset(cpu));
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
/*
* Restore HW breakpoint registers to sane values
......
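With this split, cpu_suspend() keeps its single-argument interface for callers while the finisher plumbing moves into __cpu_suspend(). A hedged sketch of a cpuidle-style caller, where index 0 is plain wfi and deeper states go through the CPU operations back-end (the function name is made up):

        static int hypothetical_enter_state(int index)
        {
                if (!index) {
                        cpu_do_idle();          /* state 0: architectural wfi */
                        return 0;
                }
                return cpu_suspend(index);      /* deeper states, e.g. via PSCI */
        }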
......@@ -132,7 +132,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
const register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
......@@ -145,7 +144,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = regs->pc;
} else if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
} else {
/*
......
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o proc.o
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
......@@ -22,11 +22,8 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
......@@ -125,7 +122,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
no_map:
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
*dma_handle = ~0;
*dma_handle = DMA_ERROR_CODE;
return NULL;
}
......@@ -308,40 +305,12 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
static int dma_bus_notifier(struct notifier_block *nb,
unsigned long event, void *_dev)
{
struct device *dev = _dev;
if (event != BUS_NOTIFY_ADD_DEVICE)
return NOTIFY_DONE;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
set_dma_ops(dev, &coherent_swiotlb_dma_ops);
return NOTIFY_OK;
}
static struct notifier_block platform_bus_nb = {
.notifier_call = dma_bus_notifier,
};
static struct notifier_block amba_bus_nb = {
.notifier_call = dma_bus_notifier,
};
extern int swiotlb_late_init_with_default_size(size_t default_size);
static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
/*
* These must be registered before of_platform_populate().
*/
bus_register_notifier(&platform_bus_type, &platform_bus_nb);
bus_register_notifier(&amba_bustype, &amba_bus_nb);
dma_ops = &noncoherent_swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
......
......@@ -255,7 +255,7 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
#ifndef CONFIG_SPARSEMEM_VMEMMAP
free_unused_memmap();
......
......@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
*/
int valid_phys_addr_range(unsigned long addr, size_t size)
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
if (addr < PHYS_OFFSET)
return 0;
......
......@@ -94,7 +94,7 @@ static int __init early_cachepolicy(char *p)
*/
asm volatile(
" mrs %0, mair_el1\n"
" bfi %0, %1, #%2, #8\n"
" bfi %0, %1, %2, #8\n"
" msr mair_el1, %0\n"
" isb\n"
: "=&r" (tmp)
......
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
struct page_change_data {
pgprot_t set_mask;
pgprot_t clear_mask;
};
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
void *data)
{
struct page_change_data *cdata = data;
pte_t pte = *ptep;
pte = clear_pte_bit(pte, cdata->clear_mask);
pte = set_pte_bit(pte, cdata->set_mask);
set_pte(ptep, pte);
return 0;
}
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr;
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
int ret;
struct page_change_data data;
if (!IS_ALIGNED(addr, PAGE_SIZE)) {
start &= PAGE_MASK;
end = start + size;
WARN_ON_ONCE(1);
}
if (!is_module_address(start) || !is_module_address(end - 1))
return -EINVAL;
data.set_mask = set_mask;
data.clear_mask = clear_mask;
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
&data);
flush_tlb_kernel_range(start, end);
return ret;
}
int set_memory_ro(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_RDONLY),
__pgprot(PTE_WRITE));
}
EXPORT_SYMBOL_GPL(set_memory_ro);
int set_memory_rw(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_WRITE),
__pgprot(PTE_RDONLY));
}
EXPORT_SYMBOL_GPL(set_memory_rw);
int set_memory_nx(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_PXN),
__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);
int set_memory_x(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(0),
__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
......@@ -76,6 +76,21 @@ ENTRY(cpu_reset)
ret x0
ENDPROC(cpu_reset)
ENTRY(cpu_soft_restart)
/* Save address of cpu_reset() and reset address */
mov x19, x0
mov x20, x1
/* Turn D-cache off */
bl cpu_cache_off
/* Push out all dirty data, and ensure cache is empty */
bl flush_cache_all
mov x0, x20
ret x19
ENDPROC(cpu_soft_restart)
/*
* cpu_do_idle()
*
......
#
# ARM64 networking code
#
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
/*
* BPF JIT compiler for ARM64
*
* Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _BPF_JIT_H
#define _BPF_JIT_H
#include <asm/insn.h>
/* 5-bit Register Operand */
#define A64_R(x) AARCH64_INSN_REG_##x
#define A64_FP AARCH64_INSN_REG_FP
#define A64_LR AARCH64_INSN_REG_LR
#define A64_ZR AARCH64_INSN_REG_ZR
#define A64_SP AARCH64_INSN_REG_SP
#define A64_VARIANT(sf) \
((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)
/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
/* Unconditional branch (immediate) */
#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
AARCH64_INSN_BRANCH_##type)
#define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK)
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
/* Unconditional branch (register) */
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
AARCH64_INSN_SIZE_##size, \
AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
AARCH64_INSN_VARIANT_64BIT, \
AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)
/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({ \
int sz = (sf) ? 64 : 32; \
A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
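/* e.g. sf=1, shift=3: immr = (unsigned)-3 % 64 = 61, imms = 60,
 * i.e. UBFM Rd, Rn, #61, #60, the canonical encoding of "lsl Rd, Rn, #3".
 */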
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
aarch64_insn_gen_movewide(Rd, imm16, shift, \
A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
* Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
* Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)
/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64)
/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \
A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV)
/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
#endif /* _BPF_JIT_H */
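As a quick illustration of how these generators compose (a sketch only; the real JIT in bpf_jit_comp.c emits into a ctx image buffer and tracks an instruction index):

        u32 add, ret;

        add = A64_ADD(1, A64_R(0), A64_R(0), A64_R(1)); /* add x0, x0, x1 */
        ret = A64_RET(A64_LR);                          /* ret */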
......@@ -160,11 +160,10 @@ EXPORT_SYMBOL(of_device_alloc);
* can use Platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE event
* to fix up DMA configuration.
*/
static void of_dma_configure(struct platform_device *pdev)
static void of_dma_configure(struct device *dev)
{
u64 dma_addr, paddr, size;
int ret;
struct device *dev = &pdev->dev;
/*
* Set default dma-mask to 32 bit. Drivers are expected to setup
......@@ -229,7 +228,7 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev)
goto err_clear_flag;
of_dma_configure(dev);
of_dma_configure(&dev->dev);
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
......@@ -291,7 +290,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
}
/* setup generic device info */
dev->dev.coherent_dma_mask = ~0;
dev->dev.of_node = of_node_get(node);
dev->dev.parent = parent;
dev->dev.platform_data = platform_data;
......@@ -299,6 +297,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
of_dma_configure(&dev->dev);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
......
......@@ -3388,7 +3388,7 @@ static inline int is_arm_mapping_symbol(const char *str)
{
if (str[0] == '.' && str[1] == 'L')
return true;
return str[0] == '$' && strchr("atd", str[1])
return str[0] == '$' && strchr("axtd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}
......
......@@ -84,7 +84,7 @@ static void usage(void)
*/
static inline int is_arm_mapping_symbol(const char *str)
{
return str[0] == '$' && strchr("atd", str[1])
return str[0] == '$' && strchr("axtd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}
......
......@@ -1147,7 +1147,7 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
static inline int is_arm_mapping_symbol(const char *str)
{
return str[0] == '$' && strchr("atd", str[1])
return str[0] == '$' && strchr("axtd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}
......