Commit 79a69d34 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64

Pull arm64 patches from Catalin Marinas:

 - SMP support for the PSCI booting protocol (power state coordination
   interface).

 - Simple earlyprintk support.

 - Platform devices populated by default from the DT (SoC-agnostic).

 - CONTEXTIDR support (used by external trace tools).

* tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64:
  arm64: mm: update CONTEXTIDR register to contain PID of current process
  arm64: atomics: fix grossly inconsistent asm constraints for exclusives
  arm64: compat: use compat_uptr_t type for compat_ucontext.uc_link
  arm64: Select ARCH_WANT_FRAME_POINTERS
  arm64: Add kvm_para.h and xor.h generic headers
  arm64: SMP: enable PSCI boot method
  arm64: psci: add support for PSCI invocations from the kernel
  arm64: SMP: rework the SMP code to be enabling method agnostic
  arm64: perf: add guest vs host discrimination
  arm64: add COMPAT_PSR_*_BIT flags
  arm64: Add simple earlyprintk support
  arm64: Populate the platform devices
parents 6db167df ec45d1cf
...@@ -35,6 +35,8 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap ...@@ -35,6 +35,8 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap] ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap]
ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device
ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space
ffffffbbffff0000 ffffffbcffffffff ~2MB [guard] ffffffbbffff0000 ffffffbcffffffff ~2MB [guard]
......
...@@ -2,6 +2,7 @@ config ARM64 ...@@ -2,6 +2,7 @@ config ARM64
def_bool y def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
select ARM_AMBA select ARM_AMBA
select CLONE_BACKWARDS select CLONE_BACKWARDS
select COMMON_CLK select COMMON_CLK
......
...@@ -24,4 +24,21 @@ config DEBUG_STACK_USAGE ...@@ -24,4 +24,21 @@ config DEBUG_STACK_USAGE
Enables the display of the minimum amount of free stack which each Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T output. task has ever had available in the sysrq-T output.
config EARLY_PRINTK
bool "Early printk support"
default y
help
Say Y here if you want to have an early console using the
earlyprintk=<name>[,<addr>][,<options>] kernel parameter. It
is assumed that the early console device has been initialised
by the boot loader prior to starting the Linux kernel.
config PID_IN_CONTEXTIDR
bool "Write the current PID to the CONTEXTIDR register"
help
Enabling this option causes the kernel to write the current PID to
the CONTEXTIDR register, at the expense of some additional
instructions during context switch. Say Y here only if you are
planning to use hardware trace tools with this kernel.
endmenu endmenu
...@@ -19,6 +19,7 @@ generic-y += ipcbuf.h ...@@ -19,6 +19,7 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h generic-y += irq_regs.h
generic-y += kdebug.h generic-y += kdebug.h
generic-y += kmap_types.h generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h generic-y += local.h
generic-y += local64.h generic-y += local64.h
generic-y += mman.h generic-y += mman.h
...@@ -48,3 +49,4 @@ generic-y += trace_clock.h ...@@ -48,3 +49,4 @@ generic-y += trace_clock.h
generic-y += types.h generic-y += types.h
generic-y += unaligned.h generic-y += unaligned.h
generic-y += user.h generic-y += user.h
generic-y += xor.h
...@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v) ...@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v)
int result; int result;
asm volatile("// atomic_add\n" asm volatile("// atomic_add\n"
"1: ldxr %w0, [%3]\n" "1: ldxr %w0, %2\n"
" add %w0, %w0, %w4\n" " add %w0, %w0, %w3\n"
" stxr %w1, %w0, [%3]\n" " stxr %w1, %w0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc");
} }
...@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v) ...@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result; int result;
asm volatile("// atomic_add_return\n" asm volatile("// atomic_add_return\n"
"1: ldaxr %w0, [%3]\n" "1: ldaxr %w0, %2\n"
" add %w0, %w0, %w4\n" " add %w0, %w0, %w3\n"
" stlxr %w1, %w0, [%3]\n" " stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc", "memory");
return result; return result;
} }
...@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v)
int result; int result;
asm volatile("// atomic_sub\n" asm volatile("// atomic_sub\n"
"1: ldxr %w0, [%3]\n" "1: ldxr %w0, %2\n"
" sub %w0, %w0, %w4\n" " sub %w0, %w0, %w3\n"
" stxr %w1, %w0, [%3]\n" " stxr %w1, %w0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc");
} }
...@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v) ...@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result; int result;
asm volatile("// atomic_sub_return\n" asm volatile("// atomic_sub_return\n"
"1: ldaxr %w0, [%3]\n" "1: ldaxr %w0, %2\n"
" sub %w0, %w0, %w4\n" " sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, [%3]\n" " stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc", "memory");
return result; return result;
} }
...@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) ...@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
int oldval; int oldval;
asm volatile("// atomic_cmpxchg\n" asm volatile("// atomic_cmpxchg\n"
"1: ldaxr %w1, [%3]\n" "1: ldaxr %w1, %2\n"
" cmp %w1, %w4\n" " cmp %w1, %w3\n"
" b.ne 2f\n" " b.ne 2f\n"
" stlxr %w0, %w5, [%3]\n" " stlxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n" " cbnz %w0, 1b\n"
"2:" "2:"
: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter) : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "r" (&ptr->counter), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc", "memory");
return oldval; return oldval;
} }
...@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) ...@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
unsigned long tmp, tmp2; unsigned long tmp, tmp2;
asm volatile("// atomic_clear_mask\n" asm volatile("// atomic_clear_mask\n"
"1: ldxr %0, [%3]\n" "1: ldxr %0, %2\n"
" bic %0, %0, %4\n" " bic %0, %0, %3\n"
" stxr %w1, %0, [%3]\n" " stxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr) : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
: "r" (addr), "Ir" (mask) : "Ir" (mask)
: "cc"); : "cc");
} }
...@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v) ...@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_add\n" asm volatile("// atomic64_add\n"
"1: ldxr %0, [%3]\n" "1: ldxr %0, %2\n"
" add %0, %0, %4\n" " add %0, %0, %3\n"
" stxr %w1, %0, [%3]\n" " stxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc");
} }
...@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v) ...@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_add_return\n" asm volatile("// atomic64_add_return\n"
"1: ldaxr %0, [%3]\n" "1: ldaxr %0, %2\n"
" add %0, %0, %4\n" " add %0, %0, %3\n"
" stlxr %w1, %0, [%3]\n" " stlxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc", "memory");
return result; return result;
} }
...@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) ...@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_sub\n" asm volatile("// atomic64_sub\n"
"1: ldxr %0, [%3]\n" "1: ldxr %0, %2\n"
" sub %0, %0, %4\n" " sub %0, %0, %3\n"
" stxr %w1, %0, [%3]\n" " stxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc");
} }
...@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) ...@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_sub_return\n" asm volatile("// atomic64_sub_return\n"
"1: ldaxr %0, [%3]\n" "1: ldaxr %0, %2\n"
" sub %0, %0, %4\n" " sub %0, %0, %3\n"
" stlxr %w1, %0, [%3]\n" " stlxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc", "memory");
return result; return result;
} }
...@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) ...@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
unsigned long res; unsigned long res;
asm volatile("// atomic64_cmpxchg\n" asm volatile("// atomic64_cmpxchg\n"
"1: ldaxr %1, [%3]\n" "1: ldaxr %1, %2\n"
" cmp %1, %4\n" " cmp %1, %3\n"
" b.ne 2f\n" " b.ne 2f\n"
" stlxr %w0, %5, [%3]\n" " stlxr %w0, %4, %2\n"
" cbnz %w0, 1b\n" " cbnz %w0, 1b\n"
"2:" "2:"
: "=&r" (res), "=&r" (oldval), "+o" (ptr->counter) : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "r" (&ptr->counter), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc", "memory");
return oldval; return oldval;
} }
...@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) ...@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n" asm volatile("// atomic64_dec_if_positive\n"
"1: ldaxr %0, [%3]\n" "1: ldaxr %0, %2\n"
" subs %0, %0, #1\n" " subs %0, %0, #1\n"
" b.mi 2f\n" " b.mi 2f\n"
" stlxr %w1, %0, [%3]\n" " stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
"2:" "2:"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter) :
: "cc"); : "cc", "memory");
return result; return result;
} }
......
...@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size ...@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) { switch (size) {
case 1: case 1:
asm volatile("// __xchg1\n" asm volatile("// __xchg1\n"
"1: ldaxrb %w0, [%3]\n" "1: ldaxrb %w0, %2\n"
" stlxrb %w1, %w2, [%3]\n" " stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp) : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x), "r" (ptr) : "r" (x)
: "memory", "cc"); : "cc", "memory");
break; break;
case 2: case 2:
asm volatile("// __xchg2\n" asm volatile("// __xchg2\n"
"1: ldaxrh %w0, [%3]\n" "1: ldaxrh %w0, %2\n"
" stlxrh %w1, %w2, [%3]\n" " stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp) : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x), "r" (ptr) : "r" (x)
: "memory", "cc"); : "cc", "memory");
break; break;
case 4: case 4:
asm volatile("// __xchg4\n" asm volatile("// __xchg4\n"
"1: ldaxr %w0, [%3]\n" "1: ldaxr %w0, %2\n"
" stlxr %w1, %w2, [%3]\n" " stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp) : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x), "r" (ptr) : "r" (x)
: "memory", "cc"); : "cc", "memory");
break; break;
case 8: case 8:
asm volatile("// __xchg8\n" asm volatile("// __xchg8\n"
"1: ldaxr %0, [%3]\n" "1: ldaxr %0, %2\n"
" stlxr %w1, %2, [%3]\n" " stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp) : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x), "r" (ptr) : "r" (x)
: "memory", "cc"); : "cc", "memory");
break; break;
default: default:
BUILD_BUG(); BUILD_BUG();
...@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 1: case 1:
do { do {
asm volatile("// __cmpxchg1\n" asm volatile("// __cmpxchg1\n"
" ldxrb %w1, [%2]\n" " ldxrb %w1, %2\n"
" mov %w0, #0\n" " mov %w0, #0\n"
" cmp %w1, %w3\n" " cmp %w1, %w3\n"
" b.ne 1f\n" " b.ne 1f\n"
" stxrb %w0, %w4, [%2]\n" " stxrb %w0, %w4, %2\n"
"1:\n" "1:\n"
: "=&r" (res), "=&r" (oldval) : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
: "r" (ptr), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc");
} while (res); } while (res);
break; break;
...@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 2: case 2:
do { do {
asm volatile("// __cmpxchg2\n" asm volatile("// __cmpxchg2\n"
" ldxrh %w1, [%2]\n" " ldxrh %w1, %2\n"
" mov %w0, #0\n" " mov %w0, #0\n"
" cmp %w1, %w3\n" " cmp %w1, %w3\n"
" b.ne 1f\n" " b.ne 1f\n"
" stxrh %w0, %w4, [%2]\n" " stxrh %w0, %w4, %2\n"
"1:\n" "1:\n"
: "=&r" (res), "=&r" (oldval) : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
: "r" (ptr), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "memory", "cc"); : "cc");
} while (res); } while (res);
break; break;
case 4: case 4:
do { do {
asm volatile("// __cmpxchg4\n" asm volatile("// __cmpxchg4\n"
" ldxr %w1, [%2]\n" " ldxr %w1, %2\n"
" mov %w0, #0\n" " mov %w0, #0\n"
" cmp %w1, %w3\n" " cmp %w1, %w3\n"
" b.ne 1f\n" " b.ne 1f\n"
" stxr %w0, %w4, [%2]\n" " stxr %w0, %w4, %2\n"
"1:\n" "1:\n"
: "=&r" (res), "=&r" (oldval) : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
: "r" (ptr), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc");
} while (res); } while (res);
break; break;
...@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 8: case 8:
do { do {
asm volatile("// __cmpxchg8\n" asm volatile("// __cmpxchg8\n"
" ldxr %1, [%2]\n" " ldxr %1, %2\n"
" mov %w0, #0\n" " mov %w0, #0\n"
" cmp %1, %3\n" " cmp %1, %3\n"
" b.ne 1f\n" " b.ne 1f\n"
" stxr %w0, %4, [%2]\n" " stxr %w0, %4, %2\n"
"1:\n" "1:\n"
: "=&r" (res), "=&r" (oldval) : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
: "r" (ptr), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc");
} while (res); } while (res);
break; break;
......
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
" .popsection\n" \ " .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \ : "r" (oparg), "Ir" (-EFAULT) \
: "cc") : "cc", "memory")
static inline int static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
......
...@@ -230,6 +230,9 @@ extern void __iounmap(volatile void __iomem *addr); ...@@ -230,6 +230,9 @@ extern void __iounmap(volatile void __iomem *addr);
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap #define iounmap __iounmap
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define ARCH_HAS_IOREMAP_WC #define ARCH_HAS_IOREMAP_WC
#include <asm-generic/iomap.h> #include <asm-generic/iomap.h>
......
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
#define PAGE_OFFSET UL(0xffffffc000000000) #define PAGE_OFFSET UL(0xffffffc000000000)
#define MODULES_END (PAGE_OFFSET) #define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M) #define MODULES_VADDR (MODULES_END - SZ_64M)
#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
#define VA_BITS (39) #define VA_BITS (39)
#define TASK_SIZE_64 (UL(1) << VA_BITS) #define TASK_SIZE_64 (UL(1) << VA_BITS)
......
...@@ -26,5 +26,6 @@ typedef struct { ...@@ -26,5 +26,6 @@ typedef struct {
extern void paging_init(void); extern void paging_init(void);
extern void setup_mm_for_reboot(void); extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
#endif #endif
...@@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid; ...@@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid;
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm); void __new_context(struct mm_struct *mm);
#ifdef CONFIG_PID_IN_CONTEXTIDR
/*
 * Write the PID of the incoming task to the CONTEXTIDR_EL1 register on
 * context switch, so external hardware trace tools can attribute trace
 * data to the current process.
 */
static inline void contextidr_thread_switch(struct task_struct *next)
{
	asm(
	" msr contextidr_el1, %0\n"
	" isb"	/* synchronize: make the new value visible immediately */
	:
	: "r" (task_pid_nr(next)));
}
#else
/* CONFIG_PID_IN_CONTEXTIDR disabled: no per-task CONTEXTIDR update. */
static inline void contextidr_thread_switch(struct task_struct *next)
{
}
#endif
/* /*
* Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
*/ */
......
...@@ -17,6 +17,11 @@ ...@@ -17,6 +17,11 @@
#ifndef __ASM_PERF_EVENT_H #ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H #define __ASM_PERF_EVENT_H
/* It's quiet around here... */ #ifdef CONFIG_HW_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
#endif
#endif #endif
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2013 ARM Limited
*/
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H

/* Power state types, as defined by the PSCI specification. */
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1

/* Power state descriptor passed to the PSCI suspend/off calls. */
struct psci_power_state {
	u16 id;			/* platform-specific state identifier */
	u8 type;		/* one of PSCI_POWER_STATE_TYPE_* */
	u8 affinity_level;	/* affinity level of the state — see PSCI spec */
};

/*
 * Table of PSCI invocation entry points. Presumably populated by
 * psci_init() from firmware/DT information — entries that the firmware
 * does not implement may be left NULL; callers should check before use.
 */
struct psci_operations {
	int (*cpu_suspend)(struct psci_power_state state,
			   unsigned long entry_point);
	int (*cpu_off)(struct psci_power_state state);
	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
	int (*migrate)(unsigned long cpuid);
};

extern struct psci_operations psci_ops;

int psci_init(void);

#endif /* __ASM_PSCI_H */
...@@ -42,6 +42,16 @@ ...@@ -42,6 +42,16 @@
#define COMPAT_PSR_MODE_UND 0x0000001b #define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f #define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020 #define COMPAT_PSR_T_BIT 0x00000020
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_J_BIT 0x01000000
#define COMPAT_PSR_Q_BIT 0x08000000
#define COMPAT_PSR_V_BIT 0x10000000
#define COMPAT_PSR_C_BIT 0x20000000
#define COMPAT_PSR_Z_BIT 0x40000000
#define COMPAT_PSR_N_BIT 0x80000000
#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
/* /*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
......
...@@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release; ...@@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release;
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
struct device_node;
struct smp_enable_ops {
const char *name;
int (*init_cpu)(struct device_node *, int);
int (*prepare_cpu)(int);
};
extern const struct smp_enable_ops smp_spin_table_ops;
extern const struct smp_enable_ops smp_psci_ops;
#endif /* ifndef __ASM_SMP_H */ #endif /* ifndef __ASM_SMP_H */
...@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) ...@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
asm volatile( asm volatile(
" sevl\n" " sevl\n"
"1: wfe\n" "1: wfe\n"
"2: ldaxr %w0, [%1]\n" "2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n" " cbnz %w0, 1b\n"
" stxr %w0, %w2, [%1]\n" " stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n" " cbnz %w0, 2b\n"
: "=&r" (tmp) : "=&r" (tmp), "+Q" (lock->lock)
: "r" (&lock->lock), "r" (1) : "r" (1)
: "memory"); : "cc", "memory");
} }
static inline int arch_spin_trylock(arch_spinlock_t *lock) static inline int arch_spin_trylock(arch_spinlock_t *lock)
...@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) ...@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned int tmp; unsigned int tmp;
asm volatile( asm volatile(
" ldaxr %w0, [%1]\n" " ldaxr %w0, %1\n"
" cbnz %w0, 1f\n" " cbnz %w0, 1f\n"
" stxr %w0, %w2, [%1]\n" " stxr %w0, %w2, %1\n"
"1:\n" "1:\n"
: "=&r" (tmp) : "=&r" (tmp), "+Q" (lock->lock)
: "r" (&lock->lock), "r" (1) : "r" (1)
: "memory"); : "cc", "memory");
return !tmp; return !tmp;
} }
...@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) ...@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock) static inline void arch_spin_unlock(arch_spinlock_t *lock)
{ {
asm volatile( asm volatile(
" stlr %w1, [%0]\n" " stlr %w1, %0\n"
: : "r" (&lock->lock), "r" (0) : "memory"); : "=Q" (lock->lock) : "r" (0) : "memory");
} }
/* /*
...@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw) ...@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
asm volatile( asm volatile(
" sevl\n" " sevl\n"
"1: wfe\n" "1: wfe\n"
"2: ldaxr %w0, [%1]\n" "2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n" " cbnz %w0, 1b\n"
" stxr %w0, %w2, [%1]\n" " stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n" " cbnz %w0, 2b\n"
: "=&r" (tmp) : "=&r" (tmp), "+Q" (rw->lock)
: "r" (&rw->lock), "r" (0x80000000) : "r" (0x80000000)
: "memory"); : "cc", "memory");
} }
static inline int arch_write_trylock(arch_rwlock_t *rw) static inline int arch_write_trylock(arch_rwlock_t *rw)
...@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) ...@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
unsigned int tmp; unsigned int tmp;
asm volatile( asm volatile(
" ldaxr %w0, [%1]\n" " ldaxr %w0, %1\n"
" cbnz %w0, 1f\n" " cbnz %w0, 1f\n"
" stxr %w0, %w2, [%1]\n" " stxr %w0, %w2, %1\n"
"1:\n" "1:\n"
: "=&r" (tmp) : "=&r" (tmp), "+Q" (rw->lock)
: "r" (&rw->lock), "r" (0x80000000) : "r" (0x80000000)
: "memory"); : "cc", "memory");
return !tmp; return !tmp;
} }
...@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) ...@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw) static inline void arch_write_unlock(arch_rwlock_t *rw)
{ {
asm volatile( asm volatile(
" stlr %w1, [%0]\n" " stlr %w1, %0\n"
: : "r" (&rw->lock), "r" (0) : "memory"); : "=Q" (rw->lock) : "r" (0) : "memory");
} }
/* write_can_lock - would write_trylock() succeed? */ /* write_can_lock - would write_trylock() succeed? */
...@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) ...@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
asm volatile( asm volatile(
" sevl\n" " sevl\n"
"1: wfe\n" "1: wfe\n"
"2: ldaxr %w0, [%2]\n" "2: ldaxr %w0, %2\n"
" add %w0, %w0, #1\n" " add %w0, %w0, #1\n"
" tbnz %w0, #31, 1b\n" " tbnz %w0, #31, 1b\n"
" stxr %w1, %w0, [%2]\n" " stxr %w1, %w0, %2\n"
" cbnz %w1, 2b\n" " cbnz %w1, 2b\n"
: "=&r" (tmp), "=&r" (tmp2) : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
: "r" (&rw->lock) :
: "memory"); : "cc", "memory");
} }
static inline void arch_read_unlock(arch_rwlock_t *rw) static inline void arch_read_unlock(arch_rwlock_t *rw)
...@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) ...@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
unsigned int tmp, tmp2; unsigned int tmp, tmp2;
asm volatile( asm volatile(
"1: ldxr %w0, [%2]\n" "1: ldxr %w0, %2\n"
" sub %w0, %w0, #1\n" " sub %w0, %w0, #1\n"
" stlxr %w1, %w0, [%2]\n" " stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2) : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
: "r" (&rw->lock) :
: "memory"); : "cc", "memory");
} }
static inline int arch_read_trylock(arch_rwlock_t *rw) static inline int arch_read_trylock(arch_rwlock_t *rw)
...@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) ...@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
unsigned int tmp, tmp2 = 1; unsigned int tmp, tmp2 = 1;
asm volatile( asm volatile(
" ldaxr %w0, [%2]\n" " ldaxr %w0, %2\n"
" add %w0, %w0, #1\n" " add %w0, %w0, #1\n"
" tbnz %w0, #31, 1f\n" " tbnz %w0, #31, 1f\n"
" stxr %w1, %w0, [%2]\n" " stxr %w1, %w0, %2\n"
"1:\n" "1:\n"
: "=&r" (tmp), "+r" (tmp2) : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
: "r" (&rw->lock) :
: "memory"); : "cc", "memory");
return !tmp2; return !tmp2;
} }
......
# UAPI Header export list # UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
header-y += auxvec.h header-y += auxvec.h
header-y += bitsperlong.h header-y += bitsperlong.h
header-y += byteorder.h header-y += byteorder.h
header-y += fcntl.h header-y += fcntl.h
header-y += hwcap.h header-y += hwcap.h
header-y += kvm_para.h
header-y += param.h header-y += param.h
header-y += ptrace.h header-y += ptrace.h
header-y += setup.h header-y += setup.h
......
...@@ -9,14 +9,15 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) ...@@ -9,14 +9,15 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o hyp-stub.o psci.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o sys_compat.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_SMP) += smp.o arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-y += $(arm64-obj-y) vdso/ obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m) obj-m += $(arm64-obj-m)
......
/*
* Earlyprintk support.
*
* Copyright (C) 2012 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/amba/serial.h>
/* Virtual base of the early UART; set up by setup_early_printk(). */
static void __iomem *early_base;
/* Single-character output routine selected from earlycon_match[]. */
static void (*printch)(char ch);

/*
 * PL011 single character TX.
 */
static void pl011_printch(char ch)
{
	/* Busy-wait while the TX FIFO is full. */
	while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF)
		;
	writeb_relaxed(ch, early_base + UART01x_DR);
	/* Wait for the UART to drain before returning. */
	while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY)
		;
}
/* Maps an earlyprintk=<name> device name to its output routine. */
struct earlycon_match {
	const char *name;		/* device name as given on the command line */
	void (*printch)(char ch);	/* single-character TX for this device */
};

/* Known early console devices; terminated by an empty sentinel entry. */
static const struct earlycon_match earlycon_match[] __initconst = {
	{ .name = "pl011", .printch = pl011_printch, },
	{}
};
/*
 * Console write hook: emit n characters through the selected printch
 * routine, expanding '\n' into "\r\n" for serial terminals.
 */
static void early_write(struct console *con, const char *s, unsigned n)
{
	for (; n > 0; n--, s++) {
		if (*s == '\n')
			printch('\r');
		printch(*s);
	}
}
/*
 * Boot-time console registered by setup_early_printk(). Flagged
 * CON_BOOT (boot console) and CON_PRINTBUFFER (replay the log buffer).
 */
static struct console early_console = {
	.name = "earlycon",
	.write = early_write,
	.flags = CON_PRINTBUFFER | CON_BOOT,
	.index = -1,
};
/*
* Parse earlyprintk=... parameter in the format:
*
* <name>[,<addr>][,<options>]
*
* and register the early console. It is assumed that the UART has been
* initialised by the bootloader already.
*/
static int __init setup_early_printk(char *buf)
{
	const struct earlycon_match *match = earlycon_match;
	phys_addr_t paddr = 0;

	if (!buf) {
		pr_warning("No earlyprintk arguments passed.\n");
		return 0;
	}

	/* Match the leading device name against the known consoles. */
	while (match->name) {
		size_t len = strlen(match->name);
		if (!strncmp(buf, match->name, len)) {
			buf += len;	/* skip name; buf now at ",<addr>..." */
			break;
		}
		match++;
	}
	if (!match->name) {
		pr_warning("Unknown earlyprintk arguments: %s\n", buf);
		return 0;
	}

	/* Optional physical I/O address, hex only, e.g. ",0x80000000". */
	if (!strncmp(buf, ",0x", 3)) {
		char *e;
		paddr = simple_strtoul(buf + 1, &e, 16);
		buf = e;
	}
	/* no options parsing yet */

	/* Map the UART at the fixed EARLYCON_IOBASE virtual address. */
	if (paddr)
		early_base = early_io_map(paddr, EARLYCON_IOBASE);
	/* NOTE(review): if no address was given, early_base is left as-is
	 * (NULL unless mapped earlier) — confirm callers always pass one. */

	printch = match->printch;
	register_console(&early_console);

	return 0;
}
/* Parsed early, before console_init: earlyprintk=<name>[,<addr>][,<options>] */
early_param("earlyprintk", setup_early_printk);
...@@ -82,10 +82,8 @@ ...@@ -82,10 +82,8 @@
#ifdef CONFIG_ARM64_64K_PAGES #ifdef CONFIG_ARM64_64K_PAGES
#define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
#define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS
#else #else
#define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
#define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS
#endif #endif
/* /*
...@@ -368,6 +366,7 @@ ENDPROC(__calc_phys_offset) ...@@ -368,6 +366,7 @@ ENDPROC(__calc_phys_offset)
* - identity mapping to enable the MMU (low address, TTBR0) * - identity mapping to enable the MMU (low address, TTBR0)
* - first few MB of the kernel linear mapping to jump to once the MMU has * - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled, including the FDT blob (TTBR1) * been enabled, including the FDT blob (TTBR1)
* - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
*/ */
__create_page_tables: __create_page_tables:
pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
...@@ -420,6 +419,15 @@ __create_page_tables: ...@@ -420,6 +419,15 @@ __create_page_tables:
sub x6, x6, #1 // inclusive range sub x6, x6, #1 // inclusive range
create_block_map x0, x7, x3, x5, x6 create_block_map x0, x7, x3, x5, x6
1: 1:
#ifdef CONFIG_EARLY_PRINTK
/*
* Create the pgd entry for the UART mapping. The full mapping is done
* later based earlyprintk kernel parameter.
*/
ldr x5, =EARLYCON_IOBASE // UART virtual address
add x0, x26, #2 * PAGE_SIZE // section table address
create_pgd_entry x26, x0, x5, x6, x7
#endif
ret ret
ENDPROC(__create_page_tables) ENDPROC(__create_page_tables)
.ltorg .ltorg
......
...@@ -1331,6 +1331,11 @@ void perf_callchain_user(struct perf_callchain_entry *entry, ...@@ -1331,6 +1331,11 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
{ {
struct frame_tail __user *tail; struct frame_tail __user *tail;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
tail = (struct frame_tail __user *)regs->regs[29]; tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < PERF_MAX_STACK_DEPTH && while (entry->nr < PERF_MAX_STACK_DEPTH &&
...@@ -1355,8 +1360,40 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, ...@@ -1355,8 +1360,40 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
{ {
struct stackframe frame; struct stackframe frame;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
frame.fp = regs->regs[29]; frame.fp = regs->regs[29];
frame.sp = regs->sp; frame.sp = regs->sp;
frame.pc = regs->pc; frame.pc = regs->pc;
walk_stackframe(&frame, callchain_trace, entry); walk_stackframe(&frame, callchain_trace, entry);
} }
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
return perf_guest_cbs->get_guest_ip();
return instruction_pointer(regs);
}
/*
 * Classify a perf sample as user/kernel and host/guest for the
 * PERF_RECORD_MISC_* field of the sample header.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->is_user_mode()
			? PERF_RECORD_MISC_GUEST_USER
			: PERF_RECORD_MISC_GUEST_KERNEL;

	return user_mode(regs) ? PERF_RECORD_MISC_USER
			       : PERF_RECORD_MISC_KERNEL;
}
...@@ -45,9 +45,10 @@ ...@@ -45,9 +45,10 @@
#include <asm/compat.h> #include <asm/compat.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
#include <asm/fpsimd.h>
static void setup_restart(void) static void setup_restart(void)
{ {
...@@ -314,6 +315,7 @@ struct task_struct *__switch_to(struct task_struct *prev, ...@@ -314,6 +315,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
/* the actual thread switch */ /* the actual thread switch */
last = cpu_switch_to(prev, next); last = cpu_switch_to(prev, next);
contextidr_thread_switch(next);
return last; return last;
} }
......
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Copyright (C) 2013 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
*/
#define pr_fmt(fmt) "psci: " fmt
#include <linux/init.h>
#include <linux/of.h>
#include <asm/compiler.h>
#include <asm/errno.h>
#include <asm/psci.h>
/* Exported PSCI operation table, populated by psci_init(). */
struct psci_operations psci_ops;
/* Conduit for PSCI calls: __invoke_psci_fn_hvc or __invoke_psci_fn_smc. */
static int (*invoke_psci_fn)(u64, u64, u64, u64);
/* Indices into psci_function_id[] for each standard PSCI function. */
enum psci_function {
	PSCI_FN_CPU_SUSPEND,
	PSCI_FN_CPU_ON,
	PSCI_FN_CPU_OFF,
	PSCI_FN_MIGRATE,
	PSCI_FN_MAX,
};
/* Firmware function IDs read from the device tree by psci_init(). */
static u32 psci_function_id[PSCI_FN_MAX];
/* Return codes defined by the PSCI firmware interface. */
#define PSCI_RET_SUCCESS		0
#define PSCI_RET_EOPNOTSUPP		-1
#define PSCI_RET_EINVAL			-2
#define PSCI_RET_EPERM			-3

/*
 * Translate a PSCI firmware return code into a Linux errno value.
 * Unknown codes are reported as -EINVAL.
 *
 * Fixes: stray ';' after the switch block (empty statement) removed and
 * the unknown-code fallback folded into a default case; the parameter is
 * renamed from 'errno' to 'err' so it no longer shadows the conventional
 * errno identifier (caller-invisible in C).
 */
static int psci_to_linux_errno(int err)
{
	switch (err) {
	case PSCI_RET_SUCCESS:
		return 0;
	case PSCI_RET_EOPNOTSUPP:
		return -EOPNOTSUPP;
	case PSCI_RET_EINVAL:
		return -EINVAL;
	case PSCI_RET_EPERM:
		return -EPERM;
	default:
		return -EINVAL;
	}
}
/* Field layout of the 32-bit PSCI power_state argument. */
#define PSCI_POWER_STATE_ID_MASK	0xffff
#define PSCI_POWER_STATE_ID_SHIFT	0
#define PSCI_POWER_STATE_TYPE_MASK	0x1
#define PSCI_POWER_STATE_TYPE_SHIFT	16
#define PSCI_POWER_STATE_AFFL_MASK	0x3
#define PSCI_POWER_STATE_AFFL_SHIFT	24

/*
 * Pack a struct psci_power_state into the 32-bit power_state word
 * expected by the PSCI firmware interface.
 */
static u32 psci_power_state_pack(struct psci_power_state state)
{
	u32 packed = 0;

	packed |= (state.id & PSCI_POWER_STATE_ID_MASK)
		  << PSCI_POWER_STATE_ID_SHIFT;
	packed |= (state.type & PSCI_POWER_STATE_TYPE_MASK)
		  << PSCI_POWER_STATE_TYPE_SHIFT;
	packed |= (state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
		  << PSCI_POWER_STATE_AFFL_SHIFT;

	return packed;
}
/*
 * The following two functions are invoked via the invoke_psci_fn pointer
 * and will not be inlined, allowing us to piggyback on the AAPCS.
 */
static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
					 u64 arg2)
{
	/*
	 * Per AAPCS64 the four arguments arrive in x0-x3, exactly where the
	 * PSCI conduit expects them; the __asmeq checks assert this at build
	 * time. The firmware's result comes back in x0 ("+r" on function_id).
	 */
	asm volatile(
			__asmeq("%0", "x0")
			__asmeq("%1", "x1")
			__asmeq("%2", "x2")
			__asmeq("%3", "x3")
			"hvc #0\n"
		: "+r" (function_id)
		: "r" (arg0), "r" (arg1), "r" (arg2));
	return function_id;
}
/*
 * SMC variant of the PSCI conduit: identical calling convention to the
 * HVC version above, for platforms where the firmware sits at EL3.
 */
static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
					 u64 arg2)
{
	asm volatile(
			__asmeq("%0", "x0")
			__asmeq("%1", "x1")
			__asmeq("%2", "x2")
			__asmeq("%3", "x3")
			"smc #0\n"
		: "+r" (function_id)
		: "r" (arg0), "r" (arg1), "r" (arg2));
	return function_id;
}
/*
 * PSCI CPU_SUSPEND: place the calling CPU in the given low-power state,
 * resuming at entry_point. Returns 0 or a negative errno.
 */
static int psci_cpu_suspend(struct psci_power_state state,
			    unsigned long entry_point)
{
	u32 fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
	u32 power_state = psci_power_state_pack(state);

	return psci_to_linux_errno(invoke_psci_fn(fn, power_state,
						  entry_point, 0));
}
/*
 * PSCI CPU_OFF: power down the calling CPU using the given power state.
 * Returns 0 or a negative errno (only reached if the call fails).
 */
static int psci_cpu_off(struct psci_power_state state)
{
	u32 fn = psci_function_id[PSCI_FN_CPU_OFF];
	u32 power_state = psci_power_state_pack(state);

	return psci_to_linux_errno(invoke_psci_fn(fn, power_state, 0, 0));
}
/*
 * PSCI CPU_ON: power up the CPU identified by cpuid, entering the kernel
 * at entry_point. Returns 0 or a negative errno.
 */
static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
	u32 fn = psci_function_id[PSCI_FN_CPU_ON];

	return psci_to_linux_errno(invoke_psci_fn(fn, cpuid, entry_point, 0));
}
/*
 * PSCI MIGRATE: ask the firmware to migrate its trusted-OS context to
 * the CPU identified by cpuid. Returns 0 or a negative errno.
 */
static int psci_migrate(unsigned long cpuid)
{
	u32 fn = psci_function_id[PSCI_FN_MIGRATE];

	return psci_to_linux_errno(invoke_psci_fn(fn, cpuid, 0, 0));
}
/* Device-tree match table: PSCI is described by an "arm,psci" node. */
static const struct of_device_id psci_of_match[] __initconst = {
	{ .compatible = "arm,psci", },
	{},
};
/*
 * Probe the device tree for a PSCI node, select the invocation conduit
 * (hvc or smc) from its "method" property and record the firmware
 * function ID for each operation the node advertises.
 *
 * Returns 0 on success (even if no operation IDs were found), -ENODEV
 * when there is no PSCI node, -ENXIO for a missing "method" property and
 * -EINVAL for an unrecognised one.
 */
int __init psci_init(void)
{
	struct device_node *np;
	const char *method;
	u32 id;
	int err = 0;
	np = of_find_matching_node(NULL, psci_of_match);
	if (!np)
		return -ENODEV;
	pr_info("probing function IDs from device-tree\n");
	if (of_property_read_string(np, "method", &method)) {
		pr_warning("missing \"method\" property\n");
		err = -ENXIO;
		goto out_put_node;
	}
	if (!strcmp("hvc", method)) {
		invoke_psci_fn = __invoke_psci_fn_hvc;
	} else if (!strcmp("smc", method)) {
		invoke_psci_fn = __invoke_psci_fn_smc;
	} else {
		pr_warning("invalid \"method\" property: %s\n", method);
		err = -EINVAL;
		goto out_put_node;
	}
	/* Each operation is optional; register only the IDs present. */
	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
		psci_ops.cpu_suspend = psci_cpu_suspend;
	}
	if (!of_property_read_u32(np, "cpu_off", &id)) {
		psci_function_id[PSCI_FN_CPU_OFF] = id;
		psci_ops.cpu_off = psci_cpu_off;
	}
	if (!of_property_read_u32(np, "cpu_on", &id)) {
		psci_function_id[PSCI_FN_CPU_ON] = id;
		psci_ops.cpu_on = psci_cpu_on;
	}
	if (!of_property_read_u32(np, "migrate", &id)) {
		psci_function_id[PSCI_FN_MIGRATE] = id;
		psci_ops.migrate = psci_migrate;
	}
out_put_node:
	/* Drop the reference taken by of_find_matching_node(). */
	of_node_put(np);
	return err;
}
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/memblock.h> #include <linux/memblock.h>
#include <linux/of_fdt.h> #include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/elf.h> #include <asm/elf.h>
...@@ -49,6 +50,7 @@ ...@@ -49,6 +50,7 @@
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/memblock.h> #include <asm/memblock.h>
#include <asm/psci.h>
unsigned int processor_id; unsigned int processor_id;
EXPORT_SYMBOL(processor_id); EXPORT_SYMBOL(processor_id);
...@@ -260,6 +262,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -260,6 +262,8 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree(); unflatten_device_tree();
psci_init();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_init_cpus(); smp_init_cpus();
#endif #endif
...@@ -289,6 +293,13 @@ static int __init topology_init(void) ...@@ -289,6 +293,13 @@ static int __init topology_init(void)
} }
subsys_initcall(topology_init); subsys_initcall(topology_init);
/*
 * Populate platform devices from the flattened device tree using the
 * default (SoC-agnostic) bus match table.
 */
static int __init arm64_device_probe(void)
{
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
device_initcall(arm64_device_probe);
static const char *hwcap_str[] = { static const char *hwcap_str[] = {
"fp", "fp",
"asimd", "asimd",
......
...@@ -76,7 +76,7 @@ struct compat_sigcontext { ...@@ -76,7 +76,7 @@ struct compat_sigcontext {
struct compat_ucontext { struct compat_ucontext {
compat_ulong_t uc_flags; compat_ulong_t uc_flags;
struct compat_ucontext *uc_link; compat_uptr_t uc_link;
compat_stack_t uc_stack; compat_stack_t uc_stack;
struct compat_sigcontext uc_mcontext; struct compat_sigcontext uc_mcontext;
compat_sigset_t uc_sigmask; compat_sigset_t uc_sigmask;
...@@ -703,7 +703,7 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, ...@@ -703,7 +703,7 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
err |= copy_siginfo_to_user32(&frame->info, info); err |= copy_siginfo_to_user32(&frame->info, info);
__put_user_error(0, &frame->sig.uc.uc_flags, err); __put_user_error(0, &frame->sig.uc.uc_flags, err);
__put_user_error(NULL, &frame->sig.uc.uc_link, err); __put_user_error(0, &frame->sig.uc.uc_link, err);
memset(&stack, 0, sizeof(stack)); memset(&stack, 0, sizeof(stack));
stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; stack.ss_sp = (compat_uptr_t)current->sas_ss_sp;
......
...@@ -233,7 +233,28 @@ void __init smp_prepare_boot_cpu(void) ...@@ -233,7 +233,28 @@ void __init smp_prepare_boot_cpu(void)
} }
static void (*smp_cross_call)(const struct cpumask *, unsigned int); static void (*smp_cross_call)(const struct cpumask *, unsigned int);
static phys_addr_t cpu_release_addr[NR_CPUS];
/* NULL-terminated list of all supported CPU enable methods. */
static const struct smp_enable_ops *enable_ops[] __initconst = {
	&smp_spin_table_ops,
	&smp_psci_ops,
	NULL,
};
/* Per-CPU enable method selected from the DT in smp_init_cpus(). */
static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
/*
 * Look up the enable method named by a cpu node's "enable-method"
 * property in the enable_ops[] table; returns NULL if unknown.
 *
 * Bug fix: the original initialised a struct pointer from enable_ops[0],
 * so ops++ stepped through struct smp_enable_ops objects rather than the
 * array of pointers — it never examined the second table entry correctly
 * and walked off the first ops structure into unrelated memory. Iterate
 * over the pointer array itself instead.
 */
static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
{
	const struct smp_enable_ops **ops = enable_ops;

	while (*ops) {
		if (!strcmp(name, (*ops)->name))
			return *ops;
		ops++;
	}

	return NULL;
}
/* /*
* Enumerate the possible CPU set from the device tree. * Enumerate the possible CPU set from the device tree.
...@@ -252,22 +273,22 @@ void __init smp_init_cpus(void) ...@@ -252,22 +273,22 @@ void __init smp_init_cpus(void)
* We currently support only the "spin-table" enable-method. * We currently support only the "spin-table" enable-method.
*/ */
enable_method = of_get_property(dn, "enable-method", NULL); enable_method = of_get_property(dn, "enable-method", NULL);
if (!enable_method || strcmp(enable_method, "spin-table")) { if (!enable_method) {
pr_err("CPU %d: missing or invalid enable-method property: %s\n", pr_err("CPU %d: missing enable-method property\n", cpu);
cpu, enable_method);
goto next; goto next;
} }
/* smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
* Determine the address from which the CPU is polling.
*/ if (!smp_enable_ops[cpu]) {
if (of_property_read_u64(dn, "cpu-release-addr", pr_err("CPU %d: invalid enable-method property: %s\n",
&cpu_release_addr[cpu])) { cpu, enable_method);
pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
cpu);
goto next; goto next;
} }
if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
goto next;
set_cpu_possible(cpu, true); set_cpu_possible(cpu, true);
next: next:
cpu++; cpu++;
...@@ -281,8 +302,7 @@ void __init smp_init_cpus(void) ...@@ -281,8 +302,7 @@ void __init smp_init_cpus(void)
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
int cpu; int cpu, err;
void **release_addr;
unsigned int ncores = num_possible_cpus(); unsigned int ncores = num_possible_cpus();
/* /*
...@@ -291,30 +311,35 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -291,30 +311,35 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (max_cpus > ncores) if (max_cpus > ncores)
max_cpus = ncores; max_cpus = ncores;
/* Don't bother if we're effectively UP */
if (max_cpus <= 1)
return;
/* /*
* Initialise the present map (which describes the set of CPUs * Initialise the present map (which describes the set of CPUs
* actually populated at the present time) and release the * actually populated at the present time) and release the
* secondaries from the bootloader. * secondaries from the bootloader.
*
* Make sure we online at most (max_cpus - 1) additional CPUs.
*/ */
max_cpus--;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
if (max_cpus == 0) if (max_cpus == 0)
break; break;
if (!cpu_release_addr[cpu]) if (cpu == smp_processor_id())
continue;
if (!smp_enable_ops[cpu])
continue; continue;
release_addr = __va(cpu_release_addr[cpu]); err = smp_enable_ops[cpu]->prepare_cpu(cpu);
release_addr[0] = (void *)__pa(secondary_holding_pen); if (err)
__flush_dcache_area(release_addr, sizeof(release_addr[0])); continue;
set_cpu_present(cpu, true); set_cpu_present(cpu, true);
max_cpus--; max_cpus--;
} }
/*
* Send an event to wake up the secondaries.
*/
sev();
} }
......
/*
* PSCI SMP initialisation
*
* Copyright (C) 2013 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/psci.h>
/* PSCI-booted CPUs need no per-cpu data from the DT; always succeeds. */
static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
{
	return 0;
}
/*
 * Ask the PSCI firmware to power on the given secondary CPU, entering
 * the kernel at secondary_holding_pen. Returns 0 on success, -ENODEV if
 * the firmware provides no cpu_on method, or the cpu_on error code.
 */
static int __init smp_psci_prepare_cpu(int cpu)
{
	int err;
	if (!psci_ops.cpu_on) {
		pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
		return -ENODEV;
	}
	/*
	 * NOTE(review): the logical cpu number is passed as the PSCI cpuid;
	 * this assumes logical ids match the firmware's (MPIDR-based) ids —
	 * TODO confirm for the target platforms.
	 */
	err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen));
	if (err) {
		pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
		return err;
	}
	return 0;
}
/* Enable method matched against a DT "enable-method" of "psci". */
const struct smp_enable_ops smp_psci_ops __initconst = {
	.name = "psci",
	.init_cpu = smp_psci_init_cpu,
	.prepare_cpu = smp_psci_prepare_cpu,
};
/*
* Spin Table SMP initialisation
*
* Copyright (C) 2013 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
/* Physical address each CPU polls, read from "cpu-release-addr". */
static phys_addr_t cpu_release_addr[NR_CPUS];

/*
 * Record the address from which the given CPU is polling, taken from the
 * cpu node's "cpu-release-addr" property.
 *
 * Returns 0 on success or -ENODEV when the property is missing/invalid
 * (the original returned a bare -1; callers only test for non-zero, so
 * returning a proper negative errno — consistent with
 * smp_psci_prepare_cpu() — is backward compatible).
 */
static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
{
	/*
	 * Determine the address from which the CPU is polling.
	 */
	if (of_property_read_u64(dn, "cpu-release-addr",
				 &cpu_release_addr[cpu])) {
		pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
		       cpu);
		return -ENODEV;
	}

	return 0;
}
/*
 * Release a secondary CPU from its boot-loader spin loop: write the
 * physical address of secondary_holding_pen to the agreed release
 * address, push the write out of the cache, then wake the CPU with SEV.
 * Returns 0 on success or -ENODEV if no release address was recorded.
 */
static int __init smp_spin_table_prepare_cpu(int cpu)
{
	void **release_addr;
	if (!cpu_release_addr[cpu])
		return -ENODEV;
	release_addr = __va(cpu_release_addr[cpu]);
	release_addr[0] = (void *)__pa(secondary_holding_pen);
	/* Flush so the spinning CPU (possibly caches-off) sees the value. */
	__flush_dcache_area(release_addr, sizeof(release_addr[0]));
	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();
	return 0;
}
/* Enable method matched against a DT "enable-method" of "spin-table". */
const struct smp_enable_ops smp_spin_table_ops __initconst = {
	.name = "spin-table",
	.init_cpu = smp_spin_table_init_cpu,
	.prepare_cpu = smp_spin_table_prepare_cpu,
};
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/nodemask.h> #include <linux/nodemask.h>
#include <linux/memblock.h> #include <linux/memblock.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/io.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -251,6 +252,47 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, ...@@ -251,6 +252,47 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
} while (pgd++, addr = next, addr != end); } while (pgd++, addr = next, addr != end);
} }
#ifdef CONFIG_EARLY_PRINTK
/*
 * Create an early I/O mapping using the pgd/pmd entries already populated
 * in head.S as this function is called too early to allocate any memory.
 * The mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
 */
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
	unsigned long size, mask;
	/*
	 * Bug fix: IS_ENABLED() takes the full CONFIG_ symbol name;
	 * IS_ENABLED(ARM64_64K_PAGES) always evaluated to false, so 64K-page
	 * kernels incorrectly took the pmd/section-mapping path.
	 */
	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * No early pte entries with !ARM64_64K_PAGES configuration, so using
	 * sections (pmd).
	 */
	size = page64k ? PAGE_SIZE : SECTION_SIZE;
	mask = ~(size - 1);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, virt);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, virt);

	if (page64k) {
		if (pmd_none(*pmd))
			return NULL;
		pte = pte_offset_kernel(pmd, virt);
		set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
	} else {
		set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
	}

	/* Return the virtual address adjusted by the sub-page/section offset. */
	return (void __iomem *)((virt & mask) + (phys & ~mask));
}
#endif
static void __init map_mem(void) static void __init map_mem(void)
{ {
struct memblock_region *reg; struct memblock_region *reg;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment