Commit df762ecc authored by Russell King

Merge branch 'devel-stable' into for-next

Conflicts:
	arch/arm/include/asm/atomic.h
	arch/arm/include/asm/hardirq.h
	arch/arm/kernel/smp.c
parents ec1e20a0 70d42126
......@@ -52,6 +52,8 @@ config ARM
select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
......@@ -482,6 +484,7 @@ config ARCH_IXP4XX
bool "IXP4xx-based"
depends on MMU
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_SUPPORTS_BIG_ENDIAN
select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select CPU_XSCALE
......@@ -1545,6 +1548,32 @@ config MCPM
for (multi-)cluster based systems, such as big.LITTLE based
systems.
config BIG_LITTLE
bool "big.LITTLE support (Experimental)"
depends on CPU_V7 && SMP
select MCPM
help
This option enables support selections for the big.LITTLE
system architecture.
config BL_SWITCHER
bool "big.LITTLE switcher support"
depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
select CPU_PM
select ARM_CPU_SUSPEND
help
The big.LITTLE "switcher" provides the core functionality to
transparently handle transition between a cluster of A15's
and a cluster of A7's in a big.LITTLE system.
config BL_SWITCHER_DUMMY_IF
tristate "Simple big.LITTLE switcher user interface"
depends on BL_SWITCHER && DEBUG_KERNEL
help
This is a simple and dummy char dev interface to control
the big.LITTLE switcher core code. It is meant for
debugging purposes only.
choice
prompt "Memory split"
default VMSPLIT_3G
......
......@@ -16,6 +16,7 @@ LDFLAGS :=
LDFLAGS_vmlinux :=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
LDFLAGS_MODULE += --be8
endif
OBJCOPYFLAGS :=-O binary -R .comment -S
......
......@@ -135,6 +135,7 @@ start:
.word _edata @ zImage end address
THUMB( .thumb )
1:
ARM_BE8( setend be ) @ go BE8 if compiled for BE8
mrs r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install @ get into SVC mode, reversibly
......@@ -699,9 +700,7 @@ __armv4_mmu_cache_on:
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
......@@ -728,9 +727,7 @@ __armv7_mmu_cache_on:
orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
@ (needed for ARM1176)
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
......
......@@ -17,3 +17,5 @@ obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
AFLAGS_mcpm_head.o := -march=armv7-a
AFLAGS_vlock.o := -march=armv7-a
obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o
obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
/*
* arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
*
* Created by: Nicolas Pitre, November 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* Dummy interface to user space for debugging purposes only.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include <asm/bL_switcher.h>
static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
size_t len, loff_t *pos)
{
unsigned char val[3];
unsigned int cpu, cluster;
int ret;
pr_debug("%s\n", __func__);
if (len < 3)
return -EINVAL;
if (copy_from_user(val, buf, 3))
return -EFAULT;
/* format: <cpu#>,<cluster#> */
if (val[0] < '0' || val[0] > '9' ||
val[1] != ',' ||
val[2] < '0' || val[2] > '1')
return -EINVAL;
cpu = val[0] - '0';
cluster = val[2] - '0';
ret = bL_switch_request(cpu, cluster);
return ret ? : len;
}
static const struct file_operations bL_switcher_fops = {
.write = bL_switcher_write,
.owner = THIS_MODULE,
};
static struct miscdevice bL_switcher_device = {
MISC_DYNAMIC_MINOR,
"b.L_switcher",
&bL_switcher_fops
};
static int __init bL_switcher_dummy_if_init(void)
{
return misc_register(&bL_switcher_device);
}
static void __exit bL_switcher_dummy_if_exit(void)
{
misc_deregister(&bL_switcher_device);
}
module_init(bL_switcher_dummy_if_init);
module_exit(bL_switcher_dummy_if_exit);
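As an aside for readers unfamiliar with the interface above: bL_switcher_write() expects exactly the three characters "<cpu#>,<cluster#>". A minimal user-space sketch of driving it might look like the following; the /dev/b.L_switcher path assumes the misc device name registered above is exposed as-is by udev, and the program itself is illustrative rather than part of this commit.

/* Illustrative user-space test, not part of this commit: switch CPU 0 to cluster 1. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumes the misc device named "b.L_switcher" appears under /dev. */
	int fd = open("/dev/b.L_switcher", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/b.L_switcher");
		return 1;
	}
	/* Format checked by bL_switcher_write(): "<cpu#>,<cluster#>" */
	if (write(fd, "0,1", 3) != 3)
		perror("write");
	close(fd);
	return 0;
}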
......@@ -27,6 +27,18 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
unsigned long poke_phys_addr, unsigned long poke_val)
{
unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
poke[0] = poke_phys_addr;
poke[1] = poke_val;
__cpuc_flush_dcache_area((void *)poke, 8);
outer_clean_range(__pa(poke), __pa(poke + 2));
}
static const struct mcpm_platform_ops *platform_ops;
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
......
......@@ -15,6 +15,7 @@
#include <linux/linkage.h>
#include <asm/mcpm.h>
#include <asm/assembler.h>
#include "vlock.h"
......@@ -47,6 +48,7 @@
ENTRY(mcpm_entry_point)
ARM_BE8(setend be)
THUMB( adr r12, BSYM(1f) )
THUMB( bx r12 )
THUMB( .thumb )
......@@ -71,12 +73,19 @@ ENTRY(mcpm_entry_point)
* position independent way.
*/
adr r5, 3f
ldmia r5, {r6, r7, r8, r11}
ldmia r5, {r0, r6, r7, r8, r11}
add r0, r5, r0 @ r0 = mcpm_entry_early_pokes
add r6, r5, r6 @ r6 = mcpm_entry_vectors
ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
add r8, r5, r8 @ r8 = mcpm_sync
add r11, r5, r11 @ r11 = first_man_locks
@ Perform an early poke, if any
add r0, r0, r4, lsl #3
ldmia r0, {r0, r1}
teq r0, #0
strne r1, [r0]
mov r0, #MCPM_SYNC_CLUSTER_SIZE
mla r8, r0, r10, r8 @ r8 = sync cluster base
......@@ -195,7 +204,8 @@ mcpm_entry_gated:
.align 2
3: .word mcpm_entry_vectors - .
3: .word mcpm_entry_early_pokes - .
.word mcpm_entry_vectors - 3b
.word mcpm_power_up_setup_phys - 3b
.word mcpm_sync - 3b
.word first_man_locks - 3b
......@@ -214,6 +224,10 @@ first_man_locks:
ENTRY(mcpm_entry_vectors)
.space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
.type mcpm_entry_early_pokes, #object
ENTRY(mcpm_entry_early_pokes)
.space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
.type mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
.space 4 @ set by mcpm_sync_init()
......@@ -3,7 +3,17 @@
#
obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
aes-arm-y := aes-armv4.o aes_glue.o
aes-arm-bs-y := aesbs-core.o aesbs-glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
$(call cmd,perl)
.PRECIOUS: $(obj)/aesbs-core.S
......@@ -6,22 +6,12 @@
#include <linux/crypto.h>
#include <crypto/aes.h>
#define AES_MAXNR 14
#include "aes_glue.h"
typedef struct {
unsigned int rd_key[4 *(AES_MAXNR + 1)];
int rounds;
} AES_KEY;
struct AES_CTX {
AES_KEY enc_key;
AES_KEY dec_key;
};
asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
EXPORT_SYMBOL(AES_encrypt);
EXPORT_SYMBOL(AES_decrypt);
EXPORT_SYMBOL(private_AES_set_encrypt_key);
EXPORT_SYMBOL(private_AES_set_decrypt_key);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
......
#define AES_MAXNR 14
struct AES_KEY {
unsigned int rd_key[4 * (AES_MAXNR + 1)];
int rounds;
};
struct AES_CTX {
struct AES_KEY enc_key;
struct AES_KEY dec_key;
};
asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
const int bits, struct AES_KEY *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
const int bits, struct AES_KEY *key);
......@@ -24,6 +24,7 @@ generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
......
......@@ -53,6 +53,13 @@
#define put_byte_3 lsl #0
#endif
/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
/*
* Data preload for architectures that support it
*/
......
......@@ -12,6 +12,7 @@
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
......@@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
unsigned long tmp;
int result;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
......@@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v)
unsigned long tmp;
int result;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
......@@ -260,6 +263,7 @@ static inline void atomic64_set(atomic64_t *v, long long i)
{
long long tmp;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
......@@ -276,10 +280,11 @@ static inline void atomic64_add(long long i, atomic64_t *v)
long long result;
unsigned long tmp;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
" adds %0, %0, %4\n"
" adc %H0, %H0, %H4\n"
" adds %Q0, %Q0, %Q4\n"
" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
......@@ -297,8 +302,8 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" adds %0, %0, %4\n"
" adc %H0, %H0, %H4\n"
" adds %Q0, %Q0, %Q4\n"
" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
......@@ -316,10 +321,11 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
long long result;
unsigned long tmp;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %0, %0, %4\n"
" sbc %H0, %H0, %H4\n"
" subs %Q0, %Q0, %Q4\n"
" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
......@@ -337,8 +343,8 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %0, %0, %4\n"
" sbc %H0, %H0, %H4\n"
" subs %Q0, %Q0, %Q4\n"
" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
......@@ -406,9 +412,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %0, %0, #1\n"
" sbc %H0, %H0, #0\n"
" teq %H0, #0\n"
" subs %Q0, %Q0, #1\n"
" sbc %R0, %R0, #0\n"
" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
......@@ -437,8 +443,8 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
" adds %0, %0, %6\n"
" adc %H0, %H0, %H6\n"
" adds %Q0, %Q0, %Q6\n"
" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
......
/*
* arch/arm/include/asm/bL_switcher.h
*
* Created by: Nicolas Pitre, April 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_BL_SWITCHER_H
#define ASM_BL_SWITCHER_H
#include <linux/compiler.h>
#include <linux/types.h>
typedef void (*bL_switch_completion_handler)(void *cookie);
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
bL_switch_completion_handler completer,
void *completer_cookie);
static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
}
/*
* Register here to be notified about runtime enabling/disabling of
* the switcher.
*
* The notifier chain is called with the switcher activation lock held:
* the switcher will not be enabled or disabled during callbacks.
* Callbacks must not call bL_switcher_{get,put}_enabled().
*/
#define BL_NOTIFY_PRE_ENABLE 0
#define BL_NOTIFY_POST_ENABLE 1
#define BL_NOTIFY_PRE_DISABLE 2
#define BL_NOTIFY_POST_DISABLE 3
#ifdef CONFIG_BL_SWITCHER
int bL_switcher_register_notifier(struct notifier_block *nb);
int bL_switcher_unregister_notifier(struct notifier_block *nb);
/*
* Use these functions to temporarily prevent enabling/disabling of
* the switcher.
* bL_switcher_get_enabled() returns true if the switcher is currently
* enabled. Each call to bL_switcher_get_enabled() must be followed
* by a call to bL_switcher_put_enabled(). These functions are not
* recursive.
*/
bool bL_switcher_get_enabled(void);
void bL_switcher_put_enabled(void);
int bL_switcher_trace_trigger(void);
int bL_switcher_get_logical_index(u32 mpidr);
#else
static inline int bL_switcher_register_notifier(struct notifier_block *nb)
{
return 0;
}
static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
return 0;
}
static inline bool bL_switcher_get_enabled(void) { return false; }
static inline void bL_switcher_put_enabled(void) { }
static inline int bL_switcher_trace_trigger(void) { return 0; }
static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
#endif /* CONFIG_BL_SWITCHER */
#endif
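To make the notifier and get/put semantics documented in this header more concrete, here is a hedged sketch of a hypothetical in-kernel client; the callback, its policy, and the init function are invented for illustration, and only the BL_NOTIFY_* constants and bL_switcher_* calls come from the header above.

/* Hypothetical client: quiesce work around switcher enable/disable transitions. */
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/bL_switcher.h>

static int my_bl_notify(struct notifier_block *nb, unsigned long action, void *data)
{
	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		/* stop anything that must not run while the switcher changes state */
		break;
	case BL_NOTIFY_POST_ENABLE:
	case BL_NOTIFY_POST_DISABLE:
		/* resume normal operation */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_bl_nb = {
	.notifier_call = my_bl_notify,
};

static int __init my_bl_client_init(void)
{
	int ret = bL_switcher_register_notifier(&my_bl_nb);

	if (ret)
		return ret;

	/*
	 * Elsewhere, code that briefly depends on the switcher state can
	 * bracket itself with bL_switcher_get_enabled() and a matching
	 * bL_switcher_put_enabled(); as noted above, the calls are not
	 * recursive and must not be made from the notifier callback.
	 */
	return 0;
}
late_initcall(my_bl_client_init);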
......@@ -2,6 +2,8 @@
#define _ASMARM_BUG_H
#include <linux/linkage.h>
#include <linux/types.h>
#include <asm/opcodes.h>
#ifdef CONFIG_BUG
......@@ -12,10 +14,10 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
#define BUG_INSTR_VALUE 0xde02
#define BUG_INSTR_TYPE ".hword "
#define BUG_INSTR(__value) __inst_thumb16(__value)
#else
#define BUG_INSTR_VALUE 0xe7f001f2
#define BUG_INSTR_TYPE ".word "
#define BUG_INSTR(__value) __inst_arm(__value)
#endif
......@@ -33,7 +35,7 @@
#define __BUG(__file, __line, __value) \
do { \
asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
asm volatile("1:\t" BUG_INSTR(__value) "\n" \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
......@@ -48,7 +50,7 @@ do { \
#define __BUG(__file, __line, __value) \
do { \
asm volatile(BUG_INSTR_TYPE #__value); \
asm volatile(BUG_INSTR(__value) "\n"); \
unreachable(); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
......
......@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
#define NR_IPI 7
#define NR_IPI 8
typedef struct {
unsigned int __softirq_pending;
......
......@@ -24,8 +24,8 @@
#define TRACER_TIMEOUT 10000
#define etm_writel(t, v, x) \
(__raw_writel((v), (t)->etm_regs + (x)))
#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
(writel_relaxed((v), (t)->etm_regs + (x)))
#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
/* CoreSight Management Registers */
#define CSMR_LOCKACCESS 0xfb0
......@@ -142,8 +142,8 @@
#define ETBFF_TRIGFL BIT(10)
#define etb_writel(t, v, x) \
(__raw_writel((v), (t)->etb_regs + (x)))
#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
(writel_relaxed((v), (t)->etb_regs + (x)))
#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t) \
......
......@@ -11,6 +11,7 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
#include <asm/opcodes.h>
/*
* GDB assumes that we're a user process being debugged, so
......@@ -41,7 +42,7 @@
static inline void arch_kgdb_breakpoint(void)
{
asm(".word 0xe7ffdeff");
asm(__inst_arm(0xe7ffdeff));
}
extern void kgdb_handle_bus_error(void);
......
......@@ -49,6 +49,7 @@ struct machine_desc {
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*init_meminfo)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);
......
......@@ -41,6 +41,14 @@ extern void mcpm_entry_point(void);
*/
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
* This sets an early poke, i.e. a value to be poked into some address
* from very early assembly code before the CPU is ungated. The
* address must be physical, and if 0 then nothing will happen.
*/
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
unsigned long poke_phys_addr, unsigned long poke_val);
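As a rough illustration of how a platform port might use this hook, consider the sketch below; the register address, the value written, and the function name are placeholders invented for this example, not taken from any real platform.

/* Sketch only: the address and value are hypothetical placeholders. */
#include <asm/mcpm.h>

#define MY_PLAT_CPU_RELEASE_REG	0x2e000000UL	/* invented physical address */

static void my_plat_prepare_cpu_boot(unsigned int cpu, unsigned int cluster)
{
	/*
	 * Have the incoming CPU store 0x1 to the release register from the
	 * very early MCPM entry code, before its normal entry vector runs.
	 * Passing a poke address of 0 leaves the early-poke path idle.
	 */
	mcpm_set_early_poke(cpu, cluster, MY_PLAT_CPU_RELEASE_REG, 0x1);
}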
/*
* CPU/cluster power operations API for higher subsystems to use.
*/
......
......@@ -172,8 +172,13 @@
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_7_0 0x81
extern u64 __pv_phys_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;
extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset
#define __pv_stub(from,to,instr,type) \
......@@ -185,22 +190,58 @@ extern unsigned long __pv_phys_offset;
: "=r" (to) \
: "r" (from), "I" (type))
static inline unsigned long __virt_to_phys(unsigned long x)
#define __pv_stub_mov_hi(t) \
__asm__ volatile("@ __pv_stub_mov\n" \
"1: mov %R0, %1\n" \
" .pushsection .pv_table,\"a\"\n" \
" .long 1b\n" \
" .popsection\n" \
: "=r" (t) \
: "I" (__PV_BITS_7_0))
#define __pv_add_carry_stub(x, y) \
__asm__ volatile("@ __pv_add_carry_stub\n" \
"1: adds %Q0, %1, %2\n" \
" adc %R0, %R0, #0\n" \
" .pushsection .pv_table,\"a\"\n" \
" .long 1b\n" \
" .popsection\n" \
: "+r" (y) \
: "r" (x), "I" (__PV_BITS_31_24) \
: "cc")
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
unsigned long t;
phys_addr_t t;
if (sizeof(phys_addr_t) == 4) {
__pv_stub(x, t, "add", __PV_BITS_31_24);
} else {
__pv_stub_mov_hi(t);
__pv_add_carry_stub(x, t);
}
return t;
}
static inline unsigned long __phys_to_virt(unsigned long x)
static inline unsigned long __phys_to_virt(phys_addr_t x)
{
unsigned long t;
__pv_stub(x, t, "sub", __PV_BITS_31_24);
return t;
}
#else
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}
static inline unsigned long __phys_to_virt(phys_addr_t x)
{
return x - PHYS_OFFSET + PAGE_OFFSET;
}
#endif
#endif
#endif /* __ASSEMBLY__ */
......@@ -238,16 +279,33 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
static inline void *phys_to_virt(phys_addr_t x)
{
return (void *)(__phys_to_virt((unsigned long)(x)));
return (void *)__phys_to_virt(x);
}
/*
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
/*
* These are for systems that have a hardware interconnect supported alias of
* physical memory for idmap purposes. Most cases should leave these
* untouched.
*/
static inline phys_addr_t __virt_to_idmap(unsigned long x)
{
if (arch_virt_to_idmap)
return arch_virt_to_idmap(x);
else
return __virt_to_phys(x);
}
#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
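For illustration, a platform with such an interconnect alias could install the hook roughly as follows; the alias offset and the function names are invented for this sketch and are not part of the commit.

/* Sketch: platform whose idmap alias of DRAM sits at a fixed hypothetical offset. */
#include <linux/init.h>
#include <asm/memory.h>

#define MY_PLAT_IDMAP_ALIAS_OFFSET	0x40000000UL	/* invented offset */

static phys_addr_t my_plat_virt_to_idmap(unsigned long x)
{
	return __virt_to_phys(x) + MY_PLAT_IDMAP_ALIAS_OFFSET;
}

static void __init my_plat_init_early(void)
{
	/* Registered early so later idmap users pick up the aliased view. */
	arch_virt_to_idmap = my_plat_virt_to_idmap;
}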
/*
* Virtual <-> DMA view memory address translations
* Again, these are *only* valid on the kernel direct mapped RAM
......
......@@ -16,7 +16,7 @@ typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
#else
#define ASID(mm) (0)
#endif
......
......@@ -22,6 +22,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
......@@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
" .long 9998b\n" \
" " up "\n" \
" .popsection\n"
#else
#define __ALT_SMP_ASM(smp, up) up
#endif
/*
* Prefetching support - only ARMv5.
*/
......@@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr)
{
__asm__ __volatile__(
"pld\t%a0"
:
: "p" (ptr)
: "cc");
:: "p" (ptr));
}
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
#define prefetchw(ptr) prefetch(ptr)
#define ARCH_HAS_SPINLOCK_PREFETCH
#define spin_lock_prefetch(x) do { } while (0)
static inline void prefetchw(const void *ptr)
{
__asm__ __volatile__(
".arch_extension mp\n"
__ALT_SMP_ASM(
WASM(pldw) "\t%a0",
WASM(pld) "\t%a0"
)
:: "p" (ptr));
}
#endif
#endif
#define HAVE_ARCH_PICK_MMAP_LAYOUT
......
......@@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
extern int register_ipi_completion(struct completion *completion, int cpu);
struct smp_operations {
#ifdef CONFIG_SMP
/*
......
......@@ -5,21 +5,13 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
#include <asm/processor.h>
#include <linux/prefetch.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
#define ALT_SMP(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
" .long 9998b\n" \
" " up "\n" \
" .popsection\n"
#ifdef CONFIG_THUMB2_KERNEL
#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
......@@ -31,17 +23,18 @@
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
#define WFE(cond) ALT_SMP( \
#define WFE(cond) __ALT_SMP_ASM( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
#define SEV ALT_SMP("sev", "nop")
#define WFE(cond) ALT_SMP("wfe" cond, "nop")
#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
#endif
#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
......@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
u32 newval;
arch_spinlock_t lockval;
prefetchw(&lock->slock);
__asm__ __volatile__(
"1: ldrex %0, [%3]\n"
" add %1, %0, %4\n"
......@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned long contended, res;
u32 slock;
prefetchw(&lock->slock);
do {
__asm__ __volatile__(
" ldrex %0, [%3]\n"
......@@ -156,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
......@@ -174,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long contended, res;
prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
......@@ -207,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x) ((x)->lock == 0)
#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
/*
* Read locks are a bit more hairy:
......@@ -225,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
......@@ -245,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
smp_mb();
prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
......@@ -263,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long contended, res;
prefetchw(&rw->lock);
do {
__asm__ __volatile__(
" ldrex %0, [%2]\n"
......@@ -284,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
......
......@@ -25,7 +25,7 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
typedef struct {
volatile unsigned int lock;
u32 lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
......
......@@ -38,6 +38,8 @@
#ifdef __ASSEMBLY__
#define W(instr) instr.w
#define BSYM(sym) sym + 1
#else
#define WASM(instr) #instr ".w"
#endif
#else /* !CONFIG_THUMB2_KERNEL */
......@@ -50,6 +52,8 @@
#ifdef __ASSEMBLY__
#define W(instr) instr
#define BSYM(sym) sym
#else
#define WASM(instr) #instr
#endif
#endif /* CONFIG_THUMB2_KERNEL */
......
......@@ -25,12 +25,14 @@
.macro waituart,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_TXFF
bne 1001b
.endm
.macro busyuart,rd,rx
1001: ldr \rd, [\rx, #UART01x_FR]
ARM_BE8( rev \rd, \rd )
tst \rd, #UART01x_FR_BUSY
bne 1001b
.endm
......@@ -7,6 +7,7 @@ header-y += hwcap.h
header-y += ioctls.h
header-y += kvm_para.h
header-y += mman.h
header-y += perf_regs.h
header-y += posix_types.h
header-y += ptrace.h
header-y += setup.h
......
#ifndef _ASM_ARM_PERF_REGS_H
#define _ASM_ARM_PERF_REGS_H
enum perf_event_arm_regs {
PERF_REG_ARM_R0,
PERF_REG_ARM_R1,
PERF_REG_ARM_R2,
PERF_REG_ARM_R3,
PERF_REG_ARM_R4,
PERF_REG_ARM_R5,
PERF_REG_ARM_R6,
PERF_REG_ARM_R7,
PERF_REG_ARM_R8,
PERF_REG_ARM_R9,
PERF_REG_ARM_R10,
PERF_REG_ARM_FP,
PERF_REG_ARM_IP,
PERF_REG_ARM_SP,
PERF_REG_ARM_LR,
PERF_REG_ARM_PC,
PERF_REG_ARM_MAX,
};
#endif /* _ASM_ARM_PERF_REGS_H */
......@@ -17,7 +17,8 @@ CFLAGS_REMOVE_return_address.o = -pg
obj-y := elf.o entry-common.o irq.o opcodes.o \
process.o ptrace.o return_address.o \
setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
......@@ -78,6 +79,7 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
......
......@@ -155,4 +155,5 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_offset);
EXPORT_SYMBOL(__pv_offset);
#endif
......@@ -416,9 +416,8 @@ __und_usr:
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
1: ldrt r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
rev r0, r0 @ little endian instruction
#endif
ARM_BE8(rev r0, r0) @ little endian instruction
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
......
......@@ -393,9 +393,7 @@ ENTRY(vector_swi)
#else
USER( ldr r10, [lr, #-4] ) @ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
rev r10, r10 @ little endian instruction
#endif
ARM_BE8(rev r10, r10) @ little endian instruction
#elif defined(CONFIG_AEABI)
......
......@@ -77,6 +77,7 @@
__HEAD
ENTRY(stext)
ARM_BE8(setend be ) @ ensure we are in BE8 mode
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
......@@ -352,6 +353,9 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
ARM_BE8(setend be) @ ensure we are in BE8 mode
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
......@@ -555,6 +559,14 @@ ENTRY(fixup_smp)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
#ifdef __ARMEB__
#define LOW_OFFSET 0x4
#define HIGH_OFFSET 0x0
#else
#define LOW_OFFSET 0x0
#define HIGH_OFFSET 0x4
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/* __fixup_pv_table - patch the stub instructions with the delta between
......@@ -565,17 +577,20 @@ ENDPROC(fixup_smp)
__HEAD
__fixup_pv_table:
adr r0, 1f
ldmia r0, {r3-r5, r7}
sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
ldmia r0, {r3-r7}
mvn ip, #0
subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
add r7, r7, r3 @ adjust __pv_phys_offset address
str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
add r6, r6, r3 @ adjust __pv_phys_offset address
add r7, r7, r3 @ adjust __pv_offset address
str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset
strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
THUMB( it ne @ cross section branch )
bne __error
str r6, [r7, #4] @ save to __pv_offset
str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits
b __fixup_a_pv_table
ENDPROC(__fixup_pv_table)
......@@ -584,10 +599,19 @@ ENDPROC(__fixup_pv_table)
.long __pv_table_begin
.long __pv_table_end
2: .long __pv_phys_offset
.long __pv_offset
.text
__fixup_a_pv_table:
adr r0, 3f
ldr r6, [r0]
add r6, r6, r3
ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word
ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word
mov r6, r6, lsr #24
cmn r0, #1
#ifdef CONFIG_THUMB2_KERNEL
moveq r0, #0x200000 @ set bit 21, mov to mvn instruction
lsls r6, #24
beq 2f
clz r7, r6
......@@ -601,18 +625,42 @@ __fixup_a_pv_table:
b 2f
1: add r7, r3
ldrh ip, [r7, #2]
and ip, 0x8f00
orr ip, r6 @ mask in offset bits 31-24
ARM_BE8(rev16 ip, ip)
tst ip, #0x4000
and ip, #0x8f00
orrne ip, r6 @ mask in offset bits 31-24
orreq ip, r0 @ mask in offset bits 7-0
ARM_BE8(rev16 ip, ip)
strh ip, [r7, #2]
bne 2f
ldrh ip, [r7]
ARM_BE8(rev16 ip, ip)
bic ip, #0x20
orr ip, ip, r0, lsr #16
ARM_BE8(rev16 ip, ip)
strh ip, [r7]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 1b
bx lr
#else
moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
b 2f
1: ldr ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
@ in BE8, we load data in BE, but instructions still in LE
bic ip, ip, #0xff000000
tst ip, #0x000f0000 @ check the rotation field
orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
biceq ip, ip, #0x00004000 @ clear bit 22
orreq ip, ip, r0, lsl #24 @ mask in offset bits 7-0
#else
bic ip, ip, #0x000000ff
orr ip, ip, r6 @ mask in offset bits 31-24
tst ip, #0xf00 @ check the rotation field
orrne ip, ip, r6 @ mask in offset bits 31-24
biceq ip, ip, #0x400000 @ clear bit 22
orreq ip, ip, r0 @ mask in offset bits 7-0
#endif
str ip, [r7, r3]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
......@@ -621,28 +669,30 @@ __fixup_a_pv_table:
#endif
ENDPROC(__fixup_a_pv_table)
.align
3: .long __pv_offset
ENTRY(fixup_pv_table)
stmfd sp!, {r4 - r7, lr}
ldr r2, 2f @ get address of __pv_phys_offset
mov r3, #0 @ no offset
mov r4, r0 @ r0 = table start
add r5, r0, r1 @ r1 = table size
ldr r6, [r2, #4] @ get __pv_offset
bl __fixup_a_pv_table
ldmfd sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
.align
2: .long __pv_phys_offset
.data
.globl __pv_phys_offset
.type __pv_phys_offset, %object
__pv_phys_offset:
.long 0
.size __pv_phys_offset, . - __pv_phys_offset
.quad 0
.size __pv_phys_offset, . -__pv_phys_offset
.globl __pv_offset
.type __pv_offset, %object
__pv_offset:
.long 0
.quad 0
.size __pv_offset, . -__pv_offset
#endif
#include "head-common.S"
......@@ -256,12 +256,11 @@ validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
if (event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
......
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
return 0;
return regs->uregs[idx];
}
#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))
int perf_reg_validate(u64 mask)
{
if (!mask || mask & REG_RESERVED)
return -EINVAL;
return 0;
}
u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
......@@ -10,6 +10,11 @@ UNWIND( .fnstart )
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
#if __LINUX_ARM_ARCH__ >= 7
.arch_extension mp
ALT_SMP(W(pldw) [r1])
ALT_UP(W(nop))
#endif
mov r3, r2, lsl r3
1: ldrex r2, [r1]
\instr r2, r2, r3
......