Commit 67dadcb3 authored by David Vrabel

Merge commit 'e26a9e00' into stable/for-linus-3.15

parents eb47f712 e26a9e00
@@ -13,6 +13,7 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)  += timer-sp.o
 obj-$(CONFIG_MCPM)             += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
+CFLAGS_REMOVE_mcpm_entry.o     = -pg
 AFLAGS_mcpm_head.o             := -march=armv7-a
 AFLAGS_vlock.o                 := -march=armv7-a
 obj-$(CONFIG_TI_PRIV_EDMA)     += edma.o
......
@@ -30,8 +30,8 @@
  * Endian independent macros for shifting bytes within registers.
  */
 #ifndef __ARMEB__
-#define pull            lsr
-#define push            lsl
+#define lspull          lsr
+#define lspush          lsl
 #define get_byte_0      lsl #0
 #define get_byte_1      lsr #8
 #define get_byte_2      lsr #16
@@ -41,8 +41,8 @@
 #define put_byte_2      lsl #16
 #define put_byte_3      lsl #24
 #else
-#define pull            lsl
-#define push            lsr
+#define lspull          lsl
+#define lspush          lsr
 #define get_byte_0      lsr #24
 #define get_byte_1      lsr #16
 #define get_byte_2      lsr #8
......
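The pull/push shift macros above are renamed to lspull/lspush, presumably to avoid colliding with the push mnemonic; their job throughout the lib/ copy routines later in this diff is to splice two aligned words into one unaligned result. A minimal C sketch of the little-endian case, purely illustrative and not part of the patch ('off' is the byte misalignment, assumed 1..3):

#include <stdint.h>

/* lspull is lsr and lspush is lsl on little-endian, per the macros above. */
static uint32_t splice_unaligned_le(uint32_t lo, uint32_t hi, unsigned off)
{
        return (lo >> (8 * off)) | (hi << (32 - 8 * off));
}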
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int oldval, newval;
+	unsigned long tmp;
+
+	smp_mb();
+	prefetchw(&v->counter);
+
+	__asm__ __volatile__ ("@ atomic_add_unless\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, %6\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (oldval != u)
+		smp_mb();
+
+	return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 	unsigned long res;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	int ret = 1;
 
 	smp_mb();
+	prefetchw(&v->counter);
 
 	__asm__ __volatile__("@ atomic64_add_unless\n"
 "1:	ldrexd	%0, %H0, [%4]\n"
......
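The prefetchw() calls inserted before these LDREX/STREX and LDREXD/STREXD loops fetch the counter's cache line with write intent before the exclusive load, so the following store-exclusive is less likely to fail and force a retry. For illustration only (plain C11, not kernel code), a rough userspace analogue of the new __atomic_add_unless() semantics with a write-intent prefetch ahead of the retry loop:

#include <stdatomic.h>

/* Sketch: add 'a' to *v unless *v == u; returns the value seen beforehand. */
static int add_unless(_Atomic int *v, int a, int u)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);

        __builtin_prefetch((const void *)v, 1);  /* 1 = prefetch for write */
        while (old != u &&
               !atomic_compare_exchange_weak(v, &old, old + a))
                ;  /* CAS failed: 'old' was refreshed, retry */
        return old;
}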
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CMPXCHG_H
 
 #include <linux/irqflags.h>
+#include <linux/prefetch.h>
 #include <asm/barrier.h>
 
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #endif
 
 	smp_mb();
+	prefetchw((const void *)ptr);
 
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
 	unsigned long oldval, res;
 
+	prefetchw((const void *)ptr);
+
 	switch (size) {
 #ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 	unsigned long long oldval;
 	unsigned long res;
 
+	prefetchw(ptr);
+
 	__asm__ __volatile__(
 "1:	ldrexd	%1, %H1, [%3]\n"
 "	teq	%1, %4\n"
......
@@ -71,6 +71,7 @@
 #define ARM_CPU_PART_CORTEX_A5	0xC050
 #define ARM_CPU_PART_CORTEX_A15	0xC0F0
 #define ARM_CPU_PART_CORTEX_A7	0xC070
+#define ARM_CPU_PART_CORTEX_A12	0xC0D0
 
 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000
 #define ARM_CPU_XSCALE_ARCH_V1	0x2000
......
@@ -25,7 +25,7 @@
 #define fd_inb(port)		inb((port))
 #define fd_request_irq()	request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
-					    IRQF_DISABLED,"floppy",NULL)
+					    0,"floppy",NULL)
 #define fd_free_irq()		free_irq(IRQ_FLOPPYDISK,NULL)
 #define fd_disable_irq()	disable_irq(IRQ_FLOPPYDISK)
 #define fd_enable_irq()		enable_irq(IRQ_FLOPPYDISK)
......
@@ -3,11 +3,6 @@
 
 #ifdef __KERNEL__
 
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -28,6 +23,7 @@
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
 	smp_mb();							\
+	prefetchw(uaddr);						\
 	__asm__ __volatile__(						\
 	"1:	ldrex	%1, [%3]\n"					\
 	"	" insn "\n"						\
@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	smp_mb();
+	/* Prefetching cannot fault */
+	prefetchw(uaddr);
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-#endif /* !(CPU_USE_DOMAINS && SMP) */
 #endif /* __KERNEL__ */
 #endif /* _ASM_ARM_FUTEX_H */
@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DEBUG_ARCH_V7_ECP14	3
 #define ARM_DEBUG_ARCH_V7_MM	4
 #define ARM_DEBUG_ARCH_V7_1	5
+#define ARM_DEBUG_ARCH_V8	6
 
 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE	0
......
@@ -9,6 +9,7 @@
  * instruction set this cpu supports.
  */
 #define ELF_HWCAP	(elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2	(elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
 #endif
 #endif
@@ -4,7 +4,6 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <asm/system.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
......
@@ -166,9 +166,17 @@
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
  */
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
 
 /*
  * Constants used to force the right instruction encodings and shifts
@@ -177,12 +185,17 @@
 #define __PV_BITS_31_24	0x81000000
 #define __PV_BITS_7_0	0x81
 
-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
 extern u64 __pv_offset;
 extern void fixup_pv_table(const void *, unsigned long);
 extern const void *__pv_table_begin, *__pv_table_end;
 
-#define PHYS_OFFSET __pv_phys_offset
+#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
 #define __pv_stub(from,to,instr,type)	\
 	__asm__("@ __pv_stub\n"		\
@@ -243,6 +256,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 #else
 
 #define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
@@ -254,18 +268,11 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 	return x - PHYS_OFFSET + PAGE_OFFSET;
 }
 
-#endif
-#endif
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
 
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view.  We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+#endif
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
@@ -343,9 +350,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  */
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
-					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+					&& pfn_valid(virt_to_pfn(kaddr)))
 
 #endif
......
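With this change the patched quantity becomes a page frame number rather than a byte address, and PHYS_OFFSET is reconstructed as __pv_phys_pfn_offset << PAGE_SHIFT. A standalone sketch of the virt_to_pfn() arithmetic, using assumed example values (PAGE_OFFSET 0xC0000000, RAM at physical 0x80000000, 4 KiB pages) that are not taken from the patch:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_OFFSET     0xC0000000UL
#define PHYS_PFN_OFFSET (0x80000000UL >> PAGE_SHIFT)

static unsigned long virt_to_pfn(unsigned long kaddr)
{
        return ((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET;
}

int main(void)
{
        /* kernel virtual 0xC0100000 -> physical 0x80100000 -> PFN 0x80100 */
        printf("pfn = 0x%lx\n", virt_to_pfn(0xC0100000UL));
        return 0;
}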
@@ -140,6 +140,7 @@
 #define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
 #define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
 #define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
+#define L_PTE_MT_VECTORS	(_AT(pteval_t, 0x0f) << 2)	/* 1111 */
 #define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
 
 #ifndef __ASSEMBLY__
......
@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
 #define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte)	(pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte)	\
+	(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long ext = 0;
 
-	if (addr < TASK_SIZE && pte_present_user(pteval)) {
+	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
 		__sync_icache_dcache(pteval);
 		ext |= PTE_EXT_NG;
 	}
......
@@ -2,7 +2,6 @@
 #define __ASM_SYNC_BITOPS_H__
 
 #include <asm/bitops.h>
-#include <asm/system.h>
 
 /* sync_bitops functions are equivalent to the SMP implementation of the
  * original functions, independently from CONFIG_SMP being defined.
......
/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/switch_to.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
@@ -19,7 +19,7 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
-#if __LINUX_ARM_ARCH__ < 6
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #include <asm-generic/uaccess-unaligned.h>
 #else
 #define __get_user_unaligned __get_user
......
@@ -28,4 +28,13 @@
 #define HWCAP_LPAE	(1 << 20)
 #define HWCAP_EVTSTRM	(1 << 21)
 
+/*
+ * HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
+ */
+#define HWCAP2_AES	(1 << 0)
+#define HWCAP2_PMULL	(1 << 1)
+#define HWCAP2_SHA1	(1 << 2)
+#define HWCAP2_SHA2	(1 << 3)
+#define HWCAP2_CRC32	(1 << 4)
+
 #endif /* _UAPI__ASMARM_HWCAP_H */
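Userspace can read these new bits from the AT_HWCAP2 auxiliary vector entry. A small sketch, assuming a libc that provides getauxval() and defines AT_HWCAP2 (the HWCAP2_* fallbacks below simply mirror the header added here):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_CRC32
#define HWCAP2_AES   (1 << 0)
#define HWCAP2_PMULL (1 << 1)
#define HWCAP2_SHA1  (1 << 2)
#define HWCAP2_SHA2  (1 << 3)
#define HWCAP2_CRC32 (1 << 4)
#endif

int main(void)
{
        unsigned long hwcap2 = getauxval(AT_HWCAP2);  /* 0 if not provided */

        printf("aes:%d pmull:%d sha1:%d sha2:%d crc32:%d\n",
               !!(hwcap2 & HWCAP2_AES), !!(hwcap2 & HWCAP2_PMULL),
               !!(hwcap2 & HWCAP2_SHA1), !!(hwcap2 & HWCAP2_SHA2),
               !!(hwcap2 & HWCAP2_CRC32));
        return 0;
}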
@@ -158,6 +158,6 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
 #endif
 
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-EXPORT_SYMBOL(__pv_phys_offset);
+EXPORT_SYMBOL(__pv_phys_pfn_offset);
 EXPORT_SYMBOL(__pv_offset);
 #endif
@@ -608,41 +608,10 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
  */
 int pcibios_enable_device(struct pci_dev *dev, int mask)
 {
-	u16 cmd, old_cmd;
-	int idx;
-	struct resource *r;
-
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-	old_cmd = cmd;
-	for (idx = 0; idx < 6; idx++) {
-		/* Only set up the requested stuff */
-		if (!(mask & (1 << idx)))
-			continue;
-
-		r = dev->resource + idx;
-		if (!r->start && r->end) {
-			printk(KERN_ERR "PCI: Device %s not available because"
-			       " of resource collisions\n", pci_name(dev));
-			return -EINVAL;
-		}
-		if (r->flags & IORESOURCE_IO)
-			cmd |= PCI_COMMAND_IO;
-		if (r->flags & IORESOURCE_MEM)
-			cmd |= PCI_COMMAND_MEMORY;
-	}
-
-	/*
-	 * Bridges (eg, cardbus bridges) need to be fully enabled
-	 */
-	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
-		cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
-
-	if (cmd != old_cmd) {
-		printk("PCI: enabling device %s (%04x -> %04x)\n",
-		       pci_name(dev), old_cmd, cmd);
-		pci_write_config_word(dev, PCI_COMMAND, cmd);
-	}
-	return 0;
+	if (pci_has_flag(PCI_PROBE_ONLY))
+		return 0;
+
+	return pci_enable_resources(dev, mask);
 }
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
......
@@ -584,9 +584,10 @@ __fixup_pv_table:
 	subs	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
 	add	r4, r4, r3	@ adjust table start address
 	add	r5, r5, r3	@ adjust table end address
-	add	r6, r6, r3	@ adjust __pv_phys_offset address
+	add	r6, r6, r3	@ adjust __pv_phys_pfn_offset address
 	add	r7, r7, r3	@ adjust __pv_offset address
-	str	r8, [r6, #LOW_OFFSET]	@ save computed PHYS_OFFSET to __pv_phys_offset
+	mov	r0, r8, lsr #12	@ convert to PFN
+	str	r0, [r6, #LOW_OFFSET]	@ save computed PHYS_OFFSET to __pv_phys_pfn_offset
 	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits
 	mov	r6, r3, lsr #24	@ constant for add/sub instructions
 	teq	r3, r6, lsl #24 @ must be 16MiB aligned
@@ -600,7 +601,7 @@ ENDPROC(__fixup_pv_table)
 1:	.long	.
 	.long	__pv_table_begin
 	.long	__pv_table_end
-2:	.long	__pv_phys_offset
+2:	.long	__pv_phys_pfn_offset
 	.long	__pv_offset
 
 	.text
@@ -688,11 +689,11 @@ ENTRY(fixup_pv_table)
 ENDPROC(fixup_pv_table)
 
 	.data
-	.globl	__pv_phys_offset
-	.type	__pv_phys_offset, %object
-__pv_phys_offset:
-	.quad	0
-	.size	__pv_phys_offset, . -__pv_phys_offset
+	.globl	__pv_phys_pfn_offset
+	.type	__pv_phys_pfn_offset, %object
+__pv_phys_pfn_offset:
+	.word	0
+	.size	__pv_phys_pfn_offset, . -__pv_phys_pfn_offset
 
 	.globl	__pv_offset
 	.type	__pv_offset, %object
......
@@ -167,7 +167,7 @@ static int debug_arch_supported(void)
 /* Can we determine the watchpoint access type from the fsr? */
 static int debug_exception_updates_fsr(void)
 {
-	return 0;
+	return get_debug_arch() >= ARM_DEBUG_ARCH_V8;
 }
 
 /* Determine number of WRP registers available. */
@@ -257,6 +257,7 @@ static int enable_monitor_mode(void)
 		break;
 	case ARM_DEBUG_ARCH_V7_ECP14:
 	case ARM_DEBUG_ARCH_V7_1:
+	case ARM_DEBUG_ARCH_V8:
 		ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
 		isb();
 		break;
......
@@ -100,6 +100,9 @@ EXPORT_SYMBOL(system_serial_high);
 unsigned int elf_hwcap __read_mostly;
 EXPORT_SYMBOL(elf_hwcap);
 
+unsigned int elf_hwcap2 __read_mostly;
+EXPORT_SYMBOL(elf_hwcap2);
+
 #ifdef MULTI_CPU
 struct processor processor __read_mostly;
@@ -1005,6 +1008,15 @@ static const char *hwcap_str[] = {
 	NULL
 };
 
+static const char *hwcap2_str[] = {
+	"aes",
+	"pmull",
+	"sha1",
+	"sha2",
+	"crc32",
+	NULL
+};
+
 static int c_show(struct seq_file *m, void *v)
 {
 	int i, j;
@@ -1028,6 +1040,10 @@ static int c_show(struct seq_file *m, void *v)
 			if (elf_hwcap & (1 << j))
 				seq_printf(m, "%s ", hwcap_str[j]);
 
+		for (j = 0; hwcap2_str[j]; j++)
+			if (elf_hwcap2 & (1 << j))
+				seq_printf(m, "%s ", hwcap2_str[j]);
+
 		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
 		seq_printf(m, "CPU architecture: %s\n",
 			   proc_arch[cpu_architecture()]);
......
@@ -68,6 +68,12 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
 struct unwind_ctrl_block {
 	unsigned long vrs[16];		/* virtual register set */
 	const unsigned long *insn;	/* pointer to the current instructions word */
+	unsigned long sp_high;		/* highest value of sp allowed */
+	/*
+	 * 1 : check for stack overflow for each register pop.
+	 * 0 : save overhead if there is plenty of stack remaining.
+	 */
+	int check_each_pop;
 	int entries;			/* number of entries left to interpret */
 	int byte;			/* current byte number in the instructions word */
 };
@@ -235,12 +241,85 @@ static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
 	return ret;
 }
/* Before poping a register check whether it is feasible or not */
static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
unsigned long **vsp, unsigned int reg)
{
if (unlikely(ctrl->check_each_pop))
if (*vsp >= (unsigned long *)ctrl->sp_high)
return -URC_FAILURE;
ctrl->vrs[reg] = *(*vsp)++;
return URC_OK;
}
/* Helper functions to execute the instructions */
static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
unsigned long mask)
{
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int load_sp, reg = 4;
load_sp = mask & (1 << (13 - 4));
while (mask) {
if (mask & 1)
if (unwind_pop_register(ctrl, &vsp, reg))
return -URC_FAILURE;
mask >>= 1;
reg++;
}
if (!load_sp)
ctrl->vrs[SP] = (unsigned long)vsp;
return URC_OK;
}
static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
unsigned long insn)
{
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int reg;
/* pop R4-R[4+bbb] */
for (reg = 4; reg <= 4 + (insn & 7); reg++)
if (unwind_pop_register(ctrl, &vsp, reg))
return -URC_FAILURE;
if (insn & 0x80)
if (unwind_pop_register(ctrl, &vsp, 14))
return -URC_FAILURE;
ctrl->vrs[SP] = (unsigned long)vsp;
return URC_OK;
}
static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
unsigned long mask)
{
unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
int reg = 0;
/* pop R0-R3 according to mask */
while (mask) {
if (mask & 1)
if (unwind_pop_register(ctrl, &vsp, reg))
return -URC_FAILURE;
mask >>= 1;
reg++;
}
ctrl->vrs[SP] = (unsigned long)vsp;
return URC_OK;
}
 /*
  * Execute the current unwind instruction.
  */
 static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 {
 	unsigned long insn = unwind_get_byte(ctrl);
+	int ret = URC_OK;
 
 	pr_debug("%s: insn = %08lx\n", __func__, insn);
 
@@ -250,8 +329,6 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
 	else if ((insn & 0xf0) == 0x80) {
 		unsigned long mask;
-		unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
-		int load_sp, reg = 4;
 
 		insn = (insn << 8) | unwind_get_byte(ctrl);
 		mask = insn & 0x0fff;
@@ -261,29 +338,16 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 			return -URC_FAILURE;
 		}
 
-		/* pop R4-R15 according to mask */
-		load_sp = mask & (1 << (13 - 4));
-		while (mask) {
-			if (mask & 1)
-				ctrl->vrs[reg] = *vsp++;
-			mask >>= 1;
-			reg++;
-		}
-		if (!load_sp)
-			ctrl->vrs[SP] = (unsigned long)vsp;
+		ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask);
+		if (ret)
+			goto error;
 	} else if ((insn & 0xf0) == 0x90 &&
 		   (insn & 0x0d) != 0x0d)
 		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
 	else if ((insn & 0xf0) == 0xa0) {
-		unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
-		int reg;
-
-		/* pop R4-R[4+bbb] */
-		for (reg = 4; reg <= 4 + (insn & 7); reg++)
-			ctrl->vrs[reg] = *vsp++;
-		if (insn & 0x80)
-			ctrl->vrs[14] = *vsp++;
-		ctrl->vrs[SP] = (unsigned long)vsp;
+		ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
+		if (ret)
+			goto error;
 	} else if (insn == 0xb0) {
 		if (ctrl->vrs[PC] == 0)
 			ctrl->vrs[PC] = ctrl->vrs[LR];
@@ -291,8 +355,6 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 		ctrl->entries = 0;
 	} else if (insn == 0xb1) {
 		unsigned long mask = unwind_get_byte(ctrl);
-		unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
-		int reg = 0;
 
 		if (mask == 0 || mask & 0xf0) {
 			pr_warning("unwind: Spare encoding %04lx\n",
@@ -300,14 +362,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 			return -URC_FAILURE;
 		}
 
-		/* pop R0-R3 according to mask */
-		while (mask) {
-			if (mask & 1)
-				ctrl->vrs[reg] = *vsp++;
-			mask >>= 1;
-			reg++;
-		}
-		ctrl->vrs[SP] = (unsigned long)vsp;
+		ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask);
+		if (ret)
+			goto error;
 	} else if (insn == 0xb2) {
 		unsigned long uleb128 = unwind_get_byte(ctrl);
 
@@ -320,7 +377,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 	pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
 		 ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);
 
-	return URC_OK;
+error:
+	return ret;
 }
 
 /*
@@ -329,13 +387,13 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
  */
 int unwind_frame(struct stackframe *frame)
 {
-	unsigned long high, low;
+	unsigned long low;
 	const struct unwind_idx *idx;
 	struct unwind_ctrl_block ctrl;
 
-	/* only go to a higher address on the stack */
+	/* store the highest address on the stack to avoid crossing it*/
 	low = frame->sp;
-	high = ALIGN(low, THREAD_SIZE);
+	ctrl.sp_high = ALIGN(low, THREAD_SIZE);
 
 	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
 		 frame->pc, frame->lr, frame->sp);
@@ -382,11 +440,16 @@ int unwind_frame(struct stackframe *frame)
 		return -URC_FAILURE;
 	}
 
+	ctrl.check_each_pop = 0;
+
 	while (ctrl.entries > 0) {
-		int urc = unwind_exec_insn(&ctrl);
+		int urc;
+		if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
+			ctrl.check_each_pop = 1;
+		urc = unwind_exec_insn(&ctrl);
 		if (urc < 0)
 			return urc;
-		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high)
 			return -URC_FAILURE;
 	}
......
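The unwinder now only pays for per-pop bounds checking once SP gets within sizeof(ctrl.vrs) of sp_high; each pop then refuses to read past the recorded top of the stack. A tiny standalone C sketch of that guarded-pop idea (illustrative only; the real logic is unwind_pop_register() above):

/* Pop one word from a simulated stack, refusing to read past 'sp_high'. */
static int pop_checked(unsigned long **vsp, const unsigned long *sp_high,
                       unsigned long *out, int check_each_pop)
{
        if (check_each_pop && *vsp >= sp_high)
                return -1;              /* would run off the recorded stack */
        *out = *(*vsp)++;
        return 0;
}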
@@ -37,6 +37,11 @@ UNWIND(	.fnstart	)
 	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
 	smp_dmb
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+	.arch_extension	mp
+	ALT_SMP(W(pldw)	[r1])
+	ALT_UP(W(nop))
+#endif
 1:	ldrex	r2, [r1]
 	ands	r0, r2, r3		@ save old value of bit
 	\instr	r2, r2, r3		@ toggle bit
......
...@@ -197,24 +197,24 @@ ...@@ -197,24 +197,24 @@
12: PLD( pld [r1, #124] ) 12: PLD( pld [r1, #124] )
13: ldr4w r1, r4, r5, r6, r7, abort=19f 13: ldr4w r1, r4, r5, r6, r7, abort=19f
mov r3, lr, pull #\pull mov r3, lr, lspull #\pull
subs r2, r2, #32 subs r2, r2, #32
ldr4w r1, r8, r9, ip, lr, abort=19f ldr4w r1, r8, r9, ip, lr, abort=19f
orr r3, r3, r4, push #\push orr r3, r3, r4, lspush #\push
mov r4, r4, pull #\pull mov r4, r4, lspull #\pull
orr r4, r4, r5, push #\push orr r4, r4, r5, lspush #\push
mov r5, r5, pull #\pull mov r5, r5, lspull #\pull
orr r5, r5, r6, push #\push orr r5, r5, r6, lspush #\push
mov r6, r6, pull #\pull mov r6, r6, lspull #\pull
orr r6, r6, r7, push #\push orr r6, r6, r7, lspush #\push
mov r7, r7, pull #\pull mov r7, r7, lspull #\pull
orr r7, r7, r8, push #\push orr r7, r7, r8, lspush #\push
mov r8, r8, pull #\pull mov r8, r8, lspull #\pull
orr r8, r8, r9, push #\push orr r8, r8, r9, lspush #\push
mov r9, r9, pull #\pull mov r9, r9, lspull #\pull
orr r9, r9, ip, push #\push orr r9, r9, ip, lspush #\push
mov ip, ip, pull #\pull mov ip, ip, lspull #\pull
orr ip, ip, lr, push #\push orr ip, ip, lr, lspush #\push
str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f str8w r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f
bge 12b bge 12b
PLD( cmn r2, #96 ) PLD( cmn r2, #96 )
...@@ -225,10 +225,10 @@ ...@@ -225,10 +225,10 @@
14: ands ip, r2, #28 14: ands ip, r2, #28
beq 16f beq 16f
15: mov r3, lr, pull #\pull 15: mov r3, lr, lspull #\pull
ldr1w r1, lr, abort=21f ldr1w r1, lr, abort=21f
subs ip, ip, #4 subs ip, ip, #4
orr r3, r3, lr, push #\push orr r3, r3, lr, lspush #\push
str1w r0, r3, abort=21f str1w r0, r3, abort=21f
bgt 15b bgt 15b
CALGN( cmp r2, #0 ) CALGN( cmp r2, #0 )
......
...@@ -141,7 +141,7 @@ FN_ENTRY ...@@ -141,7 +141,7 @@ FN_ENTRY
tst len, #2 tst len, #2
mov r5, r4, get_byte_0 mov r5, r4, get_byte_0
beq .Lexit beq .Lexit
adcs sum, sum, r4, push #16 adcs sum, sum, r4, lspush #16
strb r5, [dst], #1 strb r5, [dst], #1
mov r5, r4, get_byte_1 mov r5, r4, get_byte_1
strb r5, [dst], #1 strb r5, [dst], #1
...@@ -171,23 +171,23 @@ FN_ENTRY ...@@ -171,23 +171,23 @@ FN_ENTRY
cmp ip, #2 cmp ip, #2
beq .Lsrc2_aligned beq .Lsrc2_aligned
bhi .Lsrc3_aligned bhi .Lsrc3_aligned
mov r4, r5, pull #8 @ C = 0 mov r4, r5, lspull #8 @ C = 0
bics ip, len, #15 bics ip, len, #15
beq 2f beq 2f
1: load4l r5, r6, r7, r8 1: load4l r5, r6, r7, r8
orr r4, r4, r5, push #24 orr r4, r4, r5, lspush #24
mov r5, r5, pull #8 mov r5, r5, lspull #8
orr r5, r5, r6, push #24 orr r5, r5, r6, lspush #24
mov r6, r6, pull #8 mov r6, r6, lspull #8
orr r6, r6, r7, push #24 orr r6, r6, r7, lspush #24
mov r7, r7, pull #8 mov r7, r7, lspull #8
orr r7, r7, r8, push #24 orr r7, r7, r8, lspush #24
stmia dst!, {r4, r5, r6, r7} stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4 adcs sum, sum, r4
adcs sum, sum, r5 adcs sum, sum, r5
adcs sum, sum, r6 adcs sum, sum, r6
adcs sum, sum, r7 adcs sum, sum, r7
mov r4, r8, pull #8 mov r4, r8, lspull #8
sub ip, ip, #16 sub ip, ip, #16
teq ip, #0 teq ip, #0
bne 1b bne 1b
...@@ -196,50 +196,50 @@ FN_ENTRY ...@@ -196,50 +196,50 @@ FN_ENTRY
tst ip, #8 tst ip, #8
beq 3f beq 3f
load2l r5, r6 load2l r5, r6
orr r4, r4, r5, push #24 orr r4, r4, r5, lspush #24
mov r5, r5, pull #8 mov r5, r5, lspull #8
orr r5, r5, r6, push #24 orr r5, r5, r6, lspush #24
stmia dst!, {r4, r5} stmia dst!, {r4, r5}
adcs sum, sum, r4 adcs sum, sum, r4
adcs sum, sum, r5 adcs sum, sum, r5
mov r4, r6, pull #8 mov r4, r6, lspull #8
tst ip, #4 tst ip, #4
beq 4f beq 4f
3: load1l r5 3: load1l r5
orr r4, r4, r5, push #24 orr r4, r4, r5, lspush #24
str r4, [dst], #4 str r4, [dst], #4
adcs sum, sum, r4 adcs sum, sum, r4
mov r4, r5, pull #8 mov r4, r5, lspull #8
4: ands len, len, #3 4: ands len, len, #3
beq .Ldone beq .Ldone
mov r5, r4, get_byte_0 mov r5, r4, get_byte_0
tst len, #2 tst len, #2
beq .Lexit beq .Lexit
adcs sum, sum, r4, push #16 adcs sum, sum, r4, lspush #16
strb r5, [dst], #1 strb r5, [dst], #1
mov r5, r4, get_byte_1 mov r5, r4, get_byte_1
strb r5, [dst], #1 strb r5, [dst], #1
mov r5, r4, get_byte_2 mov r5, r4, get_byte_2
b .Lexit b .Lexit
.Lsrc2_aligned: mov r4, r5, pull #16 .Lsrc2_aligned: mov r4, r5, lspull #16
adds sum, sum, #0 adds sum, sum, #0
bics ip, len, #15 bics ip, len, #15
beq 2f beq 2f
1: load4l r5, r6, r7, r8 1: load4l r5, r6, r7, r8
orr r4, r4, r5, push #16 orr r4, r4, r5, lspush #16
mov r5, r5, pull #16 mov r5, r5, lspull #16
orr r5, r5, r6, push #16 orr r5, r5, r6, lspush #16
mov r6, r6, pull #16 mov r6, r6, lspull #16
orr r6, r6, r7, push #16 orr r6, r6, r7, lspush #16
mov r7, r7, pull #16 mov r7, r7, lspull #16
orr r7, r7, r8, push #16 orr r7, r7, r8, lspush #16
stmia dst!, {r4, r5, r6, r7} stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4 adcs sum, sum, r4
adcs sum, sum, r5 adcs sum, sum, r5
adcs sum, sum, r6 adcs sum, sum, r6
adcs sum, sum, r7 adcs sum, sum, r7
mov r4, r8, pull #16 mov r4, r8, lspull #16
sub ip, ip, #16 sub ip, ip, #16
teq ip, #0 teq ip, #0
bne 1b bne 1b
...@@ -248,20 +248,20 @@ FN_ENTRY ...@@ -248,20 +248,20 @@ FN_ENTRY
tst ip, #8 tst ip, #8
beq 3f beq 3f
load2l r5, r6 load2l r5, r6
orr r4, r4, r5, push #16 orr r4, r4, r5, lspush #16
mov r5, r5, pull #16 mov r5, r5, lspull #16
orr r5, r5, r6, push #16 orr r5, r5, r6, lspush #16
stmia dst!, {r4, r5} stmia dst!, {r4, r5}
adcs sum, sum, r4 adcs sum, sum, r4
adcs sum, sum, r5 adcs sum, sum, r5
mov r4, r6, pull #16 mov r4, r6, lspull #16
tst ip, #4 tst ip, #4
beq 4f beq 4f
3: load1l r5 3: load1l r5
orr r4, r4, r5, push #16 orr r4, r4, r5, lspush #16
str r4, [dst], #4 str r4, [dst], #4
adcs sum, sum, r4 adcs sum, sum, r4
mov r4, r5, pull #16 mov r4, r5, lspull #16
4: ands len, len, #3 4: ands len, len, #3
beq .Ldone beq .Ldone
mov r5, r4, get_byte_0 mov r5, r4, get_byte_0
...@@ -276,24 +276,24 @@ FN_ENTRY ...@@ -276,24 +276,24 @@ FN_ENTRY
load1b r5 load1b r5
b .Lexit b .Lexit
.Lsrc3_aligned: mov r4, r5, pull #24 .Lsrc3_aligned: mov r4, r5, lspull #24
adds sum, sum, #0 adds sum, sum, #0
bics ip, len, #15 bics ip, len, #15
beq 2f beq 2f
1: load4l r5, r6, r7, r8 1: load4l r5, r6, r7, r8
orr r4, r4, r5, push #8 orr r4, r4, r5, lspush #8
mov r5, r5, pull #24 mov r5, r5, lspull #24
orr r5, r5, r6, push #8 orr r5, r5, r6, lspush #8
mov r6, r6, pull #24 mov r6, r6, lspull #24
orr r6, r6, r7, push #8 orr r6, r6, r7, lspush #8
mov r7, r7, pull #24 mov r7, r7, lspull #24
orr r7, r7, r8, push #8 orr r7, r7, r8, lspush #8
stmia dst!, {r4, r5, r6, r7} stmia dst!, {r4, r5, r6, r7}
adcs sum, sum, r4 adcs sum, sum, r4
adcs sum, sum, r5 adcs sum, sum, r5
adcs sum, sum, r6 adcs sum, sum, r6
adcs sum, sum, r7 adcs sum, sum, r7
mov r4, r8, pull #24 mov r4, r8, lspull #24
sub ip, ip, #16 sub ip, ip, #16
teq ip, #0 teq ip, #0
bne 1b bne 1b
...@@ -302,20 +302,20 @@ FN_ENTRY ...@@ -302,20 +302,20 @@ FN_ENTRY
tst ip, #8 tst ip, #8
beq 3f beq 3f
load2l r5, r6 load2l r5, r6
orr r4, r4, r5, push #8 orr r4, r4, r5, lspush #8
mov r5, r5, pull #24 mov r5, r5, lspull #24
orr r5, r5, r6, push #8 orr r5, r5, r6, lspush #8
stmia dst!, {r4, r5} stmia dst!, {r4, r5}
adcs sum, sum, r4 adcs sum, sum, r4
adcs sum, sum, r5 adcs sum, sum, r5
mov r4, r6, pull #24 mov r4, r6, lspull #24
tst ip, #4 tst ip, #4
beq 4f beq 4f
3: load1l r5 3: load1l r5
orr r4, r4, r5, push #8 orr r4, r4, r5, lspush #8
str r4, [dst], #4 str r4, [dst], #4
adcs sum, sum, r4 adcs sum, sum, r4
mov r4, r5, pull #24 mov r4, r5, lspull #24
4: ands len, len, #3 4: ands len, len, #3
beq .Ldone beq .Ldone
mov r5, r4, get_byte_0 mov r5, r4, get_byte_0
...@@ -326,7 +326,7 @@ FN_ENTRY ...@@ -326,7 +326,7 @@ FN_ENTRY
load1l r4 load1l r4
mov r5, r4, get_byte_0 mov r5, r4, get_byte_0
strb r5, [dst], #1 strb r5, [dst], #1
adcs sum, sum, r4, push #24 adcs sum, sum, r4, lspush #24
mov r5, r4, get_byte_1 mov r5, r4, get_byte_1
b .Lexit b .Lexit
FN_EXIT FN_EXIT
...@@ -47,25 +47,25 @@ ENTRY(__raw_readsl) ...@@ -47,25 +47,25 @@ ENTRY(__raw_readsl)
strb ip, [r1], #1 strb ip, [r1], #1
4: subs r2, r2, #1 4: subs r2, r2, #1
mov ip, r3, pull #24 mov ip, r3, lspull #24
ldrne r3, [r0] ldrne r3, [r0]
orrne ip, ip, r3, push #8 orrne ip, ip, r3, lspush #8
strne ip, [r1], #4 strne ip, [r1], #4
bne 4b bne 4b
b 8f b 8f
5: subs r2, r2, #1 5: subs r2, r2, #1
mov ip, r3, pull #16 mov ip, r3, lspull #16
ldrne r3, [r0] ldrne r3, [r0]
orrne ip, ip, r3, push #16 orrne ip, ip, r3, lspush #16
strne ip, [r1], #4 strne ip, [r1], #4
bne 5b bne 5b
b 7f b 7f
6: subs r2, r2, #1 6: subs r2, r2, #1
mov ip, r3, pull #8 mov ip, r3, lspull #8
ldrne r3, [r0] ldrne r3, [r0]
orrne ip, ip, r3, push #24 orrne ip, ip, r3, lspush #24
strne ip, [r1], #4 strne ip, [r1], #4
bne 6b bne 6b
......
...@@ -41,26 +41,26 @@ ENTRY(__raw_writesl) ...@@ -41,26 +41,26 @@ ENTRY(__raw_writesl)
blt 5f blt 5f
bgt 6f bgt 6f
4: mov ip, r3, pull #16 4: mov ip, r3, lspull #16
ldr r3, [r1], #4 ldr r3, [r1], #4
subs r2, r2, #1 subs r2, r2, #1
orr ip, ip, r3, push #16 orr ip, ip, r3, lspush #16
str ip, [r0] str ip, [r0]
bne 4b bne 4b
mov pc, lr mov pc, lr
5: mov ip, r3, pull #8 5: mov ip, r3, lspull #8
ldr r3, [r1], #4 ldr r3, [r1], #4
subs r2, r2, #1 subs r2, r2, #1
orr ip, ip, r3, push #24 orr ip, ip, r3, lspush #24
str ip, [r0] str ip, [r0]
bne 5b bne 5b
mov pc, lr mov pc, lr
6: mov ip, r3, pull #24 6: mov ip, r3, lspull #24
ldr r3, [r1], #4 ldr r3, [r1], #4
subs r2, r2, #1 subs r2, r2, #1
orr ip, ip, r3, push #8 orr ip, ip, r3, lspush #8
str ip, [r0] str ip, [r0]
bne 6b bne 6b
mov pc, lr mov pc, lr
......
...@@ -147,24 +147,24 @@ ENTRY(memmove) ...@@ -147,24 +147,24 @@ ENTRY(memmove)
12: PLD( pld [r1, #-128] ) 12: PLD( pld [r1, #-128] )
13: ldmdb r1!, {r7, r8, r9, ip} 13: ldmdb r1!, {r7, r8, r9, ip}
mov lr, r3, push #\push mov lr, r3, lspush #\push
subs r2, r2, #32 subs r2, r2, #32
ldmdb r1!, {r3, r4, r5, r6} ldmdb r1!, {r3, r4, r5, r6}
orr lr, lr, ip, pull #\pull orr lr, lr, ip, lspull #\pull
mov ip, ip, push #\push mov ip, ip, lspush #\push
orr ip, ip, r9, pull #\pull orr ip, ip, r9, lspull #\pull
mov r9, r9, push #\push mov r9, r9, lspush #\push
orr r9, r9, r8, pull #\pull orr r9, r9, r8, lspull #\pull
mov r8, r8, push #\push mov r8, r8, lspush #\push
orr r8, r8, r7, pull #\pull orr r8, r8, r7, lspull #\pull
mov r7, r7, push #\push mov r7, r7, lspush #\push
orr r7, r7, r6, pull #\pull orr r7, r7, r6, lspull #\pull
mov r6, r6, push #\push mov r6, r6, lspush #\push
orr r6, r6, r5, pull #\pull orr r6, r6, r5, lspull #\pull
mov r5, r5, push #\push mov r5, r5, lspush #\push
orr r5, r5, r4, pull #\pull orr r5, r5, r4, lspull #\pull
mov r4, r4, push #\push mov r4, r4, lspush #\push
orr r4, r4, r3, pull #\pull orr r4, r4, r3, lspull #\pull
stmdb r0!, {r4 - r9, ip, lr} stmdb r0!, {r4 - r9, ip, lr}
bge 12b bge 12b
PLD( cmn r2, #96 ) PLD( cmn r2, #96 )
...@@ -175,10 +175,10 @@ ENTRY(memmove) ...@@ -175,10 +175,10 @@ ENTRY(memmove)
14: ands ip, r2, #28 14: ands ip, r2, #28
beq 16f beq 16f
15: mov lr, r3, push #\push 15: mov lr, r3, lspush #\push
ldr r3, [r1, #-4]! ldr r3, [r1, #-4]!
subs ip, ip, #4 subs ip, ip, #4
orr lr, lr, r3, pull #\pull orr lr, lr, r3, lspull #\pull
str lr, [r0, #-4]! str lr, [r0, #-4]!
bgt 15b bgt 15b
CALGN( cmp r2, #0 ) CALGN( cmp r2, #0 )
......
...@@ -117,9 +117,9 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault ...@@ -117,9 +117,9 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
.Lc2u_1fupi: subs r2, r2, #4 .Lc2u_1fupi: subs r2, r2, #4
addmi ip, r2, #4 addmi ip, r2, #4
bmi .Lc2u_1nowords bmi .Lc2u_1nowords
mov r3, r7, pull #8 mov r3, r7, lspull #8
ldr r7, [r1], #4 ldr r7, [r1], #4
orr r3, r3, r7, push #24 orr r3, r3, r7, lspush #24
USER( TUSER( str) r3, [r0], #4) @ May fault USER( TUSER( str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0 rsb ip, ip, #0
...@@ -131,30 +131,30 @@ USER( TUSER( str) r3, [r0], #4) @ May fault ...@@ -131,30 +131,30 @@ USER( TUSER( str) r3, [r0], #4) @ May fault
subs ip, ip, #16 subs ip, ip, #16
blt .Lc2u_1rem8lp blt .Lc2u_1rem8lp
.Lc2u_1cpy8lp: mov r3, r7, pull #8 .Lc2u_1cpy8lp: mov r3, r7, lspull #8
ldmia r1!, {r4 - r7} ldmia r1!, {r4 - r7}
subs ip, ip, #16 subs ip, ip, #16
orr r3, r3, r4, push #24 orr r3, r3, r4, lspush #24
mov r4, r4, pull #8 mov r4, r4, lspull #8
orr r4, r4, r5, push #24 orr r4, r4, r5, lspush #24
mov r5, r5, pull #8 mov r5, r5, lspull #8
orr r5, r5, r6, push #24 orr r5, r5, r6, lspush #24
mov r6, r6, pull #8 mov r6, r6, lspull #8
orr r6, r6, r7, push #24 orr r6, r6, r7, lspush #24
stmia r0!, {r3 - r6} @ Shouldnt fault stmia r0!, {r3 - r6} @ Shouldnt fault
bpl .Lc2u_1cpy8lp bpl .Lc2u_1cpy8lp
.Lc2u_1rem8lp: tst ip, #8 .Lc2u_1rem8lp: tst ip, #8
movne r3, r7, pull #8 movne r3, r7, lspull #8
ldmneia r1!, {r4, r7} ldmneia r1!, {r4, r7}
orrne r3, r3, r4, push #24 orrne r3, r3, r4, lspush #24
movne r4, r4, pull #8 movne r4, r4, lspull #8
orrne r4, r4, r7, push #24 orrne r4, r4, r7, lspush #24
stmneia r0!, {r3 - r4} @ Shouldnt fault stmneia r0!, {r3 - r4} @ Shouldnt fault
tst ip, #4 tst ip, #4
movne r3, r7, pull #8 movne r3, r7, lspull #8
ldrne r7, [r1], #4 ldrne r7, [r1], #4
orrne r3, r3, r7, push #24 orrne r3, r3, r7, lspush #24
TUSER( strne) r3, [r0], #4 @ Shouldnt fault TUSER( strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3 ands ip, ip, #3
beq .Lc2u_1fupi beq .Lc2u_1fupi
...@@ -172,9 +172,9 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault ...@@ -172,9 +172,9 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
.Lc2u_2fupi: subs r2, r2, #4 .Lc2u_2fupi: subs r2, r2, #4
addmi ip, r2, #4 addmi ip, r2, #4
bmi .Lc2u_2nowords bmi .Lc2u_2nowords
mov r3, r7, pull #16 mov r3, r7, lspull #16
ldr r7, [r1], #4 ldr r7, [r1], #4
orr r3, r3, r7, push #16 orr r3, r3, r7, lspush #16
USER( TUSER( str) r3, [r0], #4) @ May fault USER( TUSER( str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0 rsb ip, ip, #0
...@@ -186,30 +186,30 @@ USER( TUSER( str) r3, [r0], #4) @ May fault ...@@ -186,30 +186,30 @@ USER( TUSER( str) r3, [r0], #4) @ May fault
subs ip, ip, #16 subs ip, ip, #16
blt .Lc2u_2rem8lp blt .Lc2u_2rem8lp
.Lc2u_2cpy8lp: mov r3, r7, pull #16 .Lc2u_2cpy8lp: mov r3, r7, lspull #16
ldmia r1!, {r4 - r7} ldmia r1!, {r4 - r7}
subs ip, ip, #16 subs ip, ip, #16
orr r3, r3, r4, push #16 orr r3, r3, r4, lspush #16
mov r4, r4, pull #16 mov r4, r4, lspull #16
orr r4, r4, r5, push #16 orr r4, r4, r5, lspush #16
mov r5, r5, pull #16 mov r5, r5, lspull #16
orr r5, r5, r6, push #16 orr r5, r5, r6, lspush #16
mov r6, r6, pull #16 mov r6, r6, lspull #16
orr r6, r6, r7, push #16 orr r6, r6, r7, lspush #16
stmia r0!, {r3 - r6} @ Shouldnt fault stmia r0!, {r3 - r6} @ Shouldnt fault
bpl .Lc2u_2cpy8lp bpl .Lc2u_2cpy8lp
.Lc2u_2rem8lp: tst ip, #8 .Lc2u_2rem8lp: tst ip, #8
movne r3, r7, pull #16 movne r3, r7, lspull #16
ldmneia r1!, {r4, r7} ldmneia r1!, {r4, r7}
orrne r3, r3, r4, push #16 orrne r3, r3, r4, lspush #16
movne r4, r4, pull #16 movne r4, r4, lspull #16
orrne r4, r4, r7, push #16 orrne r4, r4, r7, lspush #16
stmneia r0!, {r3 - r4} @ Shouldnt fault stmneia r0!, {r3 - r4} @ Shouldnt fault
tst ip, #4 tst ip, #4
movne r3, r7, pull #16 movne r3, r7, lspull #16
ldrne r7, [r1], #4 ldrne r7, [r1], #4
orrne r3, r3, r7, push #16 orrne r3, r3, r7, lspush #16
TUSER( strne) r3, [r0], #4 @ Shouldnt fault TUSER( strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3 ands ip, ip, #3
beq .Lc2u_2fupi beq .Lc2u_2fupi
...@@ -227,9 +227,9 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault ...@@ -227,9 +227,9 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
.Lc2u_3fupi: subs r2, r2, #4 .Lc2u_3fupi: subs r2, r2, #4
addmi ip, r2, #4 addmi ip, r2, #4
bmi .Lc2u_3nowords bmi .Lc2u_3nowords
mov r3, r7, pull #24 mov r3, r7, lspull #24
ldr r7, [r1], #4 ldr r7, [r1], #4
orr r3, r3, r7, push #8 orr r3, r3, r7, lspush #8
USER( TUSER( str) r3, [r0], #4) @ May fault USER( TUSER( str) r3, [r0], #4) @ May fault
mov ip, r0, lsl #32 - PAGE_SHIFT mov ip, r0, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0 rsb ip, ip, #0
...@@ -241,30 +241,30 @@ USER( TUSER( str) r3, [r0], #4) @ May fault ...@@ -241,30 +241,30 @@ USER( TUSER( str) r3, [r0], #4) @ May fault
subs ip, ip, #16 subs ip, ip, #16
blt .Lc2u_3rem8lp blt .Lc2u_3rem8lp
.Lc2u_3cpy8lp: mov r3, r7, pull #24 .Lc2u_3cpy8lp: mov r3, r7, lspull #24
ldmia r1!, {r4 - r7} ldmia r1!, {r4 - r7}
subs ip, ip, #16 subs ip, ip, #16
orr r3, r3, r4, push #8 orr r3, r3, r4, lspush #8
mov r4, r4, pull #24 mov r4, r4, lspull #24
orr r4, r4, r5, push #8 orr r4, r4, r5, lspush #8
mov r5, r5, pull #24 mov r5, r5, lspull #24
orr r5, r5, r6, push #8 orr r5, r5, r6, lspush #8
mov r6, r6, pull #24 mov r6, r6, lspull #24
orr r6, r6, r7, push #8 orr r6, r6, r7, lspush #8
stmia r0!, {r3 - r6} @ Shouldnt fault stmia r0!, {r3 - r6} @ Shouldnt fault
bpl .Lc2u_3cpy8lp bpl .Lc2u_3cpy8lp
.Lc2u_3rem8lp: tst ip, #8 .Lc2u_3rem8lp: tst ip, #8
movne r3, r7, pull #24 movne r3, r7, lspull #24
ldmneia r1!, {r4, r7} ldmneia r1!, {r4, r7}
orrne r3, r3, r4, push #8 orrne r3, r3, r4, lspush #8
movne r4, r4, pull #24 movne r4, r4, lspull #24
orrne r4, r4, r7, push #8 orrne r4, r4, r7, lspush #8
stmneia r0!, {r3 - r4} @ Shouldnt fault stmneia r0!, {r3 - r4} @ Shouldnt fault
tst ip, #4 tst ip, #4
movne r3, r7, pull #24 movne r3, r7, lspull #24
ldrne r7, [r1], #4 ldrne r7, [r1], #4
orrne r3, r3, r7, push #8 orrne r3, r3, r7, lspush #8
TUSER( strne) r3, [r0], #4 @ Shouldnt fault TUSER( strne) r3, [r0], #4 @ Shouldnt fault
ands ip, ip, #3 ands ip, ip, #3
beq .Lc2u_3fupi beq .Lc2u_3fupi
...@@ -382,9 +382,9 @@ USER( TUSER( ldr) r7, [r1], #4) @ May fault ...@@ -382,9 +382,9 @@ USER( TUSER( ldr) r7, [r1], #4) @ May fault
.Lcfu_1fupi: subs r2, r2, #4 .Lcfu_1fupi: subs r2, r2, #4
addmi ip, r2, #4 addmi ip, r2, #4
bmi .Lcfu_1nowords bmi .Lcfu_1nowords
mov r3, r7, pull #8 mov r3, r7, lspull #8
USER( TUSER( ldr) r7, [r1], #4) @ May fault USER( TUSER( ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #24 orr r3, r3, r7, lspush #24
str r3, [r0], #4 str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT mov ip, r1, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0 rsb ip, ip, #0
...@@ -396,30 +396,30 @@ USER( TUSER( ldr) r7, [r1], #4) @ May fault ...@@ -396,30 +396,30 @@ USER( TUSER( ldr) r7, [r1], #4) @ May fault
subs ip, ip, #16 subs ip, ip, #16
blt .Lcfu_1rem8lp blt .Lcfu_1rem8lp
.Lcfu_1cpy8lp: mov r3, r7, pull #8 .Lcfu_1cpy8lp: mov r3, r7, lspull #8
ldmia r1!, {r4 - r7} @ Shouldnt fault ldmia r1!, {r4 - r7} @ Shouldnt fault
subs ip, ip, #16 subs ip, ip, #16
orr r3, r3, r4, push #24 orr r3, r3, r4, lspush #24
mov r4, r4, pull #8 mov r4, r4, lspull #8
orr r4, r4, r5, push #24 orr r4, r4, r5, lspush #24
mov r5, r5, pull #8 mov r5, r5, lspull #8
orr r5, r5, r6, push #24 orr r5, r5, r6, lspush #24
mov r6, r6, pull #8 mov r6, r6, lspull #8
orr r6, r6, r7, push #24 orr r6, r6, r7, lspush #24
stmia r0!, {r3 - r6} stmia r0!, {r3 - r6}
bpl .Lcfu_1cpy8lp bpl .Lcfu_1cpy8lp
.Lcfu_1rem8lp: tst ip, #8 .Lcfu_1rem8lp: tst ip, #8
movne r3, r7, pull #8 movne r3, r7, lspull #8
ldmneia r1!, {r4, r7} @ Shouldnt fault ldmneia r1!, {r4, r7} @ Shouldnt fault
orrne r3, r3, r4, push #24 orrne r3, r3, r4, lspush #24
movne r4, r4, pull #8 movne r4, r4, lspull #8
orrne r4, r4, r7, push #24 orrne r4, r4, r7, lspush #24
stmneia r0!, {r3 - r4} stmneia r0!, {r3 - r4}
tst ip, #4 tst ip, #4
movne r3, r7, pull #8 movne r3, r7, lspull #8
USER( TUSER( ldrne) r7, [r1], #4) @ May fault USER( TUSER( ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #24 orrne r3, r3, r7, lspush #24
strne r3, [r0], #4 strne r3, [r0], #4
ands ip, ip, #3 ands ip, ip, #3
beq .Lcfu_1fupi beq .Lcfu_1fupi
...@@ -437,9 +437,9 @@ USER( TUSER( ldrne) r7, [r1], #4) @ May fault ...@@ -437,9 +437,9 @@ USER( TUSER( ldrne) r7, [r1], #4) @ May fault
.Lcfu_2fupi: subs r2, r2, #4 .Lcfu_2fupi: subs r2, r2, #4
addmi ip, r2, #4 addmi ip, r2, #4
bmi .Lcfu_2nowords bmi .Lcfu_2nowords
mov r3, r7, pull #16 mov r3, r7, lspull #16
USER( TUSER( ldr) r7, [r1], #4) @ May fault USER( TUSER( ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #16 orr r3, r3, r7, lspush #16
str r3, [r0], #4 str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT mov ip, r1, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0 rsb ip, ip, #0
...@@ -452,30 +452,30 @@ USER( TUSER( ldr) r7, [r1], #4) @ May fault ...@@ -452,30 +452,30 @@ USER( TUSER( ldr) r7, [r1], #4) @ May fault
blt .Lcfu_2rem8lp blt .Lcfu_2rem8lp
.Lcfu_2cpy8lp: mov r3, r7, pull #16 .Lcfu_2cpy8lp: mov r3, r7, lspull #16
ldmia r1!, {r4 - r7} @ Shouldnt fault ldmia r1!, {r4 - r7} @ Shouldnt fault
subs ip, ip, #16 subs ip, ip, #16
orr r3, r3, r4, push #16 orr r3, r3, r4, lspush #16
mov r4, r4, pull #16 mov r4, r4, lspull #16
orr r4, r4, r5, push #16 orr r4, r4, r5, lspush #16
mov r5, r5, pull #16 mov r5, r5, lspull #16
orr r5, r5, r6, push #16 orr r5, r5, r6, lspush #16
mov r6, r6, pull #16 mov r6, r6, lspull #16
orr r6, r6, r7, push #16 orr r6, r6, r7, lspush #16
stmia r0!, {r3 - r6} stmia r0!, {r3 - r6}
bpl .Lcfu_2cpy8lp bpl .Lcfu_2cpy8lp
.Lcfu_2rem8lp: tst ip, #8 .Lcfu_2rem8lp: tst ip, #8
movne r3, r7, pull #16 movne r3, r7, lspull #16
ldmneia r1!, {r4, r7} @ Shouldnt fault ldmneia r1!, {r4, r7} @ Shouldnt fault
orrne r3, r3, r4, push #16 orrne r3, r3, r4, lspush #16
movne r4, r4, pull #16 movne r4, r4, lspull #16
orrne r4, r4, r7, push #16 orrne r4, r4, r7, lspush #16
stmneia r0!, {r3 - r4} stmneia r0!, {r3 - r4}
tst ip, #4 tst ip, #4
movne r3, r7, pull #16 movne r3, r7, lspull #16
USER( TUSER( ldrne) r7, [r1], #4) @ May fault USER( TUSER( ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #16 orrne r3, r3, r7, lspush #16
strne r3, [r0], #4 strne r3, [r0], #4
ands ip, ip, #3 ands ip, ip, #3
beq .Lcfu_2fupi beq .Lcfu_2fupi
...@@ -493,9 +493,9 @@ USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault ...@@ -493,9 +493,9 @@ USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault
.Lcfu_3fupi: subs r2, r2, #4 .Lcfu_3fupi: subs r2, r2, #4
addmi ip, r2, #4 addmi ip, r2, #4
bmi .Lcfu_3nowords bmi .Lcfu_3nowords
mov r3, r7, pull #24 mov r3, r7, lspull #24
USER( TUSER( ldr) r7, [r1], #4) @ May fault USER( TUSER( ldr) r7, [r1], #4) @ May fault
orr r3, r3, r7, push #8 orr r3, r3, r7, lspush #8
str r3, [r0], #4 str r3, [r0], #4
mov ip, r1, lsl #32 - PAGE_SHIFT mov ip, r1, lsl #32 - PAGE_SHIFT
rsb ip, ip, #0 rsb ip, ip, #0
...@@ -507,30 +507,30 @@ USER( TUSER( ldr) r7, [r1], #4) @ May fault ...@@ -507,30 +507,30 @@ USER( TUSER( ldr) r7, [r1], #4) @ May fault
subs ip, ip, #16 subs ip, ip, #16
blt .Lcfu_3rem8lp blt .Lcfu_3rem8lp
.Lcfu_3cpy8lp: mov r3, r7, pull #24 .Lcfu_3cpy8lp: mov r3, r7, lspull #24
ldmia r1!, {r4 - r7} @ Shouldnt fault ldmia r1!, {r4 - r7} @ Shouldnt fault
orr r3, r3, r4, push #8 orr r3, r3, r4, lspush #8
mov r4, r4, pull #24 mov r4, r4, lspull #24
orr r4, r4, r5, push #8 orr r4, r4, r5, lspush #8
mov r5, r5, pull #24 mov r5, r5, lspull #24
orr r5, r5, r6, push #8 orr r5, r5, r6, lspush #8
mov r6, r6, pull #24 mov r6, r6, lspull #24
orr r6, r6, r7, push #8 orr r6, r6, r7, lspush #8
stmia r0!, {r3 - r6} stmia r0!, {r3 - r6}
subs ip, ip, #16 subs ip, ip, #16
bpl .Lcfu_3cpy8lp bpl .Lcfu_3cpy8lp
.Lcfu_3rem8lp: tst ip, #8 .Lcfu_3rem8lp: tst ip, #8
movne r3, r7, pull #24 movne r3, r7, lspull #24
ldmneia r1!, {r4, r7} @ Shouldn't fault ldmneia r1!, {r4, r7} @ Shouldn't fault
orrne r3, r3, r4, push #8 orrne r3, r3, r4, lspush #8
movne r4, r4, pull #24 movne r4, r4, lspull #24
orrne r4, r4, r7, push #8 orrne r4, r4, r7, lspush #8
stmneia r0!, {r3 - r4} stmneia r0!, {r3 - r4}
tst ip, #4 tst ip, #4
movne r3, r7, pull #24 movne r3, r7, lspull #24
USER( TUSER( ldrne) r7, [r1], #4) @ May fault USER( TUSER( ldrne) r7, [r1], #4) @ May fault
orrne r3, r3, r7, push #8 orrne r3, r3, r7, lspush #8
strne r3, [r0], #4 strne r3, [r0], #4
ands ip, ip, #3 ands ip, ip, #3
beq .Lcfu_3fupi beq .Lcfu_3fupi
......
...@@ -155,7 +155,7 @@ static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id) ...@@ -155,7 +155,7 @@ static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id)
static struct irqaction cns3xxx_timer_irq = { static struct irqaction cns3xxx_timer_irq = {
.name = "timer", .name = "timer",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = cns3xxx_timer_interrupt, .handler = cns3xxx_timer_interrupt,
}; };
......
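Most of the driver and platform hunks that follow simply drop IRQF_DISABLED from request_irq()/irqaction flags. The flag has been a no-op since the genirq core began running every handler with local interrupts disabled (around 2.6.35), so these conversions are behaviour-neutral. A minimal sketch of the resulting pattern, using illustrative names rather than code taken from this commit:

#include <linux/interrupt.h>

/* Hypothetical timer tick handler; only the .flags line matters here. */
static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	/* acknowledge the timer and advance the clock event device here */
	return IRQ_HANDLED;
}

static struct irqaction example_timer_irq = {
	.name		= "example-timer",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,	/* IRQF_DISABLED no longer listed */
	.handler	= example_timer_interrupt,
};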
...@@ -206,7 +206,7 @@ ebsa110_timer_interrupt(int irq, void *dev_id) ...@@ -206,7 +206,7 @@ ebsa110_timer_interrupt(int irq, void *dev_id)
static struct irqaction ebsa110_timer_irq = { static struct irqaction ebsa110_timer_irq = {
.name = "EBSA110 Timer Tick", .name = "EBSA110 Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = ebsa110_timer_interrupt, .handler = ebsa110_timer_interrupt,
}; };
......
...@@ -105,7 +105,7 @@ static irqreturn_t timer1_interrupt(int irq, void *dev_id) ...@@ -105,7 +105,7 @@ static irqreturn_t timer1_interrupt(int irq, void *dev_id)
static struct irqaction footbridge_timer_irq = { static struct irqaction footbridge_timer_irq = {
.name = "dc21285_timer1", .name = "dc21285_timer1",
.handler = timer1_interrupt, .handler = timer1_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.dev_id = &ckevt_dc21285, .dev_id = &ckevt_dc21285,
}; };
...@@ -125,7 +125,7 @@ void __init footbridge_timer_init(void) ...@@ -125,7 +125,7 @@ void __init footbridge_timer_init(void)
clockevents_config_and_register(ce, rate, 0x4, 0xffffff); clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
} }
static u32 notrace footbridge_read_sched_clock(void) static u64 notrace footbridge_read_sched_clock(void)
{ {
return ~*CSR_TIMER3_VALUE; return ~*CSR_TIMER3_VALUE;
} }
...@@ -138,5 +138,5 @@ void __init footbridge_sched_clock(void) ...@@ -138,5 +138,5 @@ void __init footbridge_sched_clock(void)
*CSR_TIMER3_CLR = 0; *CSR_TIMER3_CLR = 0;
*CSR_TIMER3_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16; *CSR_TIMER3_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
setup_sched_clock(footbridge_read_sched_clock, 24, rate); sched_clock_register(footbridge_read_sched_clock, 24, rate);
} }
...@@ -334,15 +334,15 @@ void __init dc21285_preinit(void) ...@@ -334,15 +334,15 @@ void __init dc21285_preinit(void)
/* /*
* We don't care if these fail. * We don't care if these fail.
*/ */
dc21285_request_irq(IRQ_PCI_SERR, dc21285_serr_irq, IRQF_DISABLED, dc21285_request_irq(IRQ_PCI_SERR, dc21285_serr_irq, 0,
"PCI system error", &serr_timer); "PCI system error", &serr_timer);
dc21285_request_irq(IRQ_PCI_PERR, dc21285_parity_irq, IRQF_DISABLED, dc21285_request_irq(IRQ_PCI_PERR, dc21285_parity_irq, 0,
"PCI parity error", &perr_timer); "PCI parity error", &perr_timer);
dc21285_request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, IRQF_DISABLED, dc21285_request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, 0,
"PCI abort", NULL); "PCI abort", NULL);
dc21285_request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, IRQF_DISABLED, dc21285_request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, 0,
"Discard timer", NULL); "Discard timer", NULL);
dc21285_request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, IRQF_DISABLED, dc21285_request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, 0,
"PCI data parity", NULL); "PCI data parity", NULL);
if (cfn_mode) { if (cfn_mode) {
......
...@@ -27,7 +27,7 @@ static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) ...@@ -27,7 +27,7 @@ static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
static struct irqaction pit_timer_irq = { static struct irqaction pit_timer_irq = {
.name = "pit", .name = "pit",
.handler = pit_timer_interrupt, .handler = pit_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.dev_id = &i8253_clockevent, .dev_id = &i8253_clockevent,
}; };
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <asm/system.h> #include <asm/system_misc.h>
#include <asm/proc-fns.h> #include <asm/proc-fns.h>
static void gemini_idle(void) static void gemini_idle(void)
......
...@@ -358,7 +358,7 @@ static struct clock_event_device integrator_clockevent = { ...@@ -358,7 +358,7 @@ static struct clock_event_device integrator_clockevent = {
static struct irqaction integrator_timer_irq = { static struct irqaction integrator_timer_irq = {
.name = "timer", .name = "timer",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = integrator_timer_interrupt, .handler = integrator_timer_interrupt,
.dev_id = &integrator_clockevent, .dev_id = &integrator_clockevent,
}; };
......
...@@ -312,7 +312,7 @@ static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id) ...@@ -312,7 +312,7 @@ static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
static struct irqaction ixp4xx_timer_irq = { static struct irqaction ixp4xx_timer_irq = {
.name = "timer1", .name = "timer1",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = ixp4xx_timer_interrupt, .handler = ixp4xx_timer_interrupt,
.dev_id = &clockevent_ixp4xx, .dev_id = &clockevent_ixp4xx,
}; };
......
...@@ -233,8 +233,7 @@ static int __init dsmg600_gpio_init(void) ...@@ -233,8 +233,7 @@ static int __init dsmg600_gpio_init(void)
gpio_request(DSMG600_RB_GPIO, "reset button"); gpio_request(DSMG600_RB_GPIO, "reset button");
if (request_irq(gpio_to_irq(DSMG600_RB_GPIO), &dsmg600_reset_handler, if (request_irq(gpio_to_irq(DSMG600_RB_GPIO), &dsmg600_reset_handler,
IRQF_DISABLED | IRQF_TRIGGER_LOW, IRQF_TRIGGER_LOW, "DSM-G600 reset button", NULL) < 0) {
"DSM-G600 reset button", NULL) < 0) {
printk(KERN_DEBUG "Reset Button IRQ %d not available\n", printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
gpio_to_irq(DSMG600_RB_GPIO)); gpio_to_irq(DSMG600_RB_GPIO));
......
...@@ -208,16 +208,14 @@ static void __init fsg_init(void) ...@@ -208,16 +208,14 @@ static void __init fsg_init(void)
platform_add_devices(fsg_devices, ARRAY_SIZE(fsg_devices)); platform_add_devices(fsg_devices, ARRAY_SIZE(fsg_devices));
if (request_irq(gpio_to_irq(FSG_RB_GPIO), &fsg_reset_handler, if (request_irq(gpio_to_irq(FSG_RB_GPIO), &fsg_reset_handler,
IRQF_DISABLED | IRQF_TRIGGER_LOW, IRQF_TRIGGER_LOW, "FSG reset button", NULL) < 0) {
"FSG reset button", NULL) < 0) {
printk(KERN_DEBUG "Reset Button IRQ %d not available\n", printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
gpio_to_irq(FSG_RB_GPIO)); gpio_to_irq(FSG_RB_GPIO));
} }
if (request_irq(gpio_to_irq(FSG_SB_GPIO), &fsg_power_handler, if (request_irq(gpio_to_irq(FSG_SB_GPIO), &fsg_power_handler,
IRQF_DISABLED | IRQF_TRIGGER_LOW, IRQF_TRIGGER_LOW, "FSG power button", NULL) < 0) {
"FSG power button", NULL) < 0) {
printk(KERN_DEBUG "Power Button IRQ %d not available\n", printk(KERN_DEBUG "Power Button IRQ %d not available\n",
gpio_to_irq(FSG_SB_GPIO)); gpio_to_irq(FSG_SB_GPIO));
......
...@@ -295,8 +295,7 @@ static void __init nas100d_init(void) ...@@ -295,8 +295,7 @@ static void __init nas100d_init(void)
pm_power_off = nas100d_power_off; pm_power_off = nas100d_power_off;
if (request_irq(gpio_to_irq(NAS100D_RB_GPIO), &nas100d_reset_handler, if (request_irq(gpio_to_irq(NAS100D_RB_GPIO), &nas100d_reset_handler,
IRQF_DISABLED | IRQF_TRIGGER_LOW, IRQF_TRIGGER_LOW, "NAS100D reset button", NULL) < 0) {
"NAS100D reset button", NULL) < 0) {
printk(KERN_DEBUG "Reset Button IRQ %d not available\n", printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
gpio_to_irq(NAS100D_RB_GPIO)); gpio_to_irq(NAS100D_RB_GPIO));
......
...@@ -265,16 +265,14 @@ static void __init nslu2_init(void) ...@@ -265,16 +265,14 @@ static void __init nslu2_init(void)
pm_power_off = nslu2_power_off; pm_power_off = nslu2_power_off;
if (request_irq(gpio_to_irq(NSLU2_RB_GPIO), &nslu2_reset_handler, if (request_irq(gpio_to_irq(NSLU2_RB_GPIO), &nslu2_reset_handler,
IRQF_DISABLED | IRQF_TRIGGER_LOW, IRQF_TRIGGER_LOW, "NSLU2 reset button", NULL) < 0) {
"NSLU2 reset button", NULL) < 0) {
printk(KERN_DEBUG "Reset Button IRQ %d not available\n", printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
gpio_to_irq(NSLU2_RB_GPIO)); gpio_to_irq(NSLU2_RB_GPIO));
} }
if (request_irq(gpio_to_irq(NSLU2_PB_GPIO), &nslu2_power_handler, if (request_irq(gpio_to_irq(NSLU2_PB_GPIO), &nslu2_power_handler,
IRQF_DISABLED | IRQF_TRIGGER_HIGH, IRQF_TRIGGER_HIGH, "NSLU2 power button", NULL) < 0) {
"NSLU2 power button", NULL) < 0) {
printk(KERN_DEBUG "Power Button IRQ %d not available\n", printk(KERN_DEBUG "Power Button IRQ %d not available\n",
gpio_to_irq(NSLU2_PB_GPIO)); gpio_to_irq(NSLU2_PB_GPIO));
......
...@@ -122,7 +122,7 @@ static irqreturn_t ks8695_timer_interrupt(int irq, void *dev_id) ...@@ -122,7 +122,7 @@ static irqreturn_t ks8695_timer_interrupt(int irq, void *dev_id)
static struct irqaction ks8695_timer_irq = { static struct irqaction ks8695_timer_irq = {
.name = "ks8695_tick", .name = "ks8695_tick",
.flags = IRQF_DISABLED | IRQF_TIMER, .flags = IRQF_TIMER,
.handler = ks8695_timer_interrupt, .handler = ks8695_timer_interrupt,
}; };
......
...@@ -90,7 +90,7 @@ static irqreturn_t lpc32xx_timer_interrupt(int irq, void *dev_id) ...@@ -90,7 +90,7 @@ static irqreturn_t lpc32xx_timer_interrupt(int irq, void *dev_id)
static struct irqaction lpc32xx_timer_irq = { static struct irqaction lpc32xx_timer_irq = {
.name = "LPC32XX Timer Tick", .name = "LPC32XX Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = lpc32xx_timer_interrupt, .handler = lpc32xx_timer_interrupt,
}; };
......
...@@ -186,7 +186,7 @@ static void __init timer_config(void) ...@@ -186,7 +186,7 @@ static void __init timer_config(void)
static struct irqaction timer_irq = { static struct irqaction timer_irq = {
.name = "timer", .name = "timer",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = timer_interrupt, .handler = timer_interrupt,
.dev_id = &ckevt, .dev_id = &ckevt,
}; };
......
...@@ -99,7 +99,7 @@ netx_timer_interrupt(int irq, void *dev_id) ...@@ -99,7 +99,7 @@ netx_timer_interrupt(int irq, void *dev_id)
static struct irqaction netx_timer_irq = { static struct irqaction netx_timer_irq = {
.name = "NetX Timer Tick", .name = "NetX Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = netx_timer_interrupt, .handler = netx_timer_interrupt,
}; };
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/davinci_emac.h> #include <linux/davinci_emac.h>
#include <asm/system.h>
#include "omap_device.h" #include "omap_device.h"
#include "am35xx.h" #include "am35xx.h"
#include "control.h" #include "control.h"
......
...@@ -141,7 +141,7 @@ static int iomd_request_dma(unsigned int chan, dma_t *dma) ...@@ -141,7 +141,7 @@ static int iomd_request_dma(unsigned int chan, dma_t *dma)
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma); struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
return request_irq(idma->irq, iomd_dma_handle, return request_irq(idma->irq, iomd_dma_handle,
IRQF_DISABLED, idma->dma.device_id, idma); 0, idma->dma.device_id, idma);
} }
static void iomd_free_dma(unsigned int chan, dma_t *dma) static void iomd_free_dma(unsigned int chan, dma_t *dma)
......
...@@ -75,7 +75,6 @@ ioc_timer_interrupt(int irq, void *dev_id) ...@@ -75,7 +75,6 @@ ioc_timer_interrupt(int irq, void *dev_id)
static struct irqaction ioc_timer_irq = { static struct irqaction ioc_timer_irq = {
.name = "timer", .name = "timer",
.flags = IRQF_DISABLED,
.handler = ioc_timer_interrupt .handler = ioc_timer_interrupt
}; };
......
...@@ -112,7 +112,7 @@ static struct clock_event_device ckevt_sa1100_osmr0 = { ...@@ -112,7 +112,7 @@ static struct clock_event_device ckevt_sa1100_osmr0 = {
static struct irqaction sa1100_timer_irq = { static struct irqaction sa1100_timer_irq = {
.name = "ost0", .name = "ost0",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = sa1100_ost0_interrupt, .handler = sa1100_ost0_interrupt,
.dev_id = &ckevt_sa1100_osmr0, .dev_id = &ckevt_sa1100_osmr0,
}; };
......
...@@ -172,7 +172,7 @@ static irqreturn_t spear_timer_interrupt(int irq, void *dev_id) ...@@ -172,7 +172,7 @@ static irqreturn_t spear_timer_interrupt(int irq, void *dev_id)
static struct irqaction spear_timer_irq = { static struct irqaction spear_timer_irq = {
.name = "timer", .name = "timer",
.flags = IRQF_DISABLED | IRQF_TIMER, .flags = IRQF_TIMER,
.handler = spear_timer_interrupt .handler = spear_timer_interrupt
}; };
......
...@@ -8,8 +8,11 @@ obj-y := v2m.o ...@@ -8,8 +8,11 @@ obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
CFLAGS_dcscb.o += -march=armv7-a CFLAGS_dcscb.o += -march=armv7-a
CFLAGS_REMOVE_dcscb.o = -pg
obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o
CFLAGS_REMOVE_spc.o = -pg
obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o
CFLAGS_tc2_pm.o += -march=armv7-a CFLAGS_tc2_pm.o += -march=armv7-a
CFLAGS_REMOVE_tc2_pm.o = -pg
obj-$(CONFIG_SMP) += platsmp.o obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
...@@ -111,7 +111,7 @@ static irqreturn_t nuc900_timer0_interrupt(int irq, void *dev_id) ...@@ -111,7 +111,7 @@ static irqreturn_t nuc900_timer0_interrupt(int irq, void *dev_id)
static struct irqaction nuc900_timer0_irq = { static struct irqaction nuc900_timer0_irq = {
.name = "nuc900-timer0", .name = "nuc900-timer0",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = nuc900_timer0_interrupt, .handler = nuc900_timer0_interrupt,
}; };
......
...@@ -446,7 +446,6 @@ config CPU_32v5 ...@@ -446,7 +446,6 @@ config CPU_32v5
config CPU_32v6 config CPU_32v6
bool bool
select CPU_USE_DOMAINS if CPU_V6 && MMU
select TLS_REG_EMUL if !CPU_32v6K && !MMU select TLS_REG_EMUL if !CPU_32v6K && !MMU
config CPU_32v6K config CPU_32v6K
...@@ -671,7 +670,7 @@ config ARM_VIRT_EXT ...@@ -671,7 +670,7 @@ config ARM_VIRT_EXT
config SWP_EMULATE config SWP_EMULATE
bool "Emulate SWP/SWPB instructions" bool "Emulate SWP/SWPB instructions"
depends on !CPU_USE_DOMAINS && CPU_V7 depends on CPU_V7
default y if SMP default y if SMP
select HAVE_PROC_CPU if PROC_FS select HAVE_PROC_CPU if PROC_FS
help help
......
...@@ -331,7 +331,9 @@ static void __init enable_l2(void) ...@@ -331,7 +331,9 @@ static void __init enable_l2(void)
enable_icache(); enable_icache();
if (d) if (d)
enable_dcache(); enable_dcache();
} } else
pr_err(FW_BUG
"Feroceon L2: bootloader left the L2 cache on!\n");
} }
void __init feroceon_l2_init(int __l2_wt_override) void __init feroceon_l2_init(int __l2_wt_override)
......
...@@ -284,9 +284,6 @@ static void __dma_free_buffer(struct page *page, size_t size) ...@@ -284,9 +284,6 @@ static void __dma_free_buffer(struct page *page, size_t size)
} }
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#warning ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
static void *__alloc_from_contiguous(struct device *dev, size_t size, static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page, pgprot_t prot, struct page **ret_page,
......
...@@ -515,6 +515,16 @@ static void __init build_mem_type_table(void) ...@@ -515,6 +515,16 @@ static void __init build_mem_type_table(void)
hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
/*
* We don't use domains on ARMv6 (since this causes problems with
* v6/v7 kernels), so we must use a separate memory type for user
* r/o, kernel r/w to map the vectors page.
*/
#ifndef CONFIG_ARM_LPAE
if (cpu_arch == CPU_ARCH_ARMv6)
vecs_pgprot |= L_PTE_MT_VECTORS;
#endif
/* /*
* ARMv6 and above have extended page tables. * ARMv6 and above have extended page tables.
*/ */
......
...@@ -112,13 +112,9 @@ ...@@ -112,13 +112,9 @@
* 100x 1 0 1 r/o no acc * 100x 1 0 1 r/o no acc
* 10x0 1 0 1 r/o no acc * 10x0 1 0 1 r/o no acc
* 1011 0 0 1 r/w no acc * 1011 0 0 1 r/w no acc
* 110x 0 1 0 r/w r/o
* 11x0 0 1 0 r/w r/o
* 1111 0 1 1 r/w r/w
*
* If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
* 110x 1 1 1 r/o r/o * 110x 1 1 1 r/o r/o
* 11x0 1 1 1 r/o r/o * 11x0 1 1 1 r/o r/o
* 1111 0 1 1 r/w r/w
*/ */
.macro armv6_mt_table pfx .macro armv6_mt_table pfx
\pfx\()_mt_table: \pfx\()_mt_table:
...@@ -137,7 +133,7 @@ ...@@ -137,7 +133,7 @@
.long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
.long 0x00 @ unused .long 0x00 @ unused
.long 0x00 @ unused .long 0x00 @ unused
.long 0x00 @ unused .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS
.endm .endm
.macro armv6_set_pte_ext pfx .macro armv6_set_pte_ext pfx
...@@ -158,24 +154,21 @@ ...@@ -158,24 +154,21 @@
tst r1, #L_PTE_USER tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1 orrne r3, r3, #PTE_EXT_AP1
#ifdef CONFIG_CPU_USE_DOMAINS
@ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
#endif @ user read-only -> kernel read-only
bicne r3, r3, #PTE_EXT_AP0
tst r1, #L_PTE_XN tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN orrne r3, r3, #PTE_EXT_XN
orr r3, r3, r2 eor r3, r3, r2
tst r1, #L_PTE_YOUNG tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_PRESENT tstne r1, #L_PTE_PRESENT
moveq r3, #0 moveq r3, #0
#ifndef CONFIG_CPU_USE_DOMAINS
tstne r1, #L_PTE_NONE tstne r1, #L_PTE_NONE
movne r3, #0 movne r3, #0
#endif
str r3, [r0] str r3, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte mcr p15, 0, r0, c7, c10, 1 @ flush_pte
......
...@@ -90,21 +90,14 @@ ENTRY(cpu_v7_set_pte_ext) ...@@ -90,21 +90,14 @@ ENTRY(cpu_v7_set_pte_ext)
tst r1, #L_PTE_USER tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1 orrne r3, r3, #PTE_EXT_AP1
#ifdef CONFIG_CPU_USE_DOMAINS
@ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
#endif
tst r1, #L_PTE_XN tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN orrne r3, r3, #PTE_EXT_XN
tst r1, #L_PTE_YOUNG tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_VALID tstne r1, #L_PTE_VALID
#ifndef CONFIG_CPU_USE_DOMAINS
eorne r1, r1, #L_PTE_NONE eorne r1, r1, #L_PTE_NONE
tstne r1, #L_PTE_NONE tstne r1, #L_PTE_NONE
#endif
moveq r3, #0 moveq r3, #0
ARM( str r3, [r0, #2048]! ) ARM( str r3, [r0, #2048]! )
......
...@@ -192,6 +192,7 @@ __v7_cr7mp_setup: ...@@ -192,6 +192,7 @@ __v7_cr7mp_setup:
mov r10, #(1 << 0) @ Cache/TLB ops broadcasting mov r10, #(1 << 0) @ Cache/TLB ops broadcasting
b 1f b 1f
__v7_ca7mp_setup: __v7_ca7mp_setup:
__v7_ca12mp_setup:
__v7_ca15mp_setup: __v7_ca15mp_setup:
mov r10, #0 mov r10, #0
1: 1:
...@@ -483,6 +484,16 @@ __v7_ca7mp_proc_info: ...@@ -483,6 +484,16 @@ __v7_ca7mp_proc_info:
__v7_proc __v7_ca7mp_setup __v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
* ARM Ltd. Cortex A12 processor.
*/
.type __v7_ca12mp_proc_info, #object
__v7_ca12mp_proc_info:
.long 0x410fc0d0
.long 0xff0ffff0
__v7_proc __v7_ca12mp_setup
.size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
/* /*
* ARM Ltd. Cortex A15 processor. * ARM Ltd. Cortex A15 processor.
*/ */
......
...@@ -127,7 +127,7 @@ iop_timer_interrupt(int irq, void *dev_id) ...@@ -127,7 +127,7 @@ iop_timer_interrupt(int irq, void *dev_id)
static struct irqaction iop_timer_irq = { static struct irqaction iop_timer_irq = {
.name = "IOP Timer Tick", .name = "IOP Timer Tick",
.handler = iop_timer_interrupt, .handler = iop_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.dev_id = &iop_clockevent, .dev_id = &iop_clockevent,
}; };
......
...@@ -333,7 +333,7 @@ static irqreturn_t u300_timer_interrupt(int irq, void *dev_id) ...@@ -333,7 +333,7 @@ static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
static struct irqaction u300_timer_irq = { static struct irqaction u300_timer_irq = {
.name = "U300 Timer Tick", .name = "U300 Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = u300_timer_interrupt, .handler = u300_timer_interrupt,
}; };
......
...@@ -2971,7 +2971,7 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) ...@@ -2971,7 +2971,7 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
ec->irqaddr = ashost->fast + INT_REG; ec->irqaddr = ashost->fast + INT_REG;
ec->irqmask = 0x0a; ec->irqmask = 0x0a;
ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost); ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
if (ret) { if (ret) {
printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n", printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
host->host_no, ashost->scsi.irq, ret); host->host_no, ashost->scsi.irq, ret);
......
...@@ -262,7 +262,7 @@ static int cumanascsi1_probe(struct expansion_card *ec, ...@@ -262,7 +262,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
goto out_unmap; goto out_unmap;
} }
ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED, ret = request_irq(host->irq, cumanascsi_intr, 0,
"CumanaSCSI-1", host); "CumanaSCSI-1", host);
if (ret) { if (ret) {
printk("scsi%d: IRQ%d not free: %d\n", printk("scsi%d: IRQ%d not free: %d\n",
......
...@@ -431,7 +431,7 @@ static int cumanascsi2_probe(struct expansion_card *ec, ...@@ -431,7 +431,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
goto out_free; goto out_free;
ret = request_irq(ec->irq, cumanascsi_2_intr, ret = request_irq(ec->irq, cumanascsi_2_intr,
IRQF_DISABLED, "cumanascsi2", info); 0, "cumanascsi2", info);
if (ret) { if (ret) {
printk("scsi%d: IRQ%d not free: %d\n", printk("scsi%d: IRQ%d not free: %d\n",
host->host_no, ec->irq, ret); host->host_no, ec->irq, ret);
......
...@@ -358,7 +358,7 @@ static int powertecscsi_probe(struct expansion_card *ec, ...@@ -358,7 +358,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
goto out_free; goto out_free;
ret = request_irq(ec->irq, powertecscsi_intr, ret = request_irq(ec->irq, powertecscsi_intr,
IRQF_DISABLED, "powertec", info); 0, "powertec", info);
if (ret) { if (ret) {
printk("scsi%d: IRQ%d not free: %d\n", printk("scsi%d: IRQ%d not free: %d\n",
host->host_no, ec->irq, ret); host->host_no, ec->irq, ret);
......
...@@ -55,7 +55,6 @@ ...@@ -55,7 +55,6 @@
#include <mach/hardware.h> #include <mach/hardware.h>
#include <linux/io.h> #include <linux/io.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/system.h>
#include <mach/platform.h> #include <mach/platform.h>
#include <mach/irqs.h> #include <mach/irqs.h>
......
...@@ -252,6 +252,8 @@ static inline void memblock_dump_all(void) ...@@ -252,6 +252,8 @@ static inline void memblock_dump_all(void)
void memblock_set_current_limit(phys_addr_t limit); void memblock_set_current_limit(phys_addr_t limit);
phys_addr_t memblock_get_current_limit(void);
/* /*
* pfn conversion functions * pfn conversion functions
* *
......
...@@ -1407,6 +1407,11 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit) ...@@ -1407,6 +1407,11 @@ void __init_memblock memblock_set_current_limit(phys_addr_t limit)
memblock.current_limit = limit; memblock.current_limit = limit;
} }
phys_addr_t __init_memblock memblock_get_current_limit(void)
{
return memblock.current_limit;
}
static void __init_memblock memblock_dump(struct memblock_type *type, char *name) static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{ {
unsigned long long base, size; unsigned long long base, size;
......
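The memblock hunks above pair the existing memblock_set_current_limit() setter with a new memblock_get_current_limit() read accessor. A hedged sketch of how early boot code might use the pair; the helper and its name are illustrative, not taken from this series:

#include <linux/memblock.h>

/* Save the current limit, allocate below a temporary cap, then restore it. */
static phys_addr_t __init example_alloc_below(phys_addr_t size, phys_addr_t cap)
{
	phys_addr_t old_limit = memblock_get_current_limit();
	phys_addr_t phys;

	memblock_set_current_limit(cap);
	phys = memblock_alloc(size, PAGE_SIZE);	/* memblock_alloc() of this era panics on failure */
	memblock_set_current_limit(old_limit);

	return phys;
}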