Commit 316dde2f authored by Linus Torvalds

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "In this ARM merge, we remove more lines than we add.  Changes include:

   - Enable imprecise aborts early, so that bus errors aren't masked
     until later in the boot.  This has the side effect that boot
     loaders which provoke these aborts can cause the kernel to crash
     early in boot, so we install a handler to report this event around
     the site where these are enabled.

   - Remove the buggy but impossible to enable cmpxchg syscall code.

   - Add unwinding annotations to some assembly code.

   - Add support for atomic half-word exchange for ARMv6k+.

   - Reduce ioremap() alignment for SMP/LPAE cases where we don't need
     the large alignment.

   - Addition of an "optimal" 3G configuration for systems with 1G of
     RAM.

   - Increase vmalloc space by 8M (VMALLOC_END moves from 0xff000000 to
     0xff800000).

   - Constify some SMP operations structures, which are never written
     to.

   - Improve ARM's dma_mmap() support for mapping DMA coherent memory
     into userspace.

   - Fix the NMI backtrace code in the IPI case on ARM, where the
     failing CPU gets stuck for 10 seconds waiting for its own IPI to be
     delivered.

   - Removal of legacy PM support from the AMBA bus driver.

   - Another fix for the previous fix of vdsomunge"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (23 commits)
  ARM: 8449/1: fix bug in vdsomunge swab32 macro
  arm: add missing of_node_put
  ARM: 8447/1: catch pending imprecise abort on unmask
  ARM: 8446/1: amba: Remove unused callbacks for legacy system PM
  ARM: 8443/1: Adding support for atomic half word exchange
  ARM: clean up TWD after previous patch
  ARM: 8441/2: twd: Don't set CLOCK_EVT_FEAT_C3STOP unconditionally
  ARM: 8440/1: remove obsolete documentation
  ARM: make highpte an expert option
  ARM: 8433/1: add a VMSPLIT_3G_OPT config option
  ARM: 8439/1: Fix backtrace generation when IPI is masked
  ARM: 8428/1: kgdb: Fix registers on sleeping tasks
  ARM: 8427/1: dma-mapping: add support for offset parameter in dma_mmap()
  ARM: 8426/1: dma-mapping: add missing range check in dma_mmap()
  ARM: remove user cmpxchg syscall
  ARM: 8438/1: Add unwinding to __clear_user_std()
  ARM: 8436/1: hw_breakpoint: remove unnecessary header
  ARM: 8434/2: Revert "7655/1: smp_twd: make twd_local_timer_of_register() no-op for nosmp"
  ARM: 8432/1: move VMALLOC_END from 0xff000000 to 0xff800000
  ARM: 8430/1: use default ioremap alignment for SMP or LPAE
  ...
parents 2c2b8285 116ef0fc
(deleted file: obsolete "Victor" documentation, removed by "ARM: 8440/1: remove obsolete documentation")
-Victor is known as a "digital talking book player" manufactured by
-VisuAide, Inc. to be used by blind people.
-For more information related to Victor, see:
-	http://www.humanware.com/en-usa/products
-Of course Victor is using Linux as its main operating system.
-The Victor implementation for Linux is maintained by Nicolas Pitre:
-	nico@visuaide.com
-	nico@fluxnic.net
-For any comments, please feel free to contact me through the above
-addresses.
@@ -54,7 +54,7 @@ VMALLOC_START	VMALLOC_END-1	vmalloc() / ioremap() space.
 				located here through iotable_init().
 				VMALLOC_START is based upon the value
 				of the high_memory variable, and VMALLOC_END
-				is equal to 0xff000000.
+				is equal to 0xff800000.

 PAGE_OFFSET	high_memory-1	Kernel direct-mapped RAM region.
 				This maps the platforms RAM, and typically
......
@@ -19,6 +19,11 @@ interrupts.
 - reg : Specify the base address and the size of the TWD timer
 	register window.

+Optional
+
+- always-on : a boolean property. If present, the timer is powered through
+  an always-on power domain, therefore it never loses context.
+
 Example:

 	twd-timer@2c000600 {
......
@@ -1411,7 +1411,6 @@ config HAVE_ARM_ARCH_TIMER

 config HAVE_ARM_TWD
 	bool
-	depends on SMP
 	select CLKSRC_OF if OF
 	help
 	  This options enables support for the ARM timer and watchdog unit
@@ -1471,6 +1470,8 @@ choice
 	config VMSPLIT_3G
 		bool "3G/1G user/kernel split"
+	config VMSPLIT_3G_OPT
+		bool "3G/1G user/kernel split (for full 1G low memory)"
 	config VMSPLIT_2G
 		bool "2G/2G user/kernel split"
 	config VMSPLIT_1G
@@ -1482,6 +1483,7 @@ config PAGE_OFFSET
 	default PHYS_OFFSET if !MMU
 	default 0x40000000 if VMSPLIT_1G
 	default 0x80000000 if VMSPLIT_2G
+	default 0xB0000000 if VMSPLIT_3G_OPT
 	default 0xC0000000

 config NR_CPUS
@@ -1696,8 +1698,9 @@ config HIGHMEM
 	  If unsure, say n.

 config HIGHPTE
-	bool "Allocate 2nd-level pagetables from highmem"
+	bool "Allocate 2nd-level pagetables from highmem" if EXPERT
 	depends on HIGHMEM
+	default y
 	help
 	  The VM uses one page of physical memory for each page table.
 	  For systems with a lot of processes, this can use a lot of
......
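A rough sanity check of the new VMSPLIT_3G_OPT numbers (assuming the usual
reservations of very roughly 0.25 GiB at the top of the kernel map for
vmalloc, fixmap and friends):

    user space:   0x00000000..0xAFFFFFFF = 0xB0000000 bytes = 2.75 GiB
    kernel space: 0xB0000000..0xFFFFFFFF = 0x50000000 bytes = 1.25 GiB
    lowmem:       ~1.25 GiB - ~0.25 GiB of reservations    ~= 1 GiB

So a board with exactly 1 GiB of RAM can keep all of it in lowmem and avoid
HIGHMEM (and the HIGHPTE cost shown above), while user space only shrinks
from 3 GiB to 2.75 GiB.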
@@ -39,6 +39,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
 	case 1:
 		asm volatile("@ __xchg1\n"
 		"1:	ldrexb	%0, [%3]\n"
@@ -49,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 			: "r" (x), "r" (ptr)
 			: "memory", "cc");
 		break;
+	case 2:
+		asm volatile("@ __xchg2\n"
+		"1:	ldrexh	%0, [%3]\n"
+		"	strexh	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+#endif
 	case 4:
 		asm volatile("@ __xchg4\n"
 		"1:	ldrex	%0, [%3]\n"
......
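With the new case 2 above, the kernel's xchg() works directly on 16-bit
quantities on ARMv6K and later. As a hedged, userspace-only illustration
(not from the patch), GCC's __atomic_exchange_n builtin compiles to the same
ldrexh/strexh retry loop on those cores:

    /* sketch.c - userspace analogue of the new __xchg() case 2.
     * Build with: gcc -O2 sketch.c */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t slot = 0x1234;
        /* On ARMv6K+ this lowers to a ldrexh/strexh loop, the same
         * primitive the kernel now open-codes for size == 2. */
        uint16_t old = __atomic_exchange_n(&slot, 0xBEEF, __ATOMIC_SEQ_CST);
        printf("old=0x%04x new=0x%04x\n", old, slot); /* 0x1234, 0xbeef */
        return 0;
    }

Pre-V6K cores lack ldrexh/strexh entirely, which is why the new case sits
behind the CONFIG_CPU_V6 guard.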
@@ -54,6 +54,14 @@ static inline void arch_local_irq_disable(void)
 #define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
 #define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")

+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable()  __asm__("cpsie a	@ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a	@ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable()	do { } while (0)
+#define local_abt_disable()	do { } while (0)
+#endif
+
 #else

 /*
@@ -136,6 +144,8 @@ static inline void arch_local_irq_disable(void)
 	: "memory", "cc");					\
 	})

+#define local_abt_enable()	do { } while (0)
+#define local_abt_disable()	do { } while (0)
 #endif

 /*
......
@@ -47,7 +47,7 @@ struct machine_desc {
 	unsigned		l2c_aux_val;	/* L2 cache aux value	*/
 	unsigned		l2c_aux_mask;	/* L2 cache aux mask	*/
 	void			(*l2c_write_sec)(unsigned long, unsigned);
-	struct smp_operations	*smp;		/* SMP operations	*/
+	const struct smp_operations	*smp;	/* SMP operations	*/
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
 	void			(*dt_fixup)(void);
......
@@ -76,10 +76,12 @@
  */
 #define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))

+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Allow 16MB-aligned ioremap pages
  */
 #define IOREMAP_MAX_ORDER	24
+#endif

 #else /* CONFIG_MMU */
......
@@ -43,7 +43,7 @@
  */
 #define VMALLOC_OFFSET		(8*1024*1024)
 #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END		0xff000000UL
+#define VMALLOC_END		0xff800000UL

 #define LIBRARY_TEXT_START	0x0c000000
......
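The size of the move is easy to verify:

    0xff800000UL - 0xff000000UL = 0x00800000 = 8 * 1024 * 1024 = 8 MiB

so the vmalloc/ioremap region grows by 8 MiB, apparently reclaimed from a
previously unused gap below the fixmap area.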
@@ -112,7 +112,7 @@ struct smp_operations {

 struct of_cpu_method {
 	const char *method;
-	struct smp_operations *ops;
+	const struct smp_operations *ops;
 };

 #define CPU_METHOD_OF_DECLARE(name, _method, _ops)	\
@@ -122,6 +122,6 @@ struct of_cpu_method {
 /*
  * set platform specific SMP operations
  */
-extern void smp_set_ops(struct smp_operations *);
+extern void smp_set_ops(const struct smp_operations *);

 #endif /* ifndef __ASM_ARM_SMP_H */
@@ -21,13 +21,6 @@
  */
 #define __NR_syscalls  (392)

-/*
- * *NOTE*: This is a ghost syscall private to the kernel. Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence. Don't ever use this from user code.
- */
-#define __ARM_NR_cmpxchg		(__ARM_NR_BASE+0x00fff0)
-
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
......
@@ -101,6 +101,7 @@ void __init arm_dt_init_cpu_maps(void)
 		if (of_property_read_u32(cpu, "reg", &hwid)) {
 			pr_debug(" * %s missing reg property\n",
 				     cpu->full_name);
+			of_node_put(cpu);
 			return;
 		}

@@ -108,8 +109,10 @@ void __init arm_dt_init_cpu_maps(void)
 		 * 8 MSBs must be set to 0 in the DT since the reg property
 		 * defines the MPIDR[23:0].
 		 */
-		if (hwid & ~MPIDR_HWID_BITMASK)
+		if (hwid & ~MPIDR_HWID_BITMASK) {
+			of_node_put(cpu);
 			return;
+		}

 		/*
 		 * Duplicate MPIDRs are a recipe for disaster.
@@ -119,9 +122,11 @@ void __init arm_dt_init_cpu_maps(void)
 		 * to avoid matching valid MPIDR[23:0] values.
 		 */
 		for (j = 0; j < cpuidx; j++)
-			if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
-						     "properties in the DT\n"))
+			if (WARN(tmp_map[j] == hwid,
+				 "Duplicate /cpu reg properties in the DT\n")) {
+				of_node_put(cpu);
 				return;
+			}

 		/*
 		 * Build a stashed array of MPIDR values. Numbering scheme
@@ -143,6 +148,7 @@ void __init arm_dt_init_cpu_maps(void)
 					       "max cores %u, capping them\n",
 					       cpuidx, nr_cpu_ids)) {
 			cpuidx = nr_cpu_ids;
+			of_node_put(cpu);
 			break;
 		}
......
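The devtree.c hunks above all fix the same class of leak: a device_node
handed out by the OF iterators carries a reference that the next loop
iteration would normally drop, so every early exit must drop it by hand.
A minimal sketch of the rule, using a hypothetical helper (the function
name and logic are illustrative, not from this patch):

    #include <linux/errno.h>
    #include <linux/of.h>

    /* Hypothetical: count children, bailing out on the first one
     * that is not available. */
    static int count_available_children(struct device_node *parent)
    {
        struct device_node *np;
        int n = 0;

        for_each_child_of_node(parent, np) {
            if (!of_device_is_available(np)) {
                /* The iterator would release this reference on the
                 * next pass; an early return must do it itself,
                 * exactly as the of_node_put() calls added above. */
                of_node_put(np);
                return -ENODEV;
            }
            n++;
        }
        return n;
    }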
@@ -427,8 +427,7 @@ ENDPROC(__fiq_abt)
 	.endm

 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
-    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
@@ -859,20 +858,7 @@ __kuser_helper_start:

 __kuser_cmpxchg64:				@ 0xffff0f60

-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
-	/*
-	 * Poor you. No fast solution possible...
-	 * The kernel itself must perform the operation.
-	 * A special ghost syscall is used for that (see traps.c).
-	 */
-	stmfd	sp!, {r7, lr}
-	ldr	r7, 1f			@ it's 20 bits
-	swi	__ARM_NR_cmpxchg64
-	ldmfd	sp!, {r7, pc}
-1:	.word	__ARM_NR_cmpxchg64
-
-#elif defined(CONFIG_CPU_32v6K)
+#if defined(CONFIG_CPU_32v6K)

 	stmfd	sp!, {r4, r5, r6, r7}
 	ldrd	r4, r5, [r0]			@ load old val
@@ -948,20 +934,7 @@ __kuser_memory_barrier:			@ 0xffff0fa0

 __kuser_cmpxchg:				@ 0xffff0fc0

-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
-	/*
-	 * Poor you. No fast solution possible...
-	 * The kernel itself must perform the operation.
-	 * A special ghost syscall is used for that (see traps.c).
-	 */
-	stmfd	sp!, {r7, lr}
-	ldr	r7, 1f			@ it's 20 bits
-	swi	__ARM_NR_cmpxchg
-	ldmfd	sp!, {r7, pc}
-1:	.word	__ARM_NR_cmpxchg
-
-#elif __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6

 #ifdef CONFIG_MMU
......
@@ -35,7 +35,6 @@
 #include <asm/cputype.h>
 #include <asm/current.h>
 #include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
 #include <asm/traps.h>

 /* Breakpoint currently in use for each BRP. */
......
@@ -74,7 +74,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
 void
 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 {
-	struct pt_regs *thread_regs;
+	struct thread_info *ti;
 	int regno;

 	/* Just making sure... */
@@ -86,24 +86,17 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 		gdb_regs[regno] = 0;

 	/* Otherwise, we have only some registers from switch_to() */
-	thread_regs = task_pt_regs(task);
-	gdb_regs[_R0] = thread_regs->ARM_r0;
-	gdb_regs[_R1] = thread_regs->ARM_r1;
-	gdb_regs[_R2] = thread_regs->ARM_r2;
-	gdb_regs[_R3] = thread_regs->ARM_r3;
-	gdb_regs[_R4] = thread_regs->ARM_r4;
-	gdb_regs[_R5] = thread_regs->ARM_r5;
-	gdb_regs[_R6] = thread_regs->ARM_r6;
-	gdb_regs[_R7] = thread_regs->ARM_r7;
-	gdb_regs[_R8] = thread_regs->ARM_r8;
-	gdb_regs[_R9] = thread_regs->ARM_r9;
-	gdb_regs[_R10] = thread_regs->ARM_r10;
-	gdb_regs[_FP] = thread_regs->ARM_fp;
-	gdb_regs[_IP] = thread_regs->ARM_ip;
-	gdb_regs[_SPT] = thread_regs->ARM_sp;
-	gdb_regs[_LR] = thread_regs->ARM_lr;
-	gdb_regs[_PC] = thread_regs->ARM_pc;
-	gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
+	ti = task_thread_info(task);
+	gdb_regs[_R4] = ti->cpu_context.r4;
+	gdb_regs[_R5] = ti->cpu_context.r5;
+	gdb_regs[_R6] = ti->cpu_context.r6;
+	gdb_regs[_R7] = ti->cpu_context.r7;
+	gdb_regs[_R8] = ti->cpu_context.r8;
+	gdb_regs[_R9] = ti->cpu_context.r9;
+	gdb_regs[_R10] = ti->cpu_context.sl;
+	gdb_regs[_FP] = ti->cpu_context.fp;
+	gdb_regs[_SPT] = ti->cpu_context.sp;
+	gdb_regs[_PC] = ti->cpu_context.pc;
 }

 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
......
@@ -80,7 +80,7 @@ static DECLARE_COMPLETION(cpu_running);

 static struct smp_operations smp_ops;

-void __init smp_set_ops(struct smp_operations *ops)
+void __init smp_set_ops(const struct smp_operations *ops)
 {
 	if (ops)
 		smp_ops = *ops;
@@ -400,6 +400,7 @@ asmlinkage void secondary_start_kernel(void)

 	local_irq_enable();
 	local_fiq_enable();
+	local_abt_enable();

 	/*
 	 * OK, it's off to the idle thread for us
@@ -748,6 +749,15 @@ core_initcall(register_cpufreq_notifier);

 static void raise_nmi(cpumask_t *mask)
 {
+	/*
+	 * Generate the backtrace directly if we are running in a calling
+	 * context that is not preemptible by the backtrace IPI. Note
+	 * that nmi_cpu_backtrace() automatically removes the current cpu
+	 * from mask.
+	 */
+	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+		nmi_cpu_backtrace(NULL);
+
 	smp_cross_call(mask, IPI_CPU_BACKTRACE);
 }
......
@@ -23,7 +23,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>

-#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>

 /* set up by the platform code */
@@ -34,6 +33,8 @@ static unsigned long twd_timer_rate;
 static DEFINE_PER_CPU(bool, percpu_setup_called);

 static struct clock_event_device __percpu *twd_evt;
+static unsigned int twd_features =
+		CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 static int twd_ppi;

 static int twd_shutdown(struct clock_event_device *clk)
@@ -294,8 +295,7 @@ static void twd_timer_setup(void)
 	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);

 	clk->name = "local_timer";
-	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-			CLOCK_EVT_FEAT_C3STOP;
+	clk->features = twd_features;
 	clk->rating = 350;
 	clk->set_state_shutdown = twd_shutdown;
 	clk->set_state_periodic = twd_set_periodic;
@@ -350,6 +350,8 @@ static int __init twd_local_timer_common_register(struct device_node *np)
 		goto out_irq;

 	twd_get_clock(np);
+	if (!of_property_read_bool(np, "always-on"))
+		twd_features |= CLOCK_EVT_FEAT_C3STOP;

 	/*
 	 * Immediately configure the timer on the boot CPU, unless we need
@@ -392,9 +394,6 @@ static void __init twd_local_timer_of_register(struct device_node *np)
 {
 	int err;

-	if (!is_smp() || !setup_max_cpus)
-		return;
-
 	twd_ppi = irq_of_parse_and_map(np, 0);
 	if (!twd_ppi) {
 		err = -EINVAL;
......
@@ -625,58 +625,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		set_tls(regs->ARM_r0);
 		return 0;

-#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
-	/*
-	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
-	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
-	 * happened. Also set the user C flag accordingly.
-	 * If access permissions have to be fixed up then non-zero is
-	 * returned and the operation has to be re-attempted.
-	 *
-	 * *NOTE*: This is a ghost syscall private to the kernel. Only the
-	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
-	 * existence. Don't ever use this from user code.
-	 */
-	case NR(cmpxchg):
-	for (;;) {
-		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
-					 struct pt_regs *regs);
-		unsigned long val;
-		unsigned long addr = regs->ARM_r2;
-		struct mm_struct *mm = current->mm;
-		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
-		spinlock_t *ptl;
-
-		regs->ARM_cpsr &= ~PSR_C_BIT;
-		down_read(&mm->mmap_sem);
-		pgd = pgd_offset(mm, addr);
-		if (!pgd_present(*pgd))
-			goto bad_access;
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd_present(*pmd))
-			goto bad_access;
-		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-		if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
-			pte_unmap_unlock(pte, ptl);
-			goto bad_access;
-		}
-		val = *(unsigned long *)addr;
-		val -= regs->ARM_r0;
-		if (val == 0) {
-			*(unsigned long *)addr = regs->ARM_r1;
-			regs->ARM_cpsr |= PSR_C_BIT;
-		}
-		pte_unmap_unlock(pte, ptl);
-		up_read(&mm->mmap_sem);
-		return val;
-
-		bad_access:
-		up_read(&mm->mmap_sem);
-		/* simulate a write access fault */
-		do_DataAbort(addr, 15 + (1 << 11), regs);
-	}
-#endif
-
 	default:
 		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 		   if not implemented, rather than raising SIGILL. This
......
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>

 		.text

@@ -20,6 +21,8 @@
  */
 ENTRY(__clear_user_std)
 WEAK(arm_clear_user)
+UNWIND(.fnstart)
+UNWIND(.save {r1, lr})
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,6 +47,7 @@ WEAK(arm_clear_user)
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
+UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
......
@@ -419,28 +419,24 @@ config CPU_THUMBONLY
 config CPU_32v3
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU

 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU

 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU

 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
-	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select NEED_KUSER_HELPERS
 	select TLS_REG_EMUL if SMP || !MMU
@@ -805,14 +801,6 @@ config TLS_REG_EMUL
 	  a few prototypes like that in existence) and therefore access to
 	  that required register must be emulated.

-config NEEDS_SYSCALL_FOR_CMPXCHG
-	bool
-	select NEED_KUSER_HELPERS
-	help
-	  SMP on a pre-ARMv6 processor?  Well OK then.
-	  Forget about fast user space cmpxchg support.
-	  It is just not possible.
-
 config NEED_KUSER_HELPERS
 	bool
......
@@ -1407,12 +1407,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	unsigned long uaddr = vma->vm_start;
 	unsigned long usize = vma->vm_end - vma->vm_start;
 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long off = vma->vm_pgoff;

 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

 	if (!pages)
 		return -ENXIO;

+	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+		return -ENXIO;
+
+	pages += off;
+
 	do {
 		int ret = vm_insert_page(vma, uaddr, *pages++);
 		if (ret) {
......
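The new test rejects user mappings that start past the end of the DMA
buffer or would run beyond it, taking vma->vm_pgoff into account. A
standalone sketch of the same bounds arithmetic (names are illustrative;
the kernel hunk above is the authoritative version):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* true if mapping usize bytes at page offset off fits inside a
     * buffer of size bytes - the inverse of the kernel's -ENXIO test. */
    static bool mmap_range_ok(size_t size, size_t usize, unsigned long off)
    {
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        return off < nr_pages && (usize >> PAGE_SHIFT) <= nr_pages - off;
    }

    int main(void)
    {
        assert(mmap_range_ok(4 * PAGE_SIZE, 2 * PAGE_SIZE, 2));  /* fits      */
        assert(!mmap_range_ok(4 * PAGE_SIZE, 2 * PAGE_SIZE, 3)); /* 1 page over */
        assert(!mmap_range_ok(4 * PAGE_SIZE, PAGE_SIZE, 4));     /* offset OOB  */
        return 0;
    }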
@@ -593,6 +593,28 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	arm_notify_die("", regs, &info, ifsr, 0);
 }

+/*
+ * Abort handler to be used only during first unmasking of asynchronous aborts
+ * on the boot CPU. This makes sure that the machine will not die if the
+ * firmware/bootloader left an imprecise abort pending for us to trip over.
+ */
+static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
+				      struct pt_regs *regs)
+{
+	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
+		"first unmask, this is most likely caused by a "
+		"firmware/bootloader bug.\n", fsr);
+
+	return 0;
+}
+
+void __init early_abt_enable(void)
+{
+	fsr_info[22].fn = early_abort_handler;
+	local_abt_enable();
+	fsr_info[22].fn = do_bad;
+}
+
 #ifndef CONFIG_ARM_LPAE
 static int __init exceptions_init(void)
 {
......
@@ -24,5 +24,6 @@ static inline int fsr_fs(unsigned int fsr)
 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);

 unsigned long search_exception_table(unsigned long addr);
+void early_abt_enable(void);

 #endif	/* __ARCH_ARM_FAULT_H */
@@ -38,6 +38,7 @@
 #include <asm/mach/pci.h>
 #include <asm/fixmap.h>

+#include "fault.h"
 #include "mm.h"
 #include "tcm.h"

@@ -1363,6 +1364,9 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 	 */
 	local_flush_tlb_all();
 	flush_cache_all();
+
+	/* Enable asynchronous aborts */
+	early_abt_enable();
 }

 static void __init kmap_init(void)
......
@@ -66,7 +66,7 @@
 	((((x) & 0x000000ff) << 24) | \
 	 (((x) & 0x0000ff00) <<  8) | \
 	 (((x) & 0x00ff0000) >>  8) | \
-	 (((x) & 0xff000000) << 24))
+	 (((x) & 0xff000000) >> 24))

 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define HOST_ORDER ELFDATA2LSB
......
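The one-character bug is easiest to see with a worked value: the old macro
shifted the top byte a further 24 bits left, where it overflowed away,
instead of 24 bits right to the bottom. A small hedged self-check (assuming
a host with 32-bit unsigned int, as vdsomunge itself does):

    /* swab_check.c - the old macro drops the top byte; the fixed one
     * performs a proper byte swap. */
    #include <assert.h>
    #include <stdint.h>

    #define OLD_SWAB32(x) ((((x) & 0x000000ff) << 24) | \
                           (((x) & 0x0000ff00) <<  8) | \
                           (((x) & 0x00ff0000) >>  8) | \
                           (((x) & 0xff000000) << 24))

    #define NEW_SWAB32(x) ((((x) & 0x000000ff) << 24) | \
                           (((x) & 0x0000ff00) <<  8) | \
                           (((x) & 0x00ff0000) >>  8) | \
                           (((x) & 0xff000000) >> 24))

    int main(void)
    {
        uint32_t x = 0xAABBCCDDu;
        assert(NEW_SWAB32(x) == 0xDDCCBBAAu); /* correct swap */
        assert(OLD_SWAB32(x) == 0xDDCCBB00u); /* 0xAA shifted out */
        return 0;
    }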
@@ -41,8 +41,6 @@ struct amba_driver {
 	int			(*probe)(struct amba_device *, const struct amba_id *);
 	int			(*remove)(struct amba_device *);
 	void			(*shutdown)(struct amba_device *);
-	int			(*suspend)(struct amba_device *, pm_message_t);
-	int			(*resume)(struct amba_device *);
 	const struct amba_id	*id_table;
 };
......
@@ -43,6 +43,12 @@ static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
 	printk("%.*s", (end - start) + 1, buf);
 }

+/*
+ * When raise() is called it will be is passed a pointer to the
+ * backtrace_mask. Architectures that call nmi_cpu_backtrace()
+ * directly from their raise() functions may rely on the mask
+ * they are passed being updated as a side effect of this call.
+ */
 void nmi_trigger_all_cpu_backtrace(bool include_self,
 				   void (*raise)(cpumask_t *mask))
 {
@@ -149,7 +155,10 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 		/* Replace printk to write into the NMI seq */
 		this_cpu_write(printk_func, nmi_vprintk);
 		pr_warn("NMI backtrace for cpu %d\n", cpu);
-		show_regs(regs);
+		if (regs)
+			show_regs(regs);
+		else
+			dump_stack();
 		this_cpu_write(printk_func, printk_func_save);

 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
......