Commit d898d485 authored by Linus Torvalds

Merge master.kernel.org:/home/rmk/linux-2.6-arm

parents df7addbb 7c612bfd
@@ -131,7 +131,7 @@ __syscall_start:
.long sys_wait4
/* 115 */ .long sys_swapoff
.long sys_sysinfo
.long sys_ipc_wrapper
.long sys_ipc
.long sys_fsync
.long sys_sigreturn_wrapper
/* 120 */ .long sys_clone_wrapper
@@ -254,7 +254,7 @@ __syscall_start:
.long sys_fremovexattr
.long sys_tkill
.long sys_sendfile64
/* 240 */ .long sys_futex_wrapper
/* 240 */ .long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_io_setup
@@ -284,7 +284,7 @@ __syscall_start:
.long sys_fstatfs64
.long sys_tgkill
.long sys_utimes
/* 270 */ .long sys_arm_fadvise64_64_wrapper
/* 270 */ .long sys_arm_fadvise64_64
.long sys_pciconfig_iobase
.long sys_pciconfig_read
.long sys_pciconfig_write
@@ -333,7 +333,7 @@ __syscall_start:
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch
.long sys_mbind_wrapper
.long sys_mbind
/* 320 */ .long sys_get_mempolicy
.long sys_set_mempolicy
__syscall_end:
@@ -611,6 +611,47 @@ ENTRY(__switch_to)
.globl __kuser_helper_start
__kuser_helper_start:
/*
* Reference prototype:
*
* void __kernel_memory_barrier(void)
*
* Input:
*
* lr = return address
*
* Output:
*
* none
*
* Clobbered:
*
* the Z flag might be lost
*
* Definition and user space usage example:
*
* typedef void (__kernel_dmb_t)(void);
* #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
*
* Apply any needed memory barrier to preserve consistency with data modified
* manually and __kuser_cmpxchg usage.
*
* This could be used as follows:
*
* #define __kernel_dmb() \
* asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
* : : : "lr","cc" )
*/
__kuser_memory_barrier: @ 0xffff0fa0
#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
mov pc, lr
.align 5
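As a usage illustration only (a sketch, not part of this patch): the typedef and address quoted in the comment above can be wrapped from C roughly as follows; the function name user_dmb() is hypothetical.

typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

/* Sketch: issue a memory barrier through the kuser helper page so that
 * plain loads/stores are ordered against __kuser_cmpxchg users.
 * user_dmb() is a hypothetical wrapper name used only here. */
static inline void user_dmb(void)
{
	__kernel_dmb();
}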
/*
* Reference prototype:
*
@@ -642,6 +683,8 @@ __kuser_helper_start:
* The C flag is also set if *ptr was changed to allow for assembly
* optimization in the calling code.
*
* Note: this routine already includes memory barriers as needed.
*
* For example, a user space atomic_add implementation could look like this:
*
* #define atomic_add(ptr, val) \
@@ -698,10 +741,16 @@ __kuser_cmpxchg: @ 0xffff0fc0
#else
#ifdef CONFIG_SMP
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
ldrex r3, [r2]
subs r3, r3, r0
strexeq r3, r1, [r2]
rsbs r0, r3, #0
#ifdef CONFIG_SMP
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
mov pc, lr
#endif
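For illustration only (not part of this patch): assuming the helper's usual C prototype, where a return value of zero means the new value was stored, a user-space atomic add can be built as a retry loop around __kuser_cmpxchg. The name atomic_add_user() is hypothetical.

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* Sketch: re-read *ptr and retry until the compare-and-swap succeeds
 * (the helper returns 0 when it stored the new value). */
static inline void atomic_add_user(volatile int *ptr, int val)
{
	int old;
	do {
		old = *ptr;
	} while (__kernel_cmpxchg(old, old + val, ptr) != 0);
}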
@@ -145,7 +145,7 @@ ENTRY(vector_swi)
#endif
enable_irq
str r4, [sp, #-S_OFF]! @ push fifth arg
stmdb sp!, {r4, r5} @ push fifth and sixth args
get_thread_info tsk
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
@@ -204,7 +204,7 @@ ENTRY(sys_call_table)
* Special system call wrappers
*/
@ r0 = syscall number
@ r5 = syscall table
@ r8 = syscall table
.type sys_syscall, #function
sys_syscall:
eor scno, r0, #__NR_SYSCALL_BASE
@@ -255,22 +255,6 @@ sys_sigaltstack_wrapper:
ldr r2, [sp, #S_OFF + S_SP]
b do_sigaltstack
sys_futex_wrapper:
str r5, [sp, #4] @ push sixth arg
b sys_futex
sys_arm_fadvise64_64_wrapper:
str r5, [sp, #4] @ push r5 to stack
b sys_arm_fadvise64_64
sys_mbind_wrapper:
str r5, [sp, #4]
b sys_mbind
sys_ipc_wrapper:
str r5, [sp, #4] @ push sixth arg
b sys_ipc
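These wrappers become unnecessary because vector_swi now does stmdb sp!, {r4, r5} on every call, so the fifth and sixth user-space arguments already reach the generic sys_* entry points. A hedged user-space sketch of a six-argument call (futex_wait() is a hypothetical helper; the argument-to-register mapping follows the normal ARM calling convention):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

/* Sketch: a six-argument syscall such as futex() needs no per-call
 * assembly wrapper any more; all six arguments go in r0-r5 as usual
 * and the kernel spills r4/r5 itself. */
static int futex_wait(int *uaddr, int val)
{
	/* uaddr, op, val, timeout, uaddr2, val3 map to r0..r5 */
	return (int)syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
}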
/*
* Note: off_4k (r5) is always units of 4K. If we can't do the requested
* offset, we return EINVAL.
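Because off_4k is expressed in 4 KiB units, a user-space caller converts a byte offset before invoking mmap2. A sketch under that assumption (map_at() is a hypothetical helper; the offset is assumed to be a multiple of 4096, and the kernel returns -EINVAL for offsets it cannot express, per the comment above):

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>

/* Sketch: mmap2 takes its file offset in 4K units, so the byte offset
 * is shifted down by 12 before the call. */
static void *map_at(int fd, size_t len, unsigned long byte_off)
{
	return (void *)syscall(SYS_mmap2, NULL, len, PROT_READ,
			       MAP_SHARED, fd, byte_off >> 12);
}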