Commit 7c612bfd authored by Nicolas Pitre, committed by Russell King

[ARM] 3210/1: add missing memory barrier helper for NPTL support

Patch from Nicolas Pitre

Strictly speaking, the NPTL kernel helpers are required for pre-ARMv6
only.  They are available on ARMv6+ as well for obvious compatibility
reasons.  However, there are cases where extra memory barriers are needed
on an SMP ARMv6 machine but not on pre-ARMv6.

This patch adds a memory barrier kernel helper that glibc can use as
needed for pre-ARMv6 binaries to be forward compatible with an SMP
kernel on ARMv6, as well as the necessary dmb instructions to the
cmpxchg helper.
Signed-off-by: Nicolas Pitre <nico@cam.org>
Acked-by: Daniel Jacobowitz <dan@codesourcery.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 567bd980
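
The new helper sits at the fixed address 0xffff0fa0 in the kuser vector page; the asm sequence in the comment below reaches it as 0xffff0fff - 95 = 0xffff0fa0. As a minimal C sketch of how a pre-ARMv6 user space binary might use it, assuming only the typedef and address from the reference prototype in the diff (the producer/consumer variables and functions are illustrative, not part of the patch):

	/* Sketch: uses the documented kuser helper at 0xffff0fa0.
	 * The typedef and address come from the reference prototype below;
	 * shared_data/data_ready and the two functions are hypothetical. */
	typedef void (__kernel_dmb_t)(void);
	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

	static volatile int shared_data;
	static volatile int data_ready;

	void producer(int value)
	{
		shared_data = value;
		__kernel_dmb();		/* order the data store before the flag store */
		data_ready = 1;
	}

	int consumer(void)
	{
		while (!data_ready)
			;		/* wait for the producer's flag */
		__kernel_dmb();		/* order the flag load before the data load */
		return shared_data;
	}

On a pre-ARMv6 or non-SMP kernel the helper is a plain return, so the same binary stays correct with almost no overhead; on an SMP ARMv6 kernel it executes a real dmb.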
@@ -611,6 +611,47 @@ ENTRY(__switch_to)
 	.globl	__kuser_helper_start
 __kuser_helper_start:
 
+/*
+ * Reference prototype:
+ *
+ *	void __kernel_memory_barrier(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	none
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef void (__kernel_dmb_t)(void);
+ *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
+ *
+ * Apply any needed memory barrier to preserve consistency with data modified
+ * manually and __kuser_cmpxchg usage.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_dmb() \
+ *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
+ *		: : : "lr","cc" )
+ */
+
+__kuser_memory_barrier:				@ 0xffff0fa0
+
+#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+	mov	pc, lr
+
+	.align	5
+
 /*
  * Reference prototype:
  *
@@ -642,6 +683,8 @@ __kuser_helper_start:
  * The C flag is also set if *ptr was changed to allow for assembly
  * optimization in the calling code.
  *
+ * Note: this routine already includes memory barriers as needed.
+ *
  * For example, a user space atomic_add implementation could look like this:
  *
  * #define atomic_add(ptr, val) \
@@ -698,10 +741,16 @@ __kuser_cmpxchg: @ 0xffff0fc0
 
 #else
 
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	ldrex	r3, [r2]
 	subs	r3, r3, r0
 	strexeq	r3, r1, [r2]
 	rsbs	r0, r3, #0
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
 	mov	pc, lr
 
 #endif
...
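
Because the helper now issues its own dmb instructions on SMP, user space atomics built on __kuser_cmpxchg need no explicit barrier calls around it. The assembly atomic_add example referenced in the comment above is elided by the diff; a rough C equivalent of the same pattern, assuming the documented kuser cmpxchg interface at 0xffff0fc0 (which returns zero when *ptr was updated):

	/* Sketch: the documented kuser cmpxchg helper at 0xffff0fc0.
	 * atomic_add here is a hypothetical C rendering of the pattern the
	 * kernel comment describes, not the kernel's assembly example. */
	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

	static void atomic_add(volatile int *ptr, int val)
	{
		int old;

		do {
			old = *ptr;		/* snapshot the current value */
		} while (__kernel_cmpxchg(old, old + val, ptr) != 0);
	}

The retry loop is correct because the helper fails (returns non-zero) whenever *ptr no longer holds oldval, and with this patch the barriers needed on SMP ARMv6 are embedded in the helper itself.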