Commit 715b49ef authored by Alan Cox, committed by Linus Torvalds

[PATCH] EDAC: atomic scrub operations

EDAC requires a way to scrub memory if an ECC error is found and the chipset
does not do the work automatically.  That means rewriting memory locations
atomically with respect to all CPUs _and_ bus masters.  That rules out
atomic_add(foo, 0), which compiles to an unlocked operation on non-SMP builds.
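As an illustration (not part of the patch): on a !CONFIG_SMP build the
kernel's LOCK macro expands to nothing, so atomic_add() degenerates to a
plain addl, which is atomic against interrupts on the local CPU but not
against other bus masters such as DMA engines.  A minimal i386 sketch of
the difference, using the same inline asm conventions as the code below:

/* Sketch only.  The unlocked form is what atomic_add(0, v) becomes on
 * non-SMP kernels; the locked form asserts the bus lock for the whole
 * read-modify-write, excluding DMA masters as well as other CPUs. */
static __inline__ void scrub_word_unlocked(u32 *p)	/* NOT DMA safe */
{
	__asm__ __volatile__("addl $0, %0" : "+m" (*p));
}

static __inline__ void scrub_word_locked(u32 *p)	/* DMA/SMP safe */
{
	__asm__ __volatile__("lock; addl $0, %0" : "+m" (*p));
}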

This adds a function to include/asm-foo/atomic.h for each of the currently
supported platforms, implementing a scrub of a mapped block.
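For context, a hedged sketch of how a caller might drive it (the function
name and locking details here are illustrative, not part of this patch):
an EDAC driver maps the page that took the ECC error and rewrites it in
place so the memory controller regenerates the check bits.

/* Illustrative only; assumes linux/mm.h, linux/highmem.h and the
 * 2.6-era kmap_atomic(page, type) API. */
static void example_scrub_block(unsigned long pfn, unsigned long offset,
				u32 size)
{
	struct page *pg = pfn_to_page(pfn);
	unsigned long flags = 0;
	void *virt_addr;

	/* Keep interrupts away while a highmem page occupies the
	 * atomic kmap slot. */
	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
	atomic_scrub(virt_addr + offset, size);
	kunmap_atomic(virt_addr, KM_BOUNCE_READ);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}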

It also adjusts the include order in a few other files where atomic.h was
included before types.h, as that now causes a build error because
atomic_scrub() uses u32.
Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3213e913
@@ -8,6 +8,7 @@
  * completion notification.
  */
+#include <asm/types.h>
 #include <asm/atomic.h>
 #include <linux/blkdev.h>
...
@@ -5,6 +5,7 @@
  *
  */
 #include <linux/config.h>
+#include <linux/types.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/sunrpc/svc.h>
...
@@ -255,5 +255,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #include <asm-generic/atomic.h>
 #endif
@@ -426,5 +426,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(u32 *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #include <asm-generic/atomic.h>
 #endif
@@ -42,8 +42,8 @@
  */
 #include <linux/init.h>
-#include <asm/atomic.h>
 #include <asm/types.h>
+#include <asm/atomic.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/err.h>
...
@@ -30,8 +30,8 @@
  */
 #include <linux/init.h>
-#include <asm/atomic.h>
 #include <asm/types.h>
+#include <asm/atomic.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/mount.h>
...
@@ -40,12 +40,12 @@
  */
 #include <linux/config.h>
+#include <linux/types.h>
 #include <asm/atomic.h>
 #include <asm/byteorder.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
-#include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
...
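For architectures this patch does not cover, one could imagine a generic
fallback built on cmpxchg(); the sketch below is hypothetical and NOT part
of the patch.  Whether cmpxchg() locks the bus against DMA masters is
architecture dependent, which is exactly why atomic_scrub() is implemented
per architecture instead.

/* Hypothetical fallback, not in this patch: rewrite each word with a
 * compare-and-swap so the store back is a full atomic read-modify-write
 * on the CPU side. */
static inline void atomic_scrub_cas(u32 *virt_addr, u32 size)
{
	u32 i;

	for (i = 0; i < size / 4; i++, virt_addr++) {
		u32 old;

		do {
			old = *virt_addr;
		} while (cmpxchg(virt_addr, old, old) != old);
	}
}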