Commit 8e86dd73 authored by David S. Miller

Merge nuts.davemloft.net:/disk1/BK/sparcwork-2.6

into nuts.davemloft.net:/disk1/BK/sparc-2.6
parents bb262c63 8c5c8645
@@ -135,6 +135,7 @@ EXPORT_SYMBOL(__write_lock);
EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
/* Out of line spin-locking implementation. */
EXPORT_SYMBOL(_raw_spin_lock);
EXPORT_SYMBOL(_raw_spin_lock_flags);
#endif
@@ -333,7 +334,6 @@ EXPORT_SYMBOL(sys_close);
#endif
/* Special internal versions of library functions. */
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(_clear_page);
EXPORT_SYMBOL(clear_user_page);
......
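A hedged sketch of why the new export matters: on an SMP sparc64 kernel, spin_lock() now resolves to the out-of-line _raw_spin_lock, so any module that takes a spinlock needs the symbol exported. The module below is hypothetical and uses the 2.6-era spinlock API, not code from this commit.

/* Hypothetical module-side user of the exported _raw_spin_lock. */
#include <linux/module.h>
#include <linux/spinlock.h>

static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
static int demo_count;

static void demo_bump(void)
{
	spin_lock(&demo_lock);	/* ends up calling _raw_spin_lock on SMP */
	demo_count++;
	spin_unlock(&demo_lock);
}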
@@ -306,11 +306,7 @@
.globl __memcpy_begin
__memcpy_begin:
.globl __memcpy
.type __memcpy,@function
memcpy_private:
__memcpy:
memcpy: mov ASI_P, asi_src ! IEU0 Group
brnz,pt %o2, __memcpy_entry ! CTI
mov ASI_P, asi_dest ! IEU1
......
@@ -6,6 +6,18 @@
.text
.align 64
.globl _raw_spin_lock
_raw_spin_lock: /* %o0 = lock_ptr */
1: ldstub [%o0], %g7
brnz,pn %g7, 2f
membar #StoreLoad | #StoreStore
retl
nop
2: ldub [%o0], %g7
brnz,pt %g7, 2b
membar #LoadLoad
ba,a,pt %xcc, 1b
.globl _raw_spin_lock_flags
_raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */
1: ldstub [%o0], %g7
......
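For readers not fluent in SPARC assembly, here is a rough C rendering of the ldstub-based loop above: ldstub atomically reads a byte and stores 0xff, so a zero return value means the lock was free and is now held. This is an illustration only, using a GCC builtin in place of ldstub and gliding over the membar placement in the real code.

/* Sketch of the test-and-test-and-set loop in _raw_spin_lock. */
typedef unsigned char spinlock_sketch_t;

static void spin_lock_sketch(volatile spinlock_sketch_t *lock)
{
	for (;;) {
		/* atomic test-and-set, like "ldstub [%o0], %g7" */
		if (__sync_lock_test_and_set(lock, 0xff) == 0)
			return;		/* previous value 0: lock acquired */
		/* spin on plain loads until the holder releases it,
		 * like the "2: ldub ... brnz,pt %g7, 2b" inner loop */
		while (*lock)
			;
	}
}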
@@ -18,7 +18,7 @@ extern void _clear_page(void *page);
#define clear_page(X) _clear_page((void *)(X))
struct page;
extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define copy_page(X,Y) __memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
/* GROSS, defining this makes gcc pass these types as aggregates,
......
@@ -41,22 +41,8 @@ typedef unsigned char spinlock_t;
do { membar("#LoadLoad"); \
} while(*((volatile unsigned char *)lock))
static __inline__ void _raw_spin_lock(spinlock_t *lock)
{
__asm__ __volatile__(
"1: ldstub [%0], %%g7\n"
" brnz,pn %%g7, 2f\n"
" membar #StoreLoad | #StoreStore\n"
" .subsection 2\n"
"2: ldub [%0], %%g7\n"
" brnz,pt %%g7, 2b\n"
" membar #LoadLoad\n"
" b,a,pt %%xcc, 1b\n"
" .previous\n"
: /* no outputs */
: "r" (lock)
: "g7", "memory");
}
/* arch/sparc64/lib/spinlock.S */
extern void _raw_spin_lock(spinlock_t *lock);
static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
......
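The inline-asm lock body is replaced by an extern declaration, so every lock site now emits a call into arch/sparc64/lib/spinlock.S instead of duplicating the ldstub loop. The trylock variant that stays in the header (truncated above) simply attempts a single atomic byte store; a hedged C illustration of that semantic, not the header's actual code:

/* Illustration only: trylock succeeds iff the ldstub-style
 * test-and-set observed the lock byte as zero. */
static int spin_trylock_sketch(volatile unsigned char *lock)
{
	return __sync_lock_test_and_set(lock, 0xff) == 0;
}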
@@ -16,9 +16,7 @@
#include <asm/asi.h>
extern void __memmove(void *,const void *,__kernel_size_t);
extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
extern void *__memset(void *,int,__kernel_size_t);
extern void *__builtin_memcpy(void *,const void *,__kernel_size_t);
extern void *__builtin_memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
@@ -37,29 +35,7 @@ extern void *__builtin_memset(void *,int,__kernel_size_t);
#define __HAVE_ARCH_MEMCPY
static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
{
if(n) {
if(n <= 32) {
__builtin_memcpy(to, from, n);
} else {
__memcpy(to, from, n);
}
}
return to;
}
static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
{
__memcpy(to, from, n);
return to;
}
#undef memcpy
#define memcpy(t, f, n) \
(__builtin_constant_p(n) ? \
__constant_memcpy((t),(f),(n)) : \
__nonconstant_memcpy((t),(f),(n)))
extern void * memcpy(void *,const void *,__kernel_size_t);
#define __HAVE_ARCH_MEMSET
......
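The hunk above drops the constant-size dispatch machinery in favor of a plain extern memcpy declaration (which is also why copy_page in page.h switches from __memcpy to memcpy). As a hedged, generic sketch of the idiom being removed, with names that are illustrative rather than the kernel's:

/* Dispatch on whether the length is a compile-time constant:
 * small constant copies are open-coded by the compiler, everything
 * else goes to the out-of-line routine. */
#include <stddef.h>

void *arch_memcpy(void *to, const void *from, size_t n);	/* hypothetical */

#define fast_memcpy(t, f, n)					\
	(__builtin_constant_p(n) && (n) <= 32 ?		\
		__builtin_memcpy((t), (f), (n)) :		\
		arch_memcpy((t), (f), (n)))

The new approach leans on gcc to recognize calls to the standard memcpy name and open-code the constant cases itself, which makes the manual dispatch unnecessary.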