Commit 5a4aab32 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] Sparc64 local_t support

These are the local_t bits for sparc64.

Other platforms such as PPC64 and Alpha (and in fact any "load locked/store
conditional"-style platform other than IA64) probably want to do something
similar.
parent 518538b3
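
For readers unfamiliar with the interface, here is a minimal usage sketch of a per-cpu local_t counter, the kind of thing this patch enables on sparc64. The variable and helper names below are illustrative only and are not part of this patch:

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-cpu event counter; "example_events" is made up. */
static DEFINE_PER_CPU(local_t, example_events) = LOCAL_INIT(0);

static inline void count_example_event(void)
{
	/* Only needs to be atomic with respect to the local CPU; on
	 * sparc64 this maps onto the atomic64 operations added below.
	 */
	cpu_local_inc(example_events);
}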
@@ -176,6 +176,8 @@ EXPORT_SYMBOL(up);
 /* Atomic counter implementation. */
 EXPORT_SYMBOL(__atomic_add);
 EXPORT_SYMBOL(__atomic_sub);
+EXPORT_SYMBOL(__atomic64_add);
+EXPORT_SYMBOL(__atomic64_sub);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(atomic_dec_and_lock);
 #endif
......
@@ -41,6 +41,7 @@
 #include <asm/isa.h>
 #include <asm/starfire.h>
 #include <asm/smp.h>
+#include <asm/sections.h>

 spinlock_t mostek_lock = SPIN_LOCK_UNLOCKED;
 spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
@@ -449,7 +450,6 @@ void sparc64_do_profile(struct pt_regs *regs)
 		return;

 	{
-		extern int _stext;
 		extern int rwlock_impl_begin, rwlock_impl_end;
 		extern int atomic_impl_begin, atomic_impl_end;
 		extern int __memcpy_begin, __memcpy_end;
@@ -468,7 +468,7 @@ void sparc64_do_profile(struct pt_regs *regs)
 		     pc < (unsigned long) &__bitops_end))
 			pc = o7;

-		pc -= (unsigned long) &_stext;
+		pc -= (unsigned long) _stext;
 		pc >>= prof_shift;
 		if(pc >= prof_len)
......
@@ -33,4 +33,27 @@ __atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
 	 membar	#StoreLoad | #StoreStore
 	retl
 	 sub	%g7, %o0, %o0
+
+	.globl	__atomic64_add
+__atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+	ldx	[%o1], %g5
+	add	%g5, %o0, %g7
+	casx	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%xcc, __atomic64_add
+	 membar	#StoreLoad | #StoreStore
+	retl
+	 add	%g7, %o0, %o0
+
+	.globl	__atomic64_sub
+__atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
+	ldx	[%o1], %g5
+	sub	%g5, %o0, %g7
+	casx	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%xcc, __atomic64_sub
+	 membar	#StoreLoad | #StoreStore
+	retl
+	 sub	%g7, %o0, %o0
+
 atomic_impl_end:
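
The routines above are the standard compare-and-swap retry loop. As a rough C sketch of what __atomic64_add does (explanatory only, not patch code, with GCC's __sync_val_compare_and_swap builtin standing in for the casx instruction):

static long atomic64_add_sketch(long increment, volatile long *ptr)
{
	long old, new;

	do {
		old = *ptr;               /* ldx  [%o1], %g5    */
		new = old + increment;    /* add  %g5, %o0, %g7 */
		/* Publish 'new' only if the location still holds 'old';
		 * otherwise another CPU got in first, so retry
		 * (casx [%o1], %g5, %g7; cmp; bne,pn %xcc).
		 */
	} while (__sync_val_compare_and_swap((long *)ptr, old, new) != old);

	return new;                       /* retl; add %g7, %o0, %o0 */
}

The membar #StoreLoad | #StoreStore in the delay slot also makes the operation act as a memory barrier, which the C sketch glosses over.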
@@ -26,6 +26,7 @@
 #include <asm/uaccess.h>
 #include <asm/asi.h>
 #include <asm/lsu.h>
+#include <asm/sections.h>

 #define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
@@ -320,10 +321,9 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
 	if (regs->tstate & TSTATE_PRIV) {
 		unsigned long tpc = regs->tpc;
-		extern unsigned int _etext;

 		/* Sanity check the PC. */
-		if ((tpc >= KERNBASE && tpc < (unsigned long) &_etext) ||
+		if ((tpc >= KERNBASE && tpc < (unsigned long) _etext) ||
 		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
 			/* Valid, no problems... */
 		} else {
......
@@ -35,6 +35,7 @@
 #include <asm/starfire.h>
 #include <asm/tlb.h>
 #include <asm/spitfire.h>
+#include <asm/sections.h>

 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -54,8 +55,8 @@ unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
 #define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
 unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

-/* References to section boundaries */
-extern char __init_begin, __init_end, _start, _end, etext, edata;
+/* References to special section boundaries */
+extern char _start[], _end[];

 /* Initial ramdisk setup */
 extern unsigned int sparc_ramdisk_image;
@@ -1331,7 +1332,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	 * image. The kernel is hard mapped below PAGE_OFFSET in a
 	 * 4MB locked TLB translation.
 	 */
-	start_pfn = PAGE_ALIGN((unsigned long) &_end) -
+	start_pfn = PAGE_ALIGN((unsigned long) _end) -
 		((unsigned long) KERNBASE);

 	/* Adjust up to the physical address where the kernel begins. */
@@ -1347,7 +1348,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
 	if (sparc_ramdisk_image) {
-		if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
+		if (sparc_ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
 			sparc_ramdisk_image -= KERNBASE;
 		initrd_start = sparc_ramdisk_image + phys_base;
 		initrd_end = initrd_start + sparc_ramdisk_size;
@@ -1424,7 +1425,7 @@ void __init paging_init(void)
 	set_bit(0, mmu_context_bmap);

-	real_end = (unsigned long)&_end;
+	real_end = (unsigned long)_end;
 	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
 		bigkernel = 1;
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1716,7 +1717,7 @@ void __init mem_init(void)
 	memset(sparc64_valid_addr_bitmap, 0, i << 3);
 	addr = PAGE_OFFSET + phys_base;
-	last = PAGE_ALIGN((unsigned long)&_end) -
+	last = PAGE_ALIGN((unsigned long)_end) -
 		((unsigned long) KERNBASE);
 	last += PAGE_OFFSET + phys_base;
 	while (addr < last) {
@@ -1743,11 +1744,11 @@ void __init mem_init(void)
 	SetPageReserved(mem_map_zero);
 	clear_page(page_address(mem_map_zero));

-	codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
+	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
 	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
-	datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
+	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
 	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
-	initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
+	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
 	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

 #ifndef CONFIG_SMP
@@ -1810,8 +1811,8 @@ void free_initmem (void)
 	/*
 	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
 	 */
-	addr = PAGE_ALIGN((unsigned long)(&__init_begin));
-	initend = (unsigned long)(&__init_end) & PAGE_MASK;
+	addr = PAGE_ALIGN((unsigned long)(__init_begin));
+	initend = (unsigned long)(__init_end) & PAGE_MASK;
 	for (; addr < initend; addr += PAGE_SIZE) {
 		unsigned long page;
 		struct page *p;
......
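
The time.c, fault.c and init.c hunks above all follow the same pattern: drop the local "extern int foo;" declarations and take the section symbols from <asm/sections.h> (the new header added at the end of this patch), which declares them as arrays. The bare symbol name then already decays to the boundary address, so the leading '&' disappears. A small illustrative helper, not part of this patch:

#include <asm/sections.h>	/* declares extern char _stext[], _etext[], ... */

/* Hypothetical example: with the array-style declarations the symbol
 * itself is the address, so the old "&_etext" becomes plain "_etext".
 */
static inline int pc_in_kernel_text(unsigned long pc)
{
	return pc >= (unsigned long) _stext &&
	       pc <  (unsigned long) _etext;
}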
@@ -9,25 +9,46 @@
 #define __ARCH_SPARC64_ATOMIC__

 typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile long counter; } atomic64_t;

 #define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }

 #define atomic_read(v) ((v)->counter)
+#define atomic64_read(v) ((v)->counter)

 #define atomic_set(v, i) (((v)->counter) = i)
+#define atomic64_set(v, i) (((v)->counter) = i)

 extern int __atomic_add(int, atomic_t *);
+extern int __atomic64_add(int, atomic64_t *);
 extern int __atomic_sub(int, atomic_t *);
+extern int __atomic64_sub(int, atomic64_t *);

 #define atomic_add(i, v) ((void)__atomic_add(i, v))
+#define atomic64_add(i, v) ((void)__atomic64_add(i, v))
 #define atomic_sub(i, v) ((void)__atomic_sub(i, v))
+#define atomic64_sub(i, v) ((void)__atomic64_sub(i, v))

 #define atomic_dec_return(v) __atomic_sub(1, v)
+#define atomic64_dec_return(v) __atomic64_sub(1, v)
 #define atomic_inc_return(v) __atomic_add(1, v)
+#define atomic64_inc_return(v) __atomic64_add(1, v)

 #define atomic_sub_and_test(i, v) (__atomic_sub(i, v) == 0)
+#define atomic64_sub_and_test(i, v) (__atomic64_sub(i, v) == 0)

 #define atomic_dec_and_test(v) (__atomic_sub(1, v) == 0)
+#define atomic64_dec_and_test(v) (__atomic64_sub(1, v) == 0)

 #define atomic_inc(v) ((void)__atomic_add(1, v))
+#define atomic64_inc(v) ((void)__atomic64_add(1, v))

 #define atomic_dec(v) ((void)__atomic_sub(1, v))
+#define atomic64_dec(v) ((void)__atomic64_sub(1, v))

 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec() barrier()
......
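
Since __atomic64_add() and __atomic64_sub() return the updated value, the derived macros behave like their 32-bit counterparts. A brief illustrative example, not from this patch ("sequence" is a made-up variable):

#include <asm/atomic.h>

static atomic64_t sequence = ATOMIC64_INIT(0);

static long next_sequence(void)
{
	/* atomic64_inc_return() expands to __atomic64_add(1, &sequence)
	 * and returns the post-increment value: 1, 2, 3, ...
	 */
	return atomic64_inc_return(&sequence);
}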
#ifndef _ARCH_SPARC64_LOCAL_H
#define _ARCH_SPARC64_LOCAL_H

#include <linux/percpu.h>
#include <asm/atomic.h>

typedef atomic64_t local_t;

#define LOCAL_INIT(i) ATOMIC64_INIT(i)
#define local_read(v) atomic64_read(v)
#define local_set(v,i) atomic64_set(v,i)

#define local_inc(v) atomic64_inc(v)
#define local_dec(v) atomic64_dec(v)
#define local_add(i, v) atomic64_add(i, v)
#define local_sub(i, v) atomic64_sub(i, v)

#define __local_inc(v) ((v)->counter++)
#define __local_dec(v) ((v)->counter--)
#define __local_add(i,v) ((v)->counter+=(i))
#define __local_sub(i,v) ((v)->counter-=(i))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 */
#define cpu_local_read(v) local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))

#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))

#endif /* _ARCH_SPARC64_LOCAL_H */
#ifndef _SPARC64_SECTIONS_H
#define _SPARC64_SECTIONS_H
/* nothing to see, move along */
#include <asm-generic/sections.h>
#endif