Commit 67faca7d authored by Michael Matloob

runtime: break atomics out into package runtime/internal/atomic

This change breaks out most of the atomics functions in the runtime
into package runtime/internal/atomic. It adds some basic support
in the toolchain for runtime packages, and also modifies linux/arm
atomics to remove the dependency on the runtime's mutex. The mutexes
have been replaced with spinlocks.

All trybots are happy!
In addition to the trybots, I've tested on the darwin/arm64 builder,
on the darwin/arm builder, and on a ppc64le machine.

Change-Id: I6698c8e3cf3834f55ce5824059f44d00dc8e3c2f
Reviewed-on: https://go-review.googlesource.com/14204
Run-TryBot: Michael Matloob <matloob@golang.org>
Reviewed-by: Russ Cox <rsc@golang.org>
parent d3336057
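For readers unfamiliar with the spinlock approach mentioned in the commit message, here is a minimal user-level sketch of the idea. It is illustrative only: the names spinlock, lock64, and Cas64 are assumptions, and this is not the linux/arm code from this CL, which lives inside runtime/internal/atomic and cannot use sync/atomic.

// Illustrative sketch: emulating a 64-bit compare-and-swap on a 32-bit
// platform by guarding the value with a spinlock built from a native
// 32-bit atomic. Hypothetical user-level analogue, not the CL's code.
package atomicsketch

import "sync/atomic"

type spinlock struct{ v uint32 }

func (l *spinlock) lock() {
	// Spin until we win the 0 -> 1 transition.
	for !atomic.CompareAndSwapUint32(&l.v, 0, 1) {
	}
}

func (l *spinlock) unlock() { atomic.StoreUint32(&l.v, 0) }

// lock64 is a hypothetical global guarding emulated 64-bit operations.
var lock64 spinlock

// Cas64 performs a 64-bit compare-and-swap under the spinlock.
func Cas64(addr *uint64, old, new uint64) bool {
	lock64.lock()
	swapped := *addr == old
	if swapped {
		*addr = new
	}
	lock64.unlock()
	return swapped
}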
@@ -316,7 +316,9 @@ import_package:
} else if importpkg.Name != $2.Name {
Yyerror("conflicting names %s and %s for package %q", importpkg.Name, $2.Name, importpkg.Path);
}
+if incannedimport == 0 {
importpkg.Direct = true;
+}
importpkg.Safe = curio.importsafe
if safemode != 0 && !curio.importsafe {
...
@@ -31,7 +31,7 @@ import (
// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
-var omit_pkgs = []string{"runtime", "runtime/race", "runtime/msan"}
+var omit_pkgs = []string{"runtime/internal/atomic", "runtime", "runtime/race", "runtime/msan"}
// Only insert racefuncenter/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
...
@@ -989,7 +989,7 @@ func dtypesym(t *Type) *Sym {
dupok = obj.DUPOK
}
-if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
+if localpkg.Name == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
goto ok
}
@@ -1266,7 +1266,7 @@ func dumptypestructs() {
// so this is as good as any.
// another possible choice would be package main,
// but using runtime means fewer copies in .6 files.
-if compiling_runtime != 0 {
+if localpkg.Name == "runtime" {
for i := EType(1); i <= TBOOL; i++ {
dtypesym(Ptrto(Types[i]))
}
...
@@ -2155,7 +2155,7 @@ func (gcToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool,
if p.Name == "main" {
gcargs[1] = "main"
}
-if p.Standard && p.ImportPath == "runtime" {
+if p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) {
// runtime compiles with a special gc flag to emit
// additional reflect type data.
gcargs = append(gcargs, "-+")
...
@@ -832,8 +832,8 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
importPaths = append(importPaths, "runtime/cgo")
}
-// Everything depends on runtime, except runtime and unsafe.
-if !p.Standard || (p.ImportPath != "runtime" && p.ImportPath != "unsafe") {
+// Everything depends on runtime, except runtime, its subpackages, and unsafe.
+if !p.Standard || (p.ImportPath != "runtime" && !strings.HasPrefix(p.ImportPath, "runtime/") && p.ImportPath != "unsafe") {
importPaths = append(importPaths, "runtime")
// When race detection enabled everything depends on runtime/race.
// Exclude certain packages to avoid circular dependencies.
...
@@ -36,7 +36,8 @@ var pkgDeps = map[string][]string{
// L0 is the lowest level, core, nearly unavoidable packages.
"errors": {},
"io": {"errors", "sync"},
-"runtime": {"unsafe"},
+"runtime": {"unsafe", "runtime/internal/atomic"},
+"runtime/internal/atomic": {"unsafe"},
"sync": {"runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
"unsafe": {},
@@ -45,6 +46,7 @@ var pkgDeps = map[string][]string{
"errors",
"io",
"runtime",
+"runtime/internal/atomic",
"sync",
"sync/atomic",
"unsafe",
...
@@ -469,93 +469,6 @@ CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
// bool cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// }else
// return 0;
TEXT runtime·cas(SB), NOSPLIT, $0-13
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+12(FP)
RET
TEXT runtime·casuintptr(SB), NOSPLIT, $0-13
JMP runtime·cas(SB)
TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-8
JMP runtime·atomicload(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-8
JMP runtime·atomicload(SB)
TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-8
JMP runtime·atomicstore(SB)
// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtime·cas64(SB), NOSPLIT, $0-21
MOVL ptr+0(FP), BP
MOVL old_lo+4(FP), AX
MOVL old_hi+8(FP), DX
MOVL new_lo+12(FP), BX
MOVL new_hi+16(FP), CX
LOCK
CMPXCHG8B 0(BP)
SETEQ ret+20(FP)
RET
// bool casp(void **p, void *old, void *new)
// Atomically:
// if(*p == old){
// *p = new;
// return 1;
// }else
// return 0;
TEXT runtime·casp1(SB), NOSPLIT, $0-13
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+12(FP)
RET
// uint32 xadd(uint32 volatile *val, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtime·xadd(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL delta+4(FP), AX
MOVL AX, CX
LOCK
XADDL AX, 0(BX)
ADDL CX, AX
MOVL AX, ret+8(FP)
RET
TEXT runtime·xchg(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
MOVL AX, ret+8(FP)
RET
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-12
JMP runtime·xchg(SB)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:
@@ -564,69 +477,6 @@ again:
JNZ again
RET
TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtime·atomicstore(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
// uint64 atomicload64(uint64 volatile* addr);
TEXT runtime·atomicload64(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), AX
TESTL $7, AX
JZ 2(PC)
MOVL 0, AX // crash with nil ptr deref
LEAL ret_lo+4(FP), BX
// MOVQ (%EAX), %MM0
BYTE $0x0f; BYTE $0x6f; BYTE $0x00
// MOVQ %MM0, 0(%EBX)
BYTE $0x0f; BYTE $0x7f; BYTE $0x03
// EMMS
BYTE $0x0F; BYTE $0x77
RET
// void runtime·atomicstore64(uint64 volatile* addr, uint64 v);
TEXT runtime·atomicstore64(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), AX
TESTL $7, AX
JZ 2(PC)
MOVL 0, AX // crash with nil ptr deref
// MOVQ and EMMS were introduced on the Pentium MMX.
// MOVQ 0x8(%ESP), %MM0
BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
// MOVQ %MM0, (%EAX)
BYTE $0x0f; BYTE $0x7f; BYTE $0x00
// EMMS
BYTE $0x0F; BYTE $0x77
// This is essentially a no-op, but it provides required memory fencing.
// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
MOVL $0, AX
LOCK
XADDL AX, (SP)
RET
// void runtime·atomicor8(byte volatile*, byte);
TEXT runtime·atomicor8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), AX
MOVB val+4(FP), BX
LOCK
ORB BX, (AX)
RET
// void runtime·atomicand8(byte volatile*, byte);
TEXT runtime·atomicand8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), AX
MOVB val+4(FP), BX
LOCK
ANDB BX, (AX)
RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
// compile barrier.
...
@@ -495,111 +495,6 @@ CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
// bool cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime·cas(SB), NOSPLIT, $0-17
MOVQ ptr+0(FP), BX
MOVL old+8(FP), AX
MOVL new+12(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+16(FP)
RET
// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtime·cas64(SB), NOSPLIT, $0-25
MOVQ ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
SETEQ ret+24(FP)
RET
TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
JMP runtime·cas64(SB)
TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-16
JMP runtime·atomicload64(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-16
JMP runtime·atomicload64(SB)
TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
JMP runtime·atomicstore64(SB)
// bool casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime·casp1(SB), NOSPLIT, $0-25
MOVQ ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
SETEQ ret+24(FP)
RET
// uint32 xadd(uint32 volatile *val, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtime·xadd(SB), NOSPLIT, $0-20
MOVQ ptr+0(FP), BX
MOVL delta+8(FP), AX
MOVL AX, CX
LOCK
XADDL AX, 0(BX)
ADDL CX, AX
MOVL AX, ret+16(FP)
RET
TEXT runtime·xadd64(SB), NOSPLIT, $0-24
MOVQ ptr+0(FP), BX
MOVQ delta+8(FP), AX
MOVQ AX, CX
LOCK
XADDQ AX, 0(BX)
ADDQ CX, AX
MOVQ AX, ret+16(FP)
RET
TEXT runtime·xadduintptr(SB), NOSPLIT, $0-24
JMP runtime·xadd64(SB)
TEXT runtime·xchg(SB), NOSPLIT, $0-20
MOVQ ptr+0(FP), BX
MOVL new+8(FP), AX
XCHGL AX, 0(BX)
MOVL AX, ret+16(FP)
RET
TEXT runtime·xchg64(SB), NOSPLIT, $0-24
MOVQ ptr+0(FP), BX
MOVQ new+8(FP), AX
XCHGQ AX, 0(BX)
MOVQ AX, ret+16(FP)
RET
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
JMP runtime·xchg64(SB)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:
@@ -608,39 +503,6 @@ again:
JNZ again
RET
TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
RET
TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
MOVQ ptr+0(FP), BX
MOVL val+8(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
RET
// void runtime·atomicor8(byte volatile*, byte);
TEXT runtime·atomicor8(SB), NOSPLIT, $0-9
MOVQ ptr+0(FP), AX
MOVB val+8(FP), BX
LOCK
ORB BX, (AX)
RET
// void runtime·atomicand8(byte volatile*, byte);
TEXT runtime·atomicand8(SB), NOSPLIT, $0-9
MOVQ ptr+0(FP), AX
MOVB val+8(FP), BX
LOCK
ANDB BX, (AX)
RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
...
@@ -415,111 +415,6 @@ CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
// bool cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime·cas(SB), NOSPLIT, $0-17
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+16(FP)
RET
TEXT runtime·casuintptr(SB), NOSPLIT, $0-17
JMP runtime·cas(SB)
TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $0-12
JMP runtime·atomicload(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT, $0-12
JMP runtime·atomicload(SB)
TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-12
JMP runtime·atomicstore(SB)
// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtime·cas64(SB), NOSPLIT, $0-25
MOVL ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
SETEQ ret+24(FP)
RET
// bool casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime·casp1(SB), NOSPLIT, $0-17
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+16(FP)
RET
// uint32 xadd(uint32 volatile *val, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtime·xadd(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL delta+4(FP), AX
MOVL AX, CX
LOCK
XADDL AX, 0(BX)
ADDL CX, AX
MOVL AX, ret+8(FP)
RET
TEXT runtime·xadd64(SB), NOSPLIT, $0-24
MOVL ptr+0(FP), BX
MOVQ delta+8(FP), AX
MOVQ AX, CX
LOCK
XADDQ AX, 0(BX)
ADDQ CX, AX
MOVQ AX, ret+16(FP)
RET
TEXT runtime·xadduintptr(SB), NOSPLIT, $0-12
JMP runtime·xadd(SB)
TEXT runtime·xchg(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
MOVL AX, ret+8(FP)
RET
TEXT runtime·xchg64(SB), NOSPLIT, $0-24
MOVL ptr+0(FP), BX
MOVQ new+8(FP), AX
XCHGQ AX, 0(BX)
MOVQ AX, ret+16(FP)
RET
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-12
JMP runtime·xchg(SB)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:
@@ -528,40 +423,6 @@ again:
JNZ again
RET
TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtime·atomicstore(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
MOVL ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
RET
// void runtime·atomicor8(byte volatile*, byte);
TEXT runtime·atomicor8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), BX
MOVB val+4(FP), AX
LOCK
ORB AX, 0(BX)
RET
// void runtime·atomicand8(byte volatile*, byte);
TEXT runtime·atomicand8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), BX
MOVB val+4(FP), AX
LOCK
ANDB AX, 0(BX)
RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
// compile barrier.
...
@@ -695,63 +695,6 @@ TEXT runtime·abort(SB),NOSPLIT,$-4-0
MOVW $0, R0
MOVW (R0), R1
// bool armcas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// }else
// return 0;
//
// To implement runtime·cas in sys_$GOOS_arm.s
// using the native instructions, use:
//
// TEXT runtime·cas(SB),NOSPLIT,$0
// B runtime·armcas(SB)
//
TEXT runtime·armcas(SB),NOSPLIT,$0-13
MOVW valptr+0(FP), R1
MOVW old+4(FP), R2
MOVW new+8(FP), R3
casl:
LDREX (R1), R0
CMP R0, R2
BNE casfail
MOVB runtime·goarm(SB), R11
CMP $7, R11
BLT 2(PC)
WORD $0xf57ff05a // dmb ishst
STREX R3, (R1), R0
CMP $0, R0
BNE casl
MOVW $1, R0
MOVB runtime·goarm(SB), R11
CMP $7, R11
BLT 2(PC)
WORD $0xf57ff05b // dmb ish
MOVB R0, ret+12(FP)
RET
casfail:
MOVW $0, R0
MOVB R0, ret+12(FP)
RET
TEXT runtime·casuintptr(SB),NOSPLIT,$0-13
B runtime·cas(SB)
TEXT runtime·atomicloaduintptr(SB),NOSPLIT,$0-8
B runtime·atomicload(SB)
TEXT runtime·atomicloaduint(SB),NOSPLIT,$0-8
B runtime·atomicload(SB)
TEXT runtime·atomicstoreuintptr(SB),NOSPLIT,$0-8
B runtime·atomicstore(SB)
// armPublicationBarrier is a native store/store barrier for ARMv7+.
// On earlier ARM revisions, armPublicationBarrier is a no-op.
// This will not work on SMP ARMv6 machines, if any are in use.
...
@@ -453,40 +453,6 @@ CALLFN(·call268435456, 268435464 )
CALLFN(·call536870912, 536870920 )
CALLFN(·call1073741824, 1073741832 )
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime·cas(SB), NOSPLIT, $0-17
MOVD ptr+0(FP), R0
MOVW old+8(FP), R1
MOVW new+12(FP), R2
again:
LDAXRW (R0), R3
CMPW R1, R3
BNE ok
STLXRW R2, (R0), R3
CBNZ R3, again
ok:
CSET EQ, R0
MOVB R0, ret+16(FP)
RET
TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
B runtime·cas64(SB)
TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $-8-16
B runtime·atomicload64(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT, $-8-16
B runtime·atomicload64(SB)
TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
B runtime·atomicstore64(SB)
// AES hashing not implemented for ARM64, issue #10109.
TEXT runtime·aeshash(SB),NOSPLIT,$-8-0
MOVW $0, R0
@@ -501,16 +467,6 @@ TEXT runtime·aeshashstr(SB),NOSPLIT,$-8-0
MOVW $0, R0
MOVW (R0), R1
// bool casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime·casp1(SB), NOSPLIT, $0-25
B runtime·cas64(SB)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVWU cycles+0(FP), R0
again:
...
@@ -453,217 +453,9 @@ CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime·cas(SB), NOSPLIT, $0-17
MOVD ptr+0(FP), R3
MOVWZ old+8(FP), R4
MOVWZ new+12(FP), R5
cas_again:
SYNC
LWAR (R3), R6
CMPW R6, R4
BNE cas_fail
STWCCC R5, (R3)
BNE cas_again
MOVD $1, R3
SYNC
ISYNC
MOVB R3, ret+16(FP)
RET
cas_fail:
MOVD $0, R3
BR -5(PC)
// bool runtime·cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtime·cas64(SB), NOSPLIT, $0-25
MOVD ptr+0(FP), R3
MOVD old+8(FP), R4
MOVD new+16(FP), R5
cas64_again:
SYNC
LDAR (R3), R6
CMP R6, R4
BNE cas64_fail
STDCCC R5, (R3)
BNE cas64_again
MOVD $1, R3
SYNC
ISYNC
MOVB R3, ret+24(FP)
RET
cas64_fail:
MOVD $0, R3
BR -5(PC)
TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
BR runtime·cas64(SB)
TEXT runtime·atomicloaduintptr(SB), NOSPLIT|NOFRAME, $0-16
BR runtime·atomicload64(SB)
TEXT runtime·atomicloaduint(SB), NOSPLIT|NOFRAME, $0-16
BR runtime·atomicload64(SB)
TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
BR runtime·atomicstore64(SB)
// bool casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtime·casp1(SB), NOSPLIT, $0-25
BR runtime·cas64(SB)
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtime·xadd(SB), NOSPLIT, $0-20
MOVD ptr+0(FP), R4
MOVW delta+8(FP), R5
SYNC
LWAR (R4), R3
ADD R5, R3
STWCCC R3, (R4)
BNE -4(PC)
SYNC
ISYNC
MOVW R3, ret+16(FP)
RET
TEXT runtime·xadd64(SB), NOSPLIT, $0-24
MOVD ptr+0(FP), R4
MOVD delta+8(FP), R5
SYNC
LDAR (R4), R3
ADD R5, R3
STDCCC R3, (R4)
BNE -4(PC)
SYNC
ISYNC
MOVD R3, ret+16(FP)
RET
TEXT runtime·xchg(SB), NOSPLIT, $0-20
MOVD ptr+0(FP), R4
MOVW new+8(FP), R5
SYNC
LWAR (R4), R3
STWCCC R5, (R4)
BNE -3(PC)
SYNC
ISYNC
MOVW R3, ret+16(FP)
RET
TEXT runtime·xchg64(SB), NOSPLIT, $0-24
MOVD ptr+0(FP), R4
MOVD new+8(FP), R5
SYNC
LDAR (R4), R3
STDCCC R5, (R4)
BNE -3(PC)
SYNC
ISYNC
MOVD R3, ret+16(FP)
RET
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
BR runtime·xchg64(SB)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
RET
TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
BR runtime·atomicstore64(SB)
TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
MOVD ptr+0(FP), R3
MOVW val+8(FP), R4
SYNC
MOVW R4, 0(R3)
RET
TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
MOVD ptr+0(FP), R3
MOVD val+8(FP), R4
SYNC
MOVD R4, 0(R3)
RET
// void runtime·atomicor8(byte volatile*, byte);
TEXT runtime·atomicor8(SB), NOSPLIT, $0-9
MOVD ptr+0(FP), R3
MOVBZ val+8(FP), R4
// Align ptr down to 4 bytes so we can use 32-bit load/store.
// R5 = (R3 << 0) & ~3
RLDCR $0, R3, $~3, R5
// Compute val shift.
#ifdef GOARCH_ppc64
// Big endian. ptr = ptr ^ 3
XOR $3, R3
#endif
// R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
RLDC $3, R3, $(3*8), R6
// Shift val for aligned ptr. R4 = val << R6
SLD R6, R4, R4
again:
SYNC
LWAR (R5), R6
OR R4, R6
STWCCC R6, (R5)
BNE again
SYNC
ISYNC
RET
// void runtime·atomicand8(byte volatile*, byte);
TEXT runtime·atomicand8(SB), NOSPLIT, $0-9
MOVD ptr+0(FP), R3
MOVBZ val+8(FP), R4
// Align ptr down to 4 bytes so we can use 32-bit load/store.
// R5 = (R3 << 0) & ~3
RLDCR $0, R3, $~3, R5
// Compute val shift.
#ifdef GOARCH_ppc64
// Big endian. ptr = ptr ^ 3
XOR $3, R3
#endif
// R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
RLDC $3, R3, $(3*8), R6
// Shift val for aligned ptr. R4 = val << R6 | ^(0xFF << R6)
MOVD $0xFF, R7
SLD R6, R4
SLD R6, R7
XOR $-1, R7
OR R7, R4
again:
SYNC
LWAR (R5), R6
AND R4, R6
STWCCC R6, (R5)
BNE again
SYNC
ISYNC
RET
// void jmpdefer(fv, sp);
// called from deferreturn.
// 1. grab stored LR for caller
...
@@ -4,114 +4,6 @@
#include "textflag.h"
// uint32 runtime·atomicload(uint32 volatile* addr)
TEXT ·atomicload(SB),NOSPLIT,$-8-12
MOVD ptr+0(FP), R0
LDARW (R0), R0
MOVW R0, ret+8(FP)
RET
// uint64 runtime·atomicload64(uint64 volatile* addr)
TEXT ·atomicload64(SB),NOSPLIT,$-8-16
MOVD ptr+0(FP), R0
LDAR (R0), R0
MOVD R0, ret+8(FP)
RET
// void *runtime·atomicloadp(void *volatile *addr)
TEXT ·atomicloadp(SB),NOSPLIT,$-8-16
MOVD ptr+0(FP), R0
LDAR (R0), R0
MOVD R0, ret+8(FP)
RET
TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
B runtime·atomicstore64(SB)
TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
MOVD ptr+0(FP), R0
MOVW val+8(FP), R1
STLRW R1, (R0)
RET
TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
MOVD ptr+0(FP), R0
MOVD val+8(FP), R1
STLR R1, (R0)
RET
TEXT runtime·xchg(SB), NOSPLIT, $0-20
again:
MOVD ptr+0(FP), R0
MOVW new+8(FP), R1
LDAXRW (R0), R2
STLXRW R1, (R0), R3
CBNZ R3, again
MOVW R2, ret+16(FP)
RET
TEXT runtime·xchg64(SB), NOSPLIT, $0-24
again:
MOVD ptr+0(FP), R0
MOVD new+8(FP), R1
LDAXR (R0), R2
STLXR R1, (R0), R3
CBNZ R3, again
MOVD R2, ret+16(FP)
RET
// bool runtime·cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtime·cas64(SB), NOSPLIT, $0-25
MOVD ptr+0(FP), R0
MOVD old+8(FP), R1
MOVD new+16(FP), R2
again:
LDAXR (R0), R3
CMP R1, R3
BNE ok
STLXR R2, (R0), R3
CBNZ R3, again
ok:
CSET EQ, R0
MOVB R0, ret+24(FP)
RET
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtime·xadd(SB), NOSPLIT, $0-20
again:
MOVD ptr+0(FP), R0
MOVW delta+8(FP), R1
LDAXRW (R0), R2
ADDW R2, R1, R2
STLXRW R2, (R0), R3
CBNZ R3, again
MOVW R2, ret+16(FP)
RET
TEXT runtime·xadd64(SB), NOSPLIT, $0-24
again:
MOVD ptr+0(FP), R0
MOVD delta+8(FP), R1
LDAXR (R0), R2
ADD R2, R1, R2
STLXR R2, (R0), R3
CBNZ R3, again
MOVD R2, ret+16(FP)
RET
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
B runtime·xchg64(SB)
TEXT ·publicationBarrier(SB),NOSPLIT,$-8-0
DMB $0xe // DMB ST
RET
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+"runtime/internal/atomic"
+"unsafe"
+)
// These functions cannot have go:noescape annotations,
// because while ptr does not escape, new does.
@@ -18,13 +21,13 @@ import "unsafe"
//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
-atomicstorep1(noescape(ptr), new)
+atomic.Storep1(noescape(ptr), new)
writebarrierptr_nostore((*uintptr)(ptr), uintptr(new))
}
//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
-if !casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
+if !atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
return false
}
writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
@@ -42,7 +45,7 @@ func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
//go:nosplit
func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
-atomicstorep1(noescape(unsafe.Pointer(ptr)), new)
+atomic.Storep1(noescape(unsafe.Pointer(ptr)), new)
writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
}
...
@@ -6,39 +6,6 @@
#include "textflag.h"
// uint32 runtime·atomicload(uint32 volatile* addr)
TEXT ·atomicload(SB),NOSPLIT|NOFRAME,$0-12
MOVD addr+0(FP), R3
SYNC
MOVWZ 0(R3), R3
CMPW R3, R3, CR7
BC 4, 30, 1(PC) // bne- cr7,0x4
ISYNC
MOVW R3, ret+8(FP)
RET
// uint64 runtime·atomicload64(uint64 volatile* addr)
TEXT ·atomicload64(SB),NOSPLIT|NOFRAME,$0-16
MOVD addr+0(FP), R3
SYNC
MOVD 0(R3), R3
CMP R3, R3, CR7
BC 4, 30, 1(PC) // bne- cr7,0x4
ISYNC
MOVD R3, ret+8(FP)
RET
// void *runtime·atomicloadp(void *volatile *addr)
TEXT ·atomicloadp(SB),NOSPLIT|NOFRAME,$0-16
MOVD addr+0(FP), R3
SYNC
MOVD 0(R3), R3
CMP R3, R3, CR7
BC 4, 30, 1(PC) // bne- cr7,0x4
ISYNC
MOVD R3, ret+8(FP)
RET
TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
// LWSYNC is the "export" barrier recommended by Power ISA
// v2.07 book II, appendix B.2.2.2.
...
@@ -11,7 +11,10 @@ package runtime
// For buffered channels, also:
// c.qcount > 0 implies that c.recvq is empty.
// c.qcount < c.dataqsiz implies that c.sendq is empty.
-import "unsafe"
+import (
+"runtime/internal/atomic"
+"unsafe"
+)
const (
maxAlign = 8
@@ -393,8 +396,8 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, r
// The order of operations is important here: reversing the operations can lead to
// incorrect behavior when racing with a close.
if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
-c.dataqsiz > 0 && atomicloaduint(&c.qcount) == 0) &&
-atomicload(&c.closed) == 0 {
+c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
+atomic.Load(&c.closed) == 0 {
return
}
@@ -669,7 +672,7 @@ func (q *waitq) dequeue() *sudog {
// if sgp participates in a select and is already signaled, ignore it
if sgp.selectdone != nil {
// claim the right to signal
-if *sgp.selectdone != 0 || !cas(sgp.selectdone, 0, 1) {
+if *sgp.selectdone != 0 || !atomic.Cas(sgp.selectdone, 0, 1) {
continue
}
}
...
@@ -50,7 +50,10 @@
package runtime
-import "unsafe"
+import (
+"runtime/internal/atomic"
+"unsafe"
+)
const (
numBuckets = 1 << 10
@@ -173,7 +176,7 @@ func SetCPUProfileRate(hz int) {
if n&0x80000000 != 0 {
print("runtime: setcpuprofile(off) twice\n")
}
-if cas(&cpuprof.handoff, n, n|0x80000000) {
+if atomic.Cas(&cpuprof.handoff, n, n|0x80000000) {
if n == 0 {
// we did the transition from 0 -> nonzero so we wake getprofile
notewakeup(&cpuprof.wait)
@@ -276,7 +279,7 @@ func (p *cpuProfile) evict(e *cpuprofEntry) bool {
// so it cannot allocate memory or block. It can try to swap logs with
// the writing goroutine, as explained in the comment at the top of this file.
func (p *cpuProfile) flushlog() bool {
-if !cas(&p.handoff, 0, uint32(p.nlog)) {
+if !atomic.Cas(&p.handoff, 0, uint32(p.nlog)) {
return false
}
notewakeup(&p.wait)
@@ -318,7 +321,7 @@ func (p *cpuProfile) getprofile() []byte {
p.flushing = true
goto Flush
}
-if cas(&p.handoff, n, 0) {
+if atomic.Cas(&p.handoff, n, 0) {
break
}
}
@@ -389,7 +392,7 @@ Flush:
// Finally done. Clean up and return nil.
p.flushing = false
-if !cas(&p.handoff, p.handoff, 0) {
+if !atomic.Cas(&p.handoff, p.handoff, 0) {
print("runtime: profile flush racing with something\n")
}
return nil
...
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+"runtime/internal/atomic"
+"unsafe"
+)
// GOMAXPROCS sets the maximum number of CPUs that can be executing
// simultaneously and returns the previous setting. If n < 1, it does not
@@ -39,7 +42,7 @@ func NumCPU() int {
// NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64 {
var n int64
-for mp := (*m)(atomicloadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
+for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
n += int64(mp.ncgocall)
}
return n
...
@@ -6,7 +6,10 @@
package runtime
-import "unsafe"
+import (
+"runtime/internal/atomic"
+"unsafe"
+)
var Fadd64 = fadd64
var Fsub64 = fsub64
@@ -22,7 +25,7 @@ var Sqrt = sqrt
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
-var Xadduintptr = xadduintptr
+var Xadduintptr = atomic.Xadduintptr
var FuncPC = funcPC
...
@@ -54,6 +54,7 @@ package runtime
// before the table grows. Typical tables will be somewhat less loaded.
import (
+"runtime/internal/atomic"
"unsafe"
)
@@ -280,7 +281,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
@@ -315,7 +316,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
}
b = b.overflow(t)
if b == nil {
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
}
}
@@ -331,7 +332,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
@@ -366,7 +367,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
}
b = b.overflow(t)
if b == nil {
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
}
}
@@ -627,7 +628,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
// Remember we have an iterator.
// Can run concurrently with another hash_iter_init().
if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
-atomicor8(&h.flags, iterator|oldIterator)
+atomic.Or8(&h.flags, iterator|oldIterator)
}
mapiternext(it)
@@ -1024,14 +1025,14 @@ var zerosize uintptr = initialZeroSize
// serve as the zero value for t.
func mapzero(t *_type) {
// Is the type small enough for existing buffer?
-cursize := uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+cursize := uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
if t.size <= cursize {
return
}
// Allocate a new buffer.
lock(&zerolock)
-cursize = uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
if cursize < t.size {
for cursize < t.size {
cursize *= 2
@@ -1040,8 +1041,8 @@ func mapzero(t *_type) {
throw("map element too large")
}
}
-atomicstorep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
-atomicstorep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
+atomic.Storep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
+atomic.Storep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
}
unlock(&zerolock)
}
@@ -5,6 +5,7 @@
package runtime
import (
+"runtime/internal/atomic"
"unsafe"
)
@@ -14,7 +15,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
var b *bmap
if h.B == 0 {
@@ -45,7 +46,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
}
b = b.overflow(t)
if b == nil {
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
}
}
@@ -56,7 +57,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
var b *bmap
if h.B == 0 {
@@ -87,7 +88,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
}
b = b.overflow(t)
if b == nil {
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
}
}
@@ -98,7 +99,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
var b *bmap
if h.B == 0 {
@@ -129,7 +130,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
}
b = b.overflow(t)
if b == nil {
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
}
}
@@ -140,7 +141,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
var b *bmap
if h.B == 0 {
@@ -171,7 +172,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
}
b = b.overflow(t)
if b == nil {
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
}
}
@@ -182,7 +183,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
key := stringStructOf(&ky)
if h.B == 0 {
@@ -203,7 +204,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
}
}
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
@@ -241,7 +242,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
}
}
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
@@ -273,7 +274,7 @@ dohash:
}
b = b.overflow(t)
if b == nil {
-return atomicloadp(unsafe.Pointer(&zeroptr))
+return atomic.Loadp(unsafe.Pointer(&zeroptr))
}
}
}
@@ -284,7 +285,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
key := stringStructOf(&ky)
if h.B == 0 {
@@ -305,7 +306,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
}
}
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
@@ -341,7 +342,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
}
}
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
@@ -373,7 +374,7 @@ dohash:
}
b = b.overflow(t)
if b == nil {
-return atomicloadp(unsafe.Pointer(&zeroptr)), false
+return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
}
}
}
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+"runtime/internal/atomic"
+"unsafe"
+)
const (
hashSize = 1009
@@ -43,7 +46,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
if locked != 0 {
lock(&ifaceLock)
}
-for m = (*itab)(atomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
+for m = (*itab)(atomic.Loadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
if m.inter == inter && m._type == typ {
if m.bad != 0 {
m = nil
@@ -151,7 +154,7 @@ func convT2I(t *_type, inter *interfacetype, cache **itab, elem unsafe.Pointer,
if msanenabled {
msanread(elem, t.size)
}
-tab := (*itab)(atomicloadp(unsafe.Pointer(cache)))
+tab := (*itab)(atomic.Loadp(unsafe.Pointer(cache)))
if tab == nil {
tab = getitab(inter, t, false)
atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
...
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic
const (
_CacheLineSize = 64
)
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic
const (
_CacheLineSize = 64
)
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic
const (
thechar = '6'
_BigEndian = 0
_CacheLineSize = 64
_PCQuantum = 1
_Int64Align = 8
hugePageSize = 1 << 21
)
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic
const (
_CacheLineSize = 32
)
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic
const (
_CacheLineSize = 32
)
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic
const (
_CacheLineSize = 64
)
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic
const (
_CacheLineSize = 64
)
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
TEXT runtimeinternalatomic·nop(SB),NOSPLIT,$0-0
RET
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// }else
// return 0;
TEXT runtimeinternalatomic·Cas(SB), NOSPLIT, $0-13
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+12(FP)
RET
TEXT runtimeinternalatomic·Casuintptr(SB), NOSPLIT, $0-13
JMP runtimeinternalatomic·Cas(SB)
TEXT runtimeinternalatomic·Loaduintptr(SB), NOSPLIT, $0-8
JMP runtimeinternalatomic·Load(SB)
TEXT runtimeinternalatomic·Loaduint(SB), NOSPLIT, $0-8
JMP runtimeinternalatomic·Load(SB)
TEXT runtimeinternalatomic·Storeuintptr(SB), NOSPLIT, $0-8
JMP runtimeinternalatomic·Store(SB)
TEXT runtimeinternalatomic·Xadduintptr(SB), NOSPLIT, $0-8
JMP runtimeinternalatomic·Xadd(SB)
TEXT runtimeinternalatomic·Loadint64(SB), NOSPLIT, $0-16
JMP runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Xaddint64(SB), NOSPLIT, $0-16
JMP runtimeinternalatomic·Xadd64(SB)
// bool runtimeinternalatomic·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtimeinternalatomic·Cas64(SB), NOSPLIT, $0-21
MOVL ptr+0(FP), BP
MOVL old_lo+4(FP), AX
MOVL old_hi+8(FP), DX
MOVL new_lo+12(FP), BX
MOVL new_hi+16(FP), CX
LOCK
CMPXCHG8B 0(BP)
SETEQ ret+20(FP)
RET
// bool Casp(void **p, void *old, void *new)
// Atomically:
// if(*p == old){
// *p = new;
// return 1;
// }else
// return 0;
TEXT runtimeinternalatomic·Casp1(SB), NOSPLIT, $0-13
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+12(FP)
RET
// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtimeinternalatomic·Xadd(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL delta+4(FP), AX
MOVL AX, CX
LOCK
XADDL AX, 0(BX)
ADDL CX, AX
MOVL AX, ret+8(FP)
RET
TEXT runtimeinternalatomic·Xchg(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
MOVL AX, ret+8(FP)
RET
TEXT runtimeinternalatomic·Xchguintptr(SB), NOSPLIT, $0-12
JMP runtimeinternalatomic·Xchg(SB)
TEXT runtimeinternalatomic·Storep1(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtimeinternalatomic·Store(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
// uint64 atomicload64(uint64 volatile* addr);
TEXT runtimeinternalatomic·Load64(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), AX
TESTL $7, AX
JZ 2(PC)
MOVL 0, AX // crash with nil ptr deref
LEAL ret_lo+4(FP), BX
// MOVQ (%EAX), %MM0
BYTE $0x0f; BYTE $0x6f; BYTE $0x00
// MOVQ %MM0, 0(%EBX)
BYTE $0x0f; BYTE $0x7f; BYTE $0x03
// EMMS
BYTE $0x0F; BYTE $0x77
RET
// void runtimeinternalatomic·Store64(uint64 volatile* addr, uint64 v);
TEXT runtimeinternalatomic·Store64(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), AX
TESTL $7, AX
JZ 2(PC)
MOVL 0, AX // crash with nil ptr deref
// MOVQ and EMMS were introduced on the Pentium MMX.
// MOVQ 0x8(%ESP), %MM0
BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
// MOVQ %MM0, (%EAX)
BYTE $0x0f; BYTE $0x7f; BYTE $0x00
// EMMS
BYTE $0x0F; BYTE $0x77
// This is essentially a no-op, but it provides required memory fencing.
// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
MOVL $0, AX
LOCK
XADDL AX, (SP)
RET
// void runtimeinternalatomic·Or8(byte volatile*, byte);
TEXT runtimeinternalatomic·Or8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), AX
MOVB val+4(FP), BX
LOCK
ORB BX, (AX)
RET
// void runtimeinternalatomic·And8(byte volatile*, byte);
TEXT runtimeinternalatomic·And8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), AX
MOVB val+4(FP), BX
LOCK
ANDB BX, (AX)
RET
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtimeinternalatomic·Cas(SB),NOSPLIT,$0-17
MOVQ ptr+0(FP), BX
MOVL old+8(FP), AX
MOVL new+12(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+16(FP)
RET
// bool runtimeinternalatomic·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == *old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtimeinternalatomic·Cas64(SB), NOSPLIT, $0-25
MOVQ ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
SETEQ ret+24(FP)
RET
TEXT runtimeinternalatomic·Casuintptr(SB), NOSPLIT, $0-25
JMP runtimeinternalatomic·Cas64(SB)
TEXT runtimeinternalatomic·Loaduintptr(SB), NOSPLIT, $0-16
JMP runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Loaduint(SB), NOSPLIT, $0-16
JMP runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Storeuintptr(SB), NOSPLIT, $0-16
JMP runtimeinternalatomic·Store64(SB)
TEXT runtimeinternalatomic·Loadint64(SB), NOSPLIT, $0-16
JMP runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Xaddint64(SB), NOSPLIT, $0-16
JMP runtimeinternalatomic·Xadd64(SB)
// bool Casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtimeinternalatomic·Casp1(SB), NOSPLIT, $0-25
MOVQ ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
SETEQ ret+24(FP)
RET
// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtimeinternalatomic·Xadd(SB), NOSPLIT, $0-20
MOVQ ptr+0(FP), BX
MOVL delta+8(FP), AX
MOVL AX, CX
LOCK
XADDL AX, 0(BX)
ADDL CX, AX
MOVL AX, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xadd64(SB), NOSPLIT, $0-24
MOVQ ptr+0(FP), BX
MOVQ delta+8(FP), AX
MOVQ AX, CX
LOCK
XADDQ AX, 0(BX)
ADDQ CX, AX
MOVQ AX, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xadduintptr(SB), NOSPLIT, $0-24
JMP runtimeinternalatomic·Xadd64(SB)
TEXT runtimeinternalatomic·Xchg(SB), NOSPLIT, $0-20
MOVQ ptr+0(FP), BX
MOVL new+8(FP), AX
XCHGL AX, 0(BX)
MOVL AX, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xchg64(SB), NOSPLIT, $0-24
MOVQ ptr+0(FP), BX
MOVQ new+8(FP), AX
XCHGQ AX, 0(BX)
MOVQ AX, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xchguintptr(SB), NOSPLIT, $0-24
JMP runtimeinternalatomic·Xchg64(SB)
TEXT runtimeinternalatomic·Storep1(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
RET
TEXT runtimeinternalatomic·Store(SB), NOSPLIT, $0-12
MOVQ ptr+0(FP), BX
MOVL val+8(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtimeinternalatomic·Store64(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
RET
// void runtimeinternalatomic·Or8(byte volatile*, byte);
TEXT runtimeinternalatomic·Or8(SB), NOSPLIT, $0-9
MOVQ ptr+0(FP), AX
MOVB val+8(FP), BX
LOCK
ORB BX, (AX)
RET
// void runtimeinternalatomic·And8(byte volatile*, byte);
TEXT runtimeinternalatomic·And8(SB), NOSPLIT, $0-9
MOVQ ptr+0(FP), AX
MOVB val+8(FP), BX
LOCK
ANDB BX, (AX)
RET
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtimeinternalatomic·Cas(SB), NOSPLIT, $0-17
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+16(FP)
RET
TEXT runtimeinternalatomic·Casuintptr(SB), NOSPLIT, $0-17
JMP runtimeinternalatomic·Cas(SB)
TEXT runtimeinternalatomic·Loaduintptr(SB), NOSPLIT, $0-12
JMP runtimeinternalatomic·Load(SB)
TEXT runtimeinternalatomic·Loaduint(SB), NOSPLIT, $0-12
JMP runtimeinternalatomic·Load(SB)
TEXT runtimeinternalatomic·Storeuintptr(SB), NOSPLIT, $0-12
JMP runtimeinternalatomic·Store(SB)
TEXT runtimeinternalatomic·Loadint64(SB), NOSPLIT, $0-24
JMP runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Xaddint64(SB), NOSPLIT, $0-24
JMP runtimeinternalatomic·Xadd64(SB)
// bool runtimeinternalatomic·cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtimeinternalatomic·Cas64(SB), NOSPLIT, $0-25
MOVL ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
SETEQ ret+24(FP)
RET
// bool Casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtimeinternalatomic·Casp1(SB), NOSPLIT, $0-17
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
LOCK
CMPXCHGL CX, 0(BX)
SETEQ ret+16(FP)
RET
// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtimeinternalatomic·Xadd(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL delta+4(FP), AX
MOVL AX, CX
LOCK
XADDL AX, 0(BX)
ADDL CX, AX
MOVL AX, ret+8(FP)
RET
TEXT runtimeinternalatomic·Xadd64(SB), NOSPLIT, $0-24
MOVL ptr+0(FP), BX
MOVQ delta+8(FP), AX
MOVQ AX, CX
LOCK
XADDQ AX, 0(BX)
ADDQ CX, AX
MOVQ AX, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xadduintptr(SB), NOSPLIT, $0-12
JMP runtimeinternalatomic·Xadd(SB)
TEXT runtimeinternalatomic·Xchg(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
MOVL AX, ret+8(FP)
RET
TEXT runtimeinternalatomic·Xchg64(SB), NOSPLIT, $0-24
MOVL ptr+0(FP), BX
MOVQ new+8(FP), AX
XCHGQ AX, 0(BX)
MOVQ AX, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xchguintptr(SB), NOSPLIT, $0-12
JMP runtimeinternalatomic·Xchg(SB)
TEXT runtimeinternalatomic·Storep1(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtimeinternalatomic·Store(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
TEXT runtimeinternalatomic·Store64(SB), NOSPLIT, $0-16
MOVL ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
RET
// void runtimeinternalatomic·Or8(byte volatile*, byte);
TEXT runtimeinternalatomic·Or8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), BX
MOVB val+4(FP), AX
LOCK
ORB AX, 0(BX)
RET
// void runtimeinternalatomic·And8(byte volatile*, byte);
TEXT runtimeinternalatomic·And8(SB), NOSPLIT, $0-5
MOVL ptr+0(FP), BX
MOVB val+4(FP), AX
LOCK
ANDB AX, 0(BX)
RET
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// bool armcas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// }else
// return 0;
//
// To implement runtimeinternalatomic·cas in sys_$GOOS_arm.s
// using the native instructions, use:
//
// TEXT runtimeinternalatomic·cas(SB),NOSPLIT,$0
// B runtimeinternalatomic·armcas(SB)
//
TEXT runtimeinternalatomic·armcas(SB),NOSPLIT,$0-13
MOVW valptr+0(FP), R1
MOVW old+4(FP), R2
MOVW new+8(FP), R3
casl:
LDREX (R1), R0
CMP R0, R2
BNE casfail
MOVB runtime·goarm(SB), R11
CMP $7, R11
BLT 2(PC)
WORD $0xf57ff05a // dmb ishst
STREX R3, (R1), R0
CMP $0, R0
BNE casl
MOVW $1, R0
MOVB runtime·goarm(SB), R11
CMP $7, R11
BLT 2(PC)
WORD $0xf57ff05b // dmb ish
MOVB R0, ret+12(FP)
RET
casfail:
MOVW $0, R0
MOVB R0, ret+12(FP)
RET
TEXT runtimeinternalatomic·Casuintptr(SB),NOSPLIT,$0-13
B runtimeinternalatomic·Cas(SB)
TEXT runtimeinternalatomic·Loaduintptr(SB),NOSPLIT,$0-8
B runtimeinternalatomic·Load(SB)
TEXT runtimeinternalatomic·Loaduint(SB),NOSPLIT,$0-8
B runtimeinternalatomic·Load(SB)
TEXT runtimeinternalatomic·Storeuintptr(SB),NOSPLIT,$0-8
B runtimeinternalatomic·Store(SB)
TEXT runtimeinternalatomic·Xadduintptr(SB),NOSPLIT,$0-8
B runtimeinternalatomic·Xadd(SB)
TEXT runtimeinternalatomic·Loadint64(SB),NOSPLIT,$0-16
B runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Xaddint64(SB),NOSPLIT,$0-16
B runtimeinternalatomic·Xadd64(SB)
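These forwarding stubs work because uintptr is four bytes on 32-bit arm, so Casuintptr, Loaduintptr, and Xadduintptr have exactly the same argument frame as their 32-bit counterparts; only the explicitly 64-bit helpers go through Load64/Xadd64. A tiny stand-alone check of that size assumption:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Prints 4 on 32-bit targets such as GOARCH=arm and 8 on 64-bit ones,
	// which is why the uintptr stubs above can alias the 32-bit entry points.
	fmt.Println(unsafe.Sizeof(uintptr(0)))
}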
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// bool Cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtimeinternalatomic·Cas(SB), NOSPLIT, $0-17
MOVD ptr+0(FP), R0
MOVW old+8(FP), R1
MOVW new+12(FP), R2
again:
LDAXRW (R0), R3
CMPW R1, R3
BNE ok
STLXRW R2, (R0), R3
CBNZ R3, again
ok:
CSET EQ, R0
MOVB R0, ret+16(FP)
RET
TEXT runtimeinternalatomic·Casuintptr(SB), NOSPLIT, $0-25
B runtimeinternalatomic·Cas64(SB)
TEXT runtimeinternalatomic·Loaduintptr(SB), NOSPLIT, $-8-16
B runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Loaduint(SB), NOSPLIT, $-8-16
B runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Storeuintptr(SB), NOSPLIT, $0-16
B runtimeinternalatomic·Store64(SB)
TEXT runtimeinternalatomic·Xadduintptr(SB), NOSPLIT, $0-16
B runtimeinternalatomic·Xadd64(SB)
TEXT runtimeinternalatomic·Loadint64(SB), NOSPLIT, $0-16
B runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Xaddint64(SB), NOSPLIT, $0-16
B runtimeinternalatomic·Xadd64(SB)
// bool Casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtimeinternalatomic·Casp1(SB), NOSPLIT, $0-25
B runtimeinternalatomic·Cas64(SB)
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ppc64 ppc64le
#include "textflag.h"
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtimeinternalatomic·Cas(SB), NOSPLIT, $0-17
MOVD ptr+0(FP), R3
MOVWZ old+8(FP), R4
MOVWZ new+12(FP), R5
cas_again:
SYNC
LWAR (R3), R6
CMPW R6, R4
BNE cas_fail
STWCCC R5, (R3)
BNE cas_again
MOVD $1, R3
SYNC
ISYNC
MOVB R3, ret+16(FP)
RET
cas_fail:
MOVD $0, R3
BR -5(PC)
// bool runtimeinternalatomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtimeinternalatomic·Cas64(SB), NOSPLIT, $0-25
MOVD ptr+0(FP), R3
MOVD old+8(FP), R4
MOVD new+16(FP), R5
cas64_again:
SYNC
LDAR (R3), R6
CMP R6, R4
BNE cas64_fail
STDCCC R5, (R3)
BNE cas64_again
MOVD $1, R3
SYNC
ISYNC
MOVB R3, ret+24(FP)
RET
cas64_fail:
MOVD $0, R3
BR -5(PC)
TEXT runtimeinternalatomic·Casuintptr(SB), NOSPLIT, $0-25
BR runtimeinternalatomic·Cas64(SB)
TEXT runtimeinternalatomic·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
BR runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
BR runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Storeuintptr(SB), NOSPLIT, $0-16
BR runtimeinternalatomic·Store64(SB)
TEXT runtimeinternalatomic·Xadduintptr(SB), NOSPLIT, $0-24
BR runtimeinternalatomic·Xadd64(SB)
TEXT runtimeinternalatomic·Loadint64(SB), NOSPLIT, $0-16
BR runtimeinternalatomic·Load64(SB)
TEXT runtimeinternalatomic·Xaddint64(SB), NOSPLIT, $0-16
BR runtimeinternalatomic·Xadd64(SB)
// bool casp(void **val, void *old, void *new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else
// return 0;
TEXT runtimeinternalatomic·Casp1(SB), NOSPLIT, $0-25
BR runtimeinternalatomic·Cas64(SB)
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtimeinternalatomic·Xadd(SB), NOSPLIT, $0-20
MOVD ptr+0(FP), R4
MOVW delta+8(FP), R5
SYNC
LWAR (R4), R3
ADD R5, R3
STWCCC R3, (R4)
BNE -4(PC)
SYNC
ISYNC
MOVW R3, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xadd64(SB), NOSPLIT, $0-24
MOVD ptr+0(FP), R4
MOVD delta+8(FP), R5
SYNC
LDAR (R4), R3
ADD R5, R3
STDCCC R3, (R4)
BNE -4(PC)
SYNC
ISYNC
MOVD R3, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xchg(SB), NOSPLIT, $0-20
MOVD ptr+0(FP), R4
MOVW new+8(FP), R5
SYNC
LWAR (R4), R3
STWCCC R5, (R4)
BNE -3(PC)
SYNC
ISYNC
MOVW R3, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xchg64(SB), NOSPLIT, $0-24
MOVD ptr+0(FP), R4
MOVD new+8(FP), R5
SYNC
LDAR (R4), R3
STDCCC R5, (R4)
BNE -3(PC)
SYNC
ISYNC
MOVD R3, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xchguintptr(SB), NOSPLIT, $0-24
BR runtimeinternalatomic·Xchg64(SB)
TEXT runtimeinternalatomic·Storep1(SB), NOSPLIT, $0-16
BR runtimeinternalatomic·Store64(SB)
TEXT runtimeinternalatomic·Store(SB), NOSPLIT, $0-12
MOVD ptr+0(FP), R3
MOVW val+8(FP), R4
SYNC
MOVW R4, 0(R3)
RET
TEXT runtimeinternalatomic·Store64(SB), NOSPLIT, $0-16
MOVD ptr+0(FP), R3
MOVD val+8(FP), R4
SYNC
MOVD R4, 0(R3)
RET
// void runtimeinternalatomic·Or8(byte volatile*, byte);
TEXT runtimeinternalatomic·Or8(SB), NOSPLIT, $0-9
MOVD ptr+0(FP), R3
MOVBZ val+8(FP), R4
// Align ptr down to 4 bytes so we can use 32-bit load/store.
// R5 = (R3 << 0) & ~3
RLDCR $0, R3, $~3, R5
// Compute val shift.
#ifdef GOARCH_ppc64
// Big endian. ptr = ptr ^ 3
XOR $3, R3
#endif
// R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
RLDC $3, R3, $(3*8), R6
// Shift val for aligned ptr. R4 = val << R6
SLD R6, R4, R4
again:
SYNC
LWAR (R5), R6
OR R4, R6
STWCCC R6, (R5)
BNE again
SYNC
ISYNC
RET
// void runtimeinternalatomic·And8(byte volatile*, byte);
TEXT runtimeinternalatomic·And8(SB), NOSPLIT, $0-9
MOVD ptr+0(FP), R3
MOVBZ val+8(FP), R4
// Align ptr down to 4 bytes so we can use 32-bit load/store.
// R5 = (R3 << 0) & ~3
RLDCR $0, R3, $~3, R5
// Compute val shift.
#ifdef GOARCH_ppc64
// Big endian. ptr = ptr ^ 3
XOR $3, R3
#endif
// R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
RLDC $3, R3, $(3*8), R6
// Shift val for aligned ptr. R4 = val << R6 | ^(0xFF << R6)
MOVD $0xFF, R7
SLD R6, R4
SLD R6, R7
XOR $-1, R7
OR R7, R4
again:
SYNC
LWAR (R5), R6
AND R4, R6
STWCCC R6, (R5)
BNE again
SYNC
ISYNC
RET
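The ppc64 Or8/And8 sequences emulate a byte-wide atomic with a 32-bit LL/SC pair: the pointer is aligned down to its containing word and the byte value is shifted into position, with the extra XOR $3 applied only on big-endian ppc64 because byte 0 sits at the most significant end of the word there. A small sketch of that address arithmetic (plain Go; the names are illustrative):

package sketch

// byteWordAndShift returns the 4-byte-aligned address of the word that
// contains addr, plus the bit shift that moves an 8-bit value to that
// byte's position within the word, for either byte order.
func byteWordAndShift(addr uintptr, bigEndian bool) (word uintptr, shift uint) {
	word = addr &^ 3 // align down to the containing 32-bit word
	off := addr & 3  // byte index within that word
	if bigEndian {
		off ^= 3 // big endian: byte 0 is the most significant byte
	}
	return word, uint(off * 8)
}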
...@@ -2,7 +2,9 @@ ...@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package runtime // +build 386
package atomic
import "unsafe" import "unsafe"
...@@ -11,69 +13,68 @@ import "unsafe" ...@@ -11,69 +13,68 @@ import "unsafe"
// code by optimizers will preserve the relative order of memory accesses. // code by optimizers will preserve the relative order of memory accesses.
//go:nosplit //go:nosplit
func atomicload(ptr *uint32) uint32 { func Load(ptr *uint32) uint32 {
nop() nop()
return *ptr return *ptr
} }
//go:nosplit //go:nosplit
func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer { func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
nop() nop()
return *(*unsafe.Pointer)(ptr) return *(*unsafe.Pointer)(ptr)
} }
//go:nosplit //go:nosplit
func xadd64(ptr *uint64, delta int64) uint64 { func Xadd64(ptr *uint64, delta int64) uint64 {
for { for {
old := *ptr old := *ptr
if cas64(ptr, old, old+uint64(delta)) { if Cas64(ptr, old, old+uint64(delta)) {
return old + uint64(delta) return old + uint64(delta)
} }
} }
} }
//go:noescape //go:noescape
//go:linkname xadduintptr runtime.xadd func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
func xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:nosplit //go:nosplit
func xchg64(ptr *uint64, new uint64) uint64 { func Xchg64(ptr *uint64, new uint64) uint64 {
for { for {
old := *ptr old := *ptr
if cas64(ptr, old, new) { if Cas64(ptr, old, new) {
return old return old
} }
} }
} }
//go:noescape //go:noescape
func xadd(ptr *uint32, delta int32) uint32 func Xadd(ptr *uint32, delta int32) uint32
//go:noescape //go:noescape
func xchg(ptr *uint32, new uint32) uint32 func Xchg(ptr *uint32, new uint32) uint32
//go:noescape //go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr func Xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape //go:noescape
func atomicload64(ptr *uint64) uint64 func Load64(ptr *uint64) uint64
//go:noescape //go:noescape
func atomicand8(ptr *uint8, val uint8) func And8(ptr *uint8, val uint8)
//go:noescape //go:noescape
func atomicor8(ptr *uint8, val uint8) func Or8(ptr *uint8, val uint8)
// NOTE: Do not add atomicxor8 (XOR is not idempotent). // NOTE: Do not add atomicxor8 (XOR is not idempotent).
//go:noescape //go:noescape
func cas64(ptr *uint64, old, new uint64) bool func Cas64(ptr *uint64, old, new uint64) bool
//go:noescape //go:noescape
func atomicstore(ptr *uint32, val uint32) func Store(ptr *uint32, val uint32)
//go:noescape //go:noescape
func atomicstore64(ptr *uint64, val uint64) func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go. // NO go:noescape annotation; see atomic_pointer.go.
func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer) func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
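The pattern visible in these Go files is the point of the whole change: the runtime's unexported helpers (atomicload, cas64, xadd, and friends) become exported names in package runtime/internal/atomic, and call sites switch to package-qualified calls. A sketch of what a migrated call site looks like; it is illustrative and, since the package is internal, compiles only inside the standard library tree:

package sketch

import "runtime/internal/atomic"

// bump increments a counter the way migrated runtime code would:
// formerly `v := atomicload(p); cas(p, v, v+1)`, now the exported names.
func bump(p *uint32) uint32 {
	for {
		v := atomic.Load(p)
		if atomic.Cas(p, v, v+1) {
			return v + 1
		}
	}
}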
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
// +build amd64 amd64p32 // +build amd64 amd64p32
package runtime package atomic
import "unsafe" import "unsafe"
...@@ -13,57 +13,57 @@ import "unsafe" ...@@ -13,57 +13,57 @@ import "unsafe"
// code by optimizers will preserve the relative order of memory accesses. // code by optimizers will preserve the relative order of memory accesses.
//go:nosplit //go:nosplit
func atomicload(ptr *uint32) uint32 { func Load(ptr *uint32) uint32 {
nop() nop()
return *ptr return *ptr
} }
//go:nosplit //go:nosplit
func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer { func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
nop() nop()
return *(*unsafe.Pointer)(ptr) return *(*unsafe.Pointer)(ptr)
} }
//go:nosplit //go:nosplit
func atomicload64(ptr *uint64) uint64 { func Load64(ptr *uint64) uint64 {
nop() nop()
return *ptr return *ptr
} }
//go:noescape //go:noescape
func xadd(ptr *uint32, delta int32) uint32 func Xadd(ptr *uint32, delta int32) uint32
//go:noescape //go:noescape
func xadd64(ptr *uint64, delta int64) uint64 func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape //go:noescape
func xadduintptr(ptr *uintptr, delta uintptr) uintptr func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape //go:noescape
func xchg(ptr *uint32, new uint32) uint32 func Xchg(ptr *uint32, new uint32) uint32
//go:noescape //go:noescape
func xchg64(ptr *uint64, new uint64) uint64 func Xchg64(ptr *uint64, new uint64) uint64
//go:noescape //go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr func Xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape //go:noescape
func atomicand8(ptr *uint8, val uint8) func And8(ptr *uint8, val uint8)
//go:noescape //go:noescape
func atomicor8(ptr *uint8, val uint8) func Or8(ptr *uint8, val uint8)
// NOTE: Do not add atomicxor8 (XOR is not idempotent). // NOTE: Do not add atomicxor8 (XOR is not idempotent).
//go:noescape //go:noescape
func cas64(ptr *uint64, old, new uint64) bool func Cas64(ptr *uint64, old, new uint64) bool
//go:noescape //go:noescape
func atomicstore(ptr *uint32, val uint32) func Store(ptr *uint32, val uint32)
//go:noescape //go:noescape
func atomicstore64(ptr *uint64, val uint64) func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go. // NO go:noescape annotation; see atomic_pointer.go.
func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer) func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
...@@ -2,154 +2,169 @@ ...@@ -2,154 +2,169 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package runtime // +build arm
package atomic
import "unsafe" import "unsafe"
type spinlock struct {
v uint32
}
//go:nosplit
func (l *spinlock) lock() {
for {
if Cas(&l.v, 0, 1) {
return
}
}
}
//go:nosplit
func (l *spinlock) unlock() {
Store(&l.v, 0)
}
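This is where the commit message's switch from mutexes to spinlocks lands: the new package sits below the runtime's mutex, so the 64-bit emulation on arm can no longer switch to the system stack and take a runtime lock (the old code doing exactly that is still visible in the left column further down); instead it spins on its own Cas-based lock. A stand-alone version of the same pattern, written with sync/atomic so it compiles anywhere; the real code must use the package's own Cas/Store and stay nosplit:

package sketch

import "sync/atomic"

type spinlock struct{ v uint32 }

// lock spins until the flag flips from 0 to 1; unlock publishes 0.
// Same shape as the runtime's spinlock above, minus the nosplit and
// no-allocation constraints the runtime version lives under.
func (l *spinlock) lock() {
	for !atomic.CompareAndSwapUint32(&l.v, 0, 1) {
	}
}

func (l *spinlock) unlock() {
	atomic.StoreUint32(&l.v, 0)
}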
var locktab [57]struct { var locktab [57]struct {
l mutex l spinlock
pad [_CacheLineSize - unsafe.Sizeof(mutex{})]byte pad [_CacheLineSize - unsafe.Sizeof(spinlock{})]byte
} }
func addrLock(addr *uint64) *mutex { func addrLock(addr *uint64) *spinlock {
return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
} }
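addrLock stripes 64-bit addresses across a small table of 57 spinlocks: operations on the same variable always take the same lock, while unrelated variables usually land in different slots and do not contend. A runnable sketch of the slot computation (the constant and names mirror the code above but are illustrative):

package main

import (
	"fmt"
	"unsafe"
)

const nlocks = 57 // size of the lock table above

// slot maps a *uint64 to its lock index: shift away the low 3 bits (the
// byte offset within an 8-byte granule) and reduce modulo the table size.
func slot(addr *uint64) uintptr {
	return (uintptr(unsafe.Pointer(addr)) >> 3) % nlocks
}

func main() {
	var vals [4]uint64
	for i := range vals {
		// Consecutive 8-byte slots get consecutive indices, so these
		// four variables land on four distinct locks.
		fmt.Println(slot(&vals[i]))
	}
}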
// Atomic add and return new value. // Atomic add and return new value.
//go:nosplit //go:nosplit
func xadd(val *uint32, delta int32) uint32 { func Xadd(val *uint32, delta int32) uint32 {
for { for {
oval := *val oval := *val
nval := oval + uint32(delta) nval := oval + uint32(delta)
if cas(val, oval, nval) { if Cas(val, oval, nval) {
return nval return nval
} }
} }
} }
//go:noescape //go:noescape
//go:linkname xadduintptr runtime.xadd func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
func xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:nosplit //go:nosplit
func xchg(addr *uint32, v uint32) uint32 { func Xchg(addr *uint32, v uint32) uint32 {
for { for {
old := *addr old := *addr
if cas(addr, old, v) { if Cas(addr, old, v) {
return old return old
} }
} }
} }
//go:nosplit //go:nosplit
func xchguintptr(addr *uintptr, v uintptr) uintptr { func Xchguintptr(addr *uintptr, v uintptr) uintptr {
return uintptr(xchg((*uint32)(unsafe.Pointer(addr)), uint32(v))) return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
}
//go:nosplit
func Load(addr *uint32) uint32 {
return Xadd(addr, 0)
} }
// Should be a built-in for unsafe.Pointer?
//go:nosplit //go:nosplit
func atomicload(addr *uint32) uint32 { func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
return xadd(addr, 0) return unsafe.Pointer(uintptr(p) + x)
} }
//go:nosplit //go:nosplit
func atomicloadp(addr unsafe.Pointer) unsafe.Pointer { func Loadp(addr unsafe.Pointer) unsafe.Pointer {
return unsafe.Pointer(uintptr(xadd((*uint32)(addr), 0))) return unsafe.Pointer(uintptr(Xadd((*uint32)(addr), 0)))
} }
//go:nosplit //go:nosplit
func atomicstorep1(addr unsafe.Pointer, v unsafe.Pointer) { func Storep1(addr unsafe.Pointer, v unsafe.Pointer) {
for { for {
old := *(*unsafe.Pointer)(addr) old := *(*unsafe.Pointer)(addr)
if casp1((*unsafe.Pointer)(addr), old, v) { if Casp1((*unsafe.Pointer)(addr), old, v) {
return return
} }
} }
} }
//go:nosplit //go:nosplit
func atomicstore(addr *uint32, v uint32) { func Store(addr *uint32, v uint32) {
for { for {
old := *addr old := *addr
if cas(addr, old, v) { if Cas(addr, old, v) {
return return
} }
} }
} }
//go:nosplit //go:nosplit
func cas64(addr *uint64, old, new uint64) bool { func Cas64(addr *uint64, old, new uint64) bool {
var ok bool var ok bool
systemstack(func() { addrLock(addr).lock()
lock(addrLock(addr))
if *addr == old { if *addr == old {
*addr = new *addr = new
ok = true ok = true
} }
unlock(addrLock(addr)) addrLock(addr).unlock()
})
return ok return ok
} }
//go:nosplit //go:nosplit
func xadd64(addr *uint64, delta int64) uint64 { func Xadd64(addr *uint64, delta int64) uint64 {
var r uint64 var r uint64
systemstack(func() { addrLock(addr).lock()
lock(addrLock(addr))
r = *addr + uint64(delta) r = *addr + uint64(delta)
*addr = r *addr = r
unlock(addrLock(addr)) addrLock(addr).unlock()
})
return r return r
} }
//go:nosplit //go:nosplit
func xchg64(addr *uint64, v uint64) uint64 { func Xchg64(addr *uint64, v uint64) uint64 {
var r uint64 var r uint64
systemstack(func() { addrLock(addr).lock()
lock(addrLock(addr))
r = *addr r = *addr
*addr = v *addr = v
unlock(addrLock(addr)) addrLock(addr).unlock()
})
return r return r
} }
//go:nosplit //go:nosplit
func atomicload64(addr *uint64) uint64 { func Load64(addr *uint64) uint64 {
var r uint64 var r uint64
systemstack(func() { addrLock(addr).lock()
lock(addrLock(addr))
r = *addr r = *addr
unlock(addrLock(addr)) addrLock(addr).unlock()
})
return r return r
} }
//go:nosplit //go:nosplit
func atomicstore64(addr *uint64, v uint64) { func Store64(addr *uint64, v uint64) {
systemstack(func() { addrLock(addr).lock()
lock(addrLock(addr))
*addr = v *addr = v
unlock(addrLock(addr)) addrLock(addr).unlock()
})
} }
//go:nosplit //go:nosplit
func atomicor8(addr *uint8, v uint8) { func Or8(addr *uint8, v uint8) {
// Align down to 4 bytes and use 32-bit CAS. // Align down to 4 bytes and use 32-bit CAS.
uaddr := uintptr(unsafe.Pointer(addr)) uaddr := uintptr(unsafe.Pointer(addr))
addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3)) addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
word := uint32(v) << ((uaddr & 3) * 8) // little endian word := uint32(v) << ((uaddr & 3) * 8) // little endian
for { for {
old := *addr32 old := *addr32
if cas(addr32, old, old|word) { if Cas(addr32, old, old|word) {
return return
} }
} }
} }
//go:nosplit //go:nosplit
func atomicand8(addr *uint8, v uint8) { func And8(addr *uint8, v uint8) {
// Align down to 4 bytes and use 32-bit CAS. // Align down to 4 bytes and use 32-bit CAS.
uaddr := uintptr(unsafe.Pointer(addr)) uaddr := uintptr(unsafe.Pointer(addr))
addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3)) addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
...@@ -158,7 +173,7 @@ func atomicand8(addr *uint8, v uint8) { ...@@ -158,7 +173,7 @@ func atomicand8(addr *uint8, v uint8) {
word |= ^mask word |= ^mask
for { for {
old := *addr32 old := *addr32
if cas(addr32, old, old&word) { if Cas(addr32, old, old&word) {
return return
} }
} }
......
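And8 cannot simply AND the shifted byte into its containing word, because that would clear the neighbouring bytes; the word |= ^mask step visible above fills every other byte position with 1-bits first so the 32-bit AND leaves them untouched. A sketch of that word construction (little-endian, as in the arm code; names are illustrative):

package sketch

// and8Word builds the 32-bit operand used by the CAS-based And8: the
// target byte position holds the new value, every other byte position
// holds 0xFF so the AND leaves it unchanged.
func and8Word(v uint8, addr uintptr) uint32 {
	shift := (addr & 3) * 8 // little-endian byte position within the word
	word := uint32(v) << shift
	mask := uint32(0xFF) << shift
	return word | ^mask
}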
...@@ -2,40 +2,41 @@ ...@@ -2,40 +2,41 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package runtime // +build arm64
package atomic
import "unsafe" import "unsafe"
//go:noescape //go:noescape
func xadd(ptr *uint32, delta int32) uint32 func Xadd(ptr *uint32, delta int32) uint32
//go:noescape //go:noescape
func xadd64(ptr *uint64, delta int64) uint64 func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape //go:noescape
//go:linkname xadduintptr runtime.xadd64 func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
func xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape //go:noescape
func xchg(ptr *uint32, new uint32) uint32 func Xchg(ptr *uint32, new uint32) uint32
//go:noescape //go:noescape
func xchg64(ptr *uint64, new uint64) uint64 func Xchg64(ptr *uint64, new uint64) uint64
//go:noescape //go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr func Xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape //go:noescape
func atomicload(ptr *uint32) uint32 func Load(ptr *uint32) uint32
//go:noescape //go:noescape
func atomicload64(ptr *uint64) uint64 func Load64(ptr *uint64) uint64
//go:noescape //go:noescape
func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer func Loadp(ptr unsafe.Pointer) unsafe.Pointer
//go:nosplit //go:nosplit
func atomicor8(addr *uint8, v uint8) { func Or8(addr *uint8, v uint8) {
// TODO(dfc) implement this in asm. // TODO(dfc) implement this in asm.
// Align down to 4 bytes and use 32-bit CAS. // Align down to 4 bytes and use 32-bit CAS.
uaddr := uintptr(unsafe.Pointer(addr)) uaddr := uintptr(unsafe.Pointer(addr))
...@@ -43,14 +44,14 @@ func atomicor8(addr *uint8, v uint8) { ...@@ -43,14 +44,14 @@ func atomicor8(addr *uint8, v uint8) {
word := uint32(v) << ((uaddr & 3) * 8) // little endian word := uint32(v) << ((uaddr & 3) * 8) // little endian
for { for {
old := *addr32 old := *addr32
if cas(addr32, old, old|word) { if Cas(addr32, old, old|word) {
return return
} }
} }
} }
//go:nosplit //go:nosplit
func atomicand8(addr *uint8, v uint8) { func And8(addr *uint8, v uint8) {
// TODO(dfc) implement this in asm. // TODO(dfc) implement this in asm.
// Align down to 4 bytes and use 32-bit CAS. // Align down to 4 bytes and use 32-bit CAS.
uaddr := uintptr(unsafe.Pointer(addr)) uaddr := uintptr(unsafe.Pointer(addr))
...@@ -60,20 +61,20 @@ func atomicand8(addr *uint8, v uint8) { ...@@ -60,20 +61,20 @@ func atomicand8(addr *uint8, v uint8) {
word |= ^mask word |= ^mask
for { for {
old := *addr32 old := *addr32
if cas(addr32, old, old&word) { if Cas(addr32, old, old&word) {
return return
} }
} }
} }
//go:noescape //go:noescape
func cas64(ptr *uint64, old, new uint64) bool func Cas64(ptr *uint64, old, new uint64) bool
//go:noescape //go:noescape
func atomicstore(ptr *uint32, val uint32) func Store(ptr *uint32, val uint32)
//go:noescape //go:noescape
func atomicstore64(ptr *uint64, val uint64) func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go. // NO go:noescape annotation; see atomic_pointer.go.
func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer) func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// uint32 runtimeinternalatomic·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$-8-12
MOVD ptr+0(FP), R0
LDARW (R0), R0
MOVW R0, ret+8(FP)
RET
// uint64 runtimeinternalatomic·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$-8-16
MOVD ptr+0(FP), R0
LDAR (R0), R0
MOVD R0, ret+8(FP)
RET
// void *runtimeinternalatomic·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$-8-16
MOVD ptr+0(FP), R0
LDAR (R0), R0
MOVD R0, ret+8(FP)
RET
TEXT runtimeinternalatomic·Storep1(SB), NOSPLIT, $0-16
B runtimeinternalatomic·Store64(SB)
TEXT runtimeinternalatomic·Store(SB), NOSPLIT, $0-12
MOVD ptr+0(FP), R0
MOVW val+8(FP), R1
STLRW R1, (R0)
RET
TEXT runtimeinternalatomic·Store64(SB), NOSPLIT, $0-16
MOVD ptr+0(FP), R0
MOVD val+8(FP), R1
STLR R1, (R0)
RET
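Load and Loadp use the acquire form LDAR(W) and the stores use the release form STLR(W), which is what makes the usual publish-then-observe pointer pattern safe: everything written before the store is visible to a reader that sees the new pointer. A stand-alone sketch of that pattern with sync/atomic (the config type and names are invented for illustration):

package sketch

import (
	"sync/atomic"
	"unsafe"
)

type config struct{ limit int }

var current unsafe.Pointer // holds a *config, published atomically

// publish stores a fully initialized *config; a reader that observes the
// new pointer through the atomic load also observes the initialized fields.
func publish(c *config) {
	atomic.StorePointer(&current, unsafe.Pointer(c))
}

func observe() *config {
	return (*config)(atomic.LoadPointer(&current))
}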
TEXT runtimeinternalatomic·Xchg(SB), NOSPLIT, $0-20
again:
MOVD ptr+0(FP), R0
MOVW new+8(FP), R1
LDAXRW (R0), R2
STLXRW R1, (R0), R3
CBNZ R3, again
MOVW R2, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xchg64(SB), NOSPLIT, $0-24
again:
MOVD ptr+0(FP), R0
MOVD new+8(FP), R1
LDAXR (R0), R2
STLXR R1, (R0), R3
CBNZ R3, again
MOVD R2, ret+16(FP)
RET
// bool runtimeinternalatomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
TEXT runtimeinternalatomic·Cas64(SB), NOSPLIT, $0-25
MOVD ptr+0(FP), R0
MOVD old+8(FP), R1
MOVD new+16(FP), R2
again:
LDAXR (R0), R3
CMP R1, R3
BNE ok
STLXR R2, (R0), R3
CBNZ R3, again
ok:
CSET EQ, R0
MOVB R0, ret+24(FP)
RET
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT runtimeinternalatomic·Xadd(SB), NOSPLIT, $0-20
again:
MOVD ptr+0(FP), R0
MOVW delta+8(FP), R1
LDAXRW (R0), R2
ADDW R2, R1, R2
STLXRW R2, (R0), R3
CBNZ R3, again
MOVW R2, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xadd64(SB), NOSPLIT, $0-24
again:
MOVD ptr+0(FP), R0
MOVD delta+8(FP), R1
LDAXR (R0), R2
ADD R2, R1, R2
STLXR R2, (R0), R3
CBNZ R3, again
MOVD R2, ret+16(FP)
RET
TEXT runtimeinternalatomic·Xchguintptr(SB), NOSPLIT, $0-24
B runtimeinternalatomic·Xchg64(SB)
This diff is collapsed.
This diff is collapsed.
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
TEXT runtimeinternalatomic·Cas(SB),NOSPLIT,$0
B runtimeinternalatomic·armcas(SB)
TEXT runtimeinternalatomic·Casp1(SB),NOSPLIT,$0
B runtimeinternalatomic·Cas(SB)
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// TODO(minux): this is only valid for ARMv6+
// bool armcas(int32 *val, int32 old, int32 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// }else
// return 0;
TEXT runtimeinternalatomic·Cas(SB),NOSPLIT,$0
B runtimeinternalatomic·armcas(SB)
TEXT runtimeinternalatomic·Casp1(SB),NOSPLIT,$0
B runtimeinternalatomic·Cas(SB)
This diff is collapsed.