Commit 0c6b55e7 authored by Keith Randall

runtime: convert map implementation to Go.

It's a bit slower, but not painfully so.  There is still room for
improvement (saving space so we can use nosplit, and removing the
requirement for hash/eq stubs).

benchmark                              old ns/op     new ns/op     delta
BenchmarkMegMap                        23.5          24.2          +2.98%
BenchmarkMegOneMap                     14.9          15.7          +5.37%
BenchmarkMegEqMap                      71668         72234         +0.79%
BenchmarkMegEmptyMap                   4.05          4.93          +21.73%
BenchmarkSmallStrMap                   21.9          22.5          +2.74%
BenchmarkMapStringKeysEight_16         23.1          26.3          +13.85%
BenchmarkMapStringKeysEight_32         21.9          25.0          +14.16%
BenchmarkMapStringKeysEight_64         21.9          25.1          +14.61%
BenchmarkMapStringKeysEight_1M         21.9          25.0          +14.16%
BenchmarkIntMap                        21.8          12.5          -42.66%
BenchmarkRepeatedLookupStrMapKey32     39.3          30.2          -23.16%
BenchmarkRepeatedLookupStrMapKey1M     322353        322675        +0.10%
BenchmarkNewEmptyMap                   129           136           +5.43%
BenchmarkMapIter                       137           107           -21.90%
BenchmarkMapIterEmpty                  7.14          8.71          +21.99%
BenchmarkSameLengthMap                 5.24          6.82          +30.15%
BenchmarkBigKeyMap                     34.5          35.3          +2.32%
BenchmarkBigValMap                     36.1          36.1          +0.00%
BenchmarkSmallKeyMap                   26.9          26.7          -0.74%

LGTM=rsc
R=golang-codereviews, dave, dvyukov, rsc, gobot, khr
CC=golang-codereviews
https://golang.org/cl/99380043
parent 3b1b8406
@@ -370,6 +370,14 @@ func (w *Walker) parseFile(dir, file string) (*ast.File, error) {
log.Fatalf("incorrect generated file: %s", err)
}
}
if w.context != nil && file == fmt.Sprintf("zruntime_defs_%s_%s.go", w.context.GOOS, w.context.GOARCH) {
// Just enough to keep the api checker happy.
src := "package runtime; type maptype struct{}; type _type struct{}; type alg struct{}"
f, err = parser.ParseFile(fset, filename, src, 0)
if err != nil {
log.Fatalf("incorrect generated file: %s", err)
}
}
if f == nil {
f, err = parser.ParseFile(fset, filename, nil, 0)
@@ -488,6 +496,11 @@ func (w *Walker) Import(name string) (pkg *types.Package) {
if !contains(filenames, n) {
filenames = append(filenames, n)
}
n = fmt.Sprintf("zruntime_defs_%s_%s.go", w.context.GOOS, w.context.GOARCH)
if !contains(filenames, n) {
filenames = append(filenames, n)
}
}
// Parse package files.
@@ -231,6 +231,8 @@ ok:
aggr = "cbctxt";
else if(streq(fields.p[1], "SEH"))
aggr = "seh";
else if(streq(fields.p[1], "Alg"))
aggr = "alg";
}
if(hasprefix(lines.p[i], "}"))
aggr = nil;
@@ -1222,7 +1222,7 @@ synthesizemaptypes(DWDie *die)
DWAttr *a;
hash = walktypedef(defgotype(lookup_or_diag("type.runtime.hmap")));
-bucket = walktypedef(defgotype(lookup_or_diag("type.runtime.bucket")));
+bucket = walktypedef(defgotype(lookup_or_diag("type.runtime.bmap")));
if (hash == nil)
return;
@@ -25,3 +25,24 @@ TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8
MOVL CX, 4(SP)
CALL ·callMethod(SB)
RET
// Stubs to give reflect package access to runtime services
// TODO: should probably be done another way.
TEXT ·makemap(SB),NOSPLIT,$0-0
JMP runtime·reflect_makemap(SB)
TEXT ·mapaccess(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapaccess(SB)
TEXT ·mapassign(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapassign(SB)
TEXT ·mapdelete(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapdelete(SB)
TEXT ·mapiterinit(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiterinit(SB)
TEXT ·mapiterkey(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiterkey(SB)
TEXT ·mapiternext(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiternext(SB)
TEXT ·maplen(SB),NOSPLIT,$0-0
JMP runtime·reflect_maplen(SB)
TEXT ·ismapkey(SB),NOSPLIT,$0-0
JMP runtime·reflect_ismapkey(SB)
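These stubs exist because package reflect declares the functions without bodies and resolves them against the runtime's reflect_* entry points at link time. A hedged sketch of what the Go-side declarations plausibly look like — the parameter lists below are assumptions for illustration, not copied from this CL:

// Sketch (assumed signatures): body-less declarations in package
// reflect, satisfied by the assembly jumps above.
package reflect

import "unsafe"

func makemap(t *rtype) unsafe.Pointer
func mapaccess(t *rtype, m, key unsafe.Pointer) (val unsafe.Pointer, ok bool)
func mapassign(t *rtype, m, key, val unsafe.Pointer)
func mapdelete(t *rtype, m, key unsafe.Pointer)
func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer
func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer, ok bool)
func mapiternext(it unsafe.Pointer)
func maplen(m unsafe.Pointer) int
func ismapkey(t *rtype) bool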
@@ -25,3 +25,24 @@ TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16
MOVQ CX, 8(SP)
CALL ·callMethod(SB)
RET
// Stubs to give reflect package access to runtime services
// TODO: should probably be done another way.
TEXT ·makemap(SB),NOSPLIT,$0-0
JMP runtime·reflect_makemap(SB)
TEXT ·mapaccess(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapaccess(SB)
TEXT ·mapassign(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapassign(SB)
TEXT ·mapdelete(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapdelete(SB)
TEXT ·mapiterinit(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiterinit(SB)
TEXT ·mapiterkey(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiterkey(SB)
TEXT ·mapiternext(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiternext(SB)
TEXT ·maplen(SB),NOSPLIT,$0-0
JMP runtime·reflect_maplen(SB)
TEXT ·ismapkey(SB),NOSPLIT,$0-0
JMP runtime·reflect_ismapkey(SB)
@@ -25,3 +25,24 @@ TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8
MOVL CX, 4(SP)
CALL ·callMethod(SB)
RET
// Stubs to give reflect package access to runtime services
// TODO: should probably be done another way.
TEXT ·makemap(SB),NOSPLIT,$0-0
JMP runtime·reflect_makemap(SB)
TEXT ·mapaccess(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapaccess(SB)
TEXT ·mapassign(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapassign(SB)
TEXT ·mapdelete(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapdelete(SB)
TEXT ·mapiterinit(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiterinit(SB)
TEXT ·mapiterkey(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiterkey(SB)
TEXT ·mapiternext(SB),NOSPLIT,$0-0
JMP runtime·reflect_mapiternext(SB)
TEXT ·maplen(SB),NOSPLIT,$0-0
JMP runtime·reflect_maplen(SB)
TEXT ·ismapkey(SB),NOSPLIT,$0-0
JMP runtime·reflect_ismapkey(SB)
@@ -25,3 +25,24 @@ TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8
MOVW R1, 8(R13)
BL ·callMethod(SB)
RET
// Stubs to give reflect package access to runtime services
// TODO: should probably be done another way.
TEXT ·makemap(SB),NOSPLIT,$-4-0
B runtime·reflect_makemap(SB)
TEXT ·mapaccess(SB),NOSPLIT,$-4-0
B runtime·reflect_mapaccess(SB)
TEXT ·mapassign(SB),NOSPLIT,$-4-0
B runtime·reflect_mapassign(SB)
TEXT ·mapdelete(SB),NOSPLIT,$-4-0
B runtime·reflect_mapdelete(SB)
TEXT ·mapiterinit(SB),NOSPLIT,$-4-0
B runtime·reflect_mapiterinit(SB)
TEXT ·mapiterkey(SB),NOSPLIT,$-4-0
B runtime·reflect_mapiterkey(SB)
TEXT ·mapiternext(SB),NOSPLIT,$-4-0
B runtime·reflect_mapiternext(SB)
TEXT ·maplen(SB),NOSPLIT,$-4-0
B runtime·reflect_maplen(SB)
TEXT ·ismapkey(SB),NOSPLIT,$-4-0
B runtime·reflect_ismapkey(SB)
@@ -425,6 +425,8 @@ runtime·nohash(uintptr *h, uintptr s, void *a)
runtime·panicstring("hash of unhashable type");
}
extern uintptr runtime·nohashcode;
void
runtime·noequal(bool *eq, uintptr s, void *a, void *b)
{
@@ -471,6 +473,7 @@ byte runtime·aeskeysched[HashRandomBytes];
void
runtime·hashinit(void)
{
runtime·nohashcode = (uintptr)runtime·nohash;
if(NaCl)
return;
@@ -1107,6 +1107,14 @@ TEXT runtime·memeq(SB),NOSPLIT,$0-12
MOVL count+8(FP), BX
JMP runtime·memeqbody(SB)
TEXT runtime·gomemeq(SB),NOSPLIT,$0-13
MOVL a+0(FP), SI
MOVL b+4(FP), DI
MOVL size+8(FP), BX
CALL runtime·memeqbody(SB)
MOVB AX, ret+12(FP)
RET
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
@@ -2197,3 +2205,81 @@ TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
TEXT runtime·timenow(SB), NOSPLIT, $0-0
JMP time·now(SB)
TEXT runtime·fastrand2(SB), NOSPLIT, $0-4
get_tls(CX)
MOVL g(CX), AX
MOVL g_m(AX), AX
MOVL m_fastrand(AX), DX
ADDL DX, DX
MOVL DX, BX
XORL $0x88888eef, DX
CMOVLMI BX, DX
MOVL DX, m_fastrand(AX)
MOVL DX, ret+0(FP)
RET
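The fastrand2 body above is a single step of a 32-bit linear feedback shift register. A hedged Go rendering of the same logic, with a state pointer standing in for m->fastrand:

// Double the state; when the doubled value has the sign bit set,
// fold in the feedback constant (mirrors the ADDL/XORL/CMOVLMI above).
func fastrand2(state *uint32) uint32 {
	x := *state
	x += x
	if int32(x) < 0 {
		x ^= 0x88888eef
	}
	*state = x
	return x
}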
// The gohash and goeq trampolines are necessary while we have
// both Go and C calls to alg functions. Once we move all call
// sites to Go, we can redo the hash/eq functions to use the
// Go calling convention and remove these.
// convert call to:
// func (alg unsafe.Pointer, p unsafe.Pointer, size uintptr, seed uintptr) uintptr
// to:
// func (hash *uintptr, size uintptr, p unsafe.Pointer)
TEXT runtime·gohash(SB), NOSPLIT, $12-20
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_gohash<>(SB)
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_gohash<>(SB)
MOVL a+0(FP), AX
MOVL alg_hash(AX), AX
MOVL p+4(FP), CX
MOVL size+8(FP), DX
MOVL seed+12(FP), DI
MOVL DI, ret+16(FP)
LEAL ret+16(FP), SI
MOVL SI, 0(SP)
MOVL DX, 4(SP)
MOVL CX, 8(SP)
PCDATA $PCDATA_StackMapIndex, $0
CALL *AX
RET
DATA gcargs_gohash<>+0x00(SB)/4, $1 // 1 stackmap
DATA gcargs_gohash<>+0x04(SB)/4, $10 // 5 args
DATA gcargs_gohash<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2))
GLOBL gcargs_gohash<>(SB),RODATA,$12
DATA gclocals_gohash<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_gohash<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_gohash<>(SB),RODATA,$8
// convert call to:
// func (alg unsafe.Pointer, p, q unsafe.Pointer, size uintptr) bool
// to:
// func (eq *bool, size uintptr, p, q unsafe.Pointer)
TEXT runtime·goeq(SB), NOSPLIT, $16-17
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_goeq<>(SB)
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_goeq<>(SB)
MOVL alg+0(FP), AX
MOVL alg_equal(AX), AX
MOVL p+4(FP), CX
MOVL q+8(FP), DX
MOVL size+12(FP), DI
LEAL ret+16(FP), SI
MOVL SI, 0(SP)
MOVL DI, 4(SP)
MOVL CX, 8(SP)
MOVL DX, 12(SP)
PCDATA $PCDATA_StackMapIndex, $0
CALL *AX
RET
DATA gcargs_goeq<>+0x00(SB)/4, $1 // 1 stackmap
DATA gcargs_goeq<>+0x04(SB)/4, $10 // 5 args
DATA gcargs_goeq<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsPointer<<4))
GLOBL gcargs_goeq<>(SB),RODATA,$12
DATA gclocals_goeq<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_goeq<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_goeq<>(SB),RODATA,$8
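The trampolines adapt the Go declarations in stubs.go (later in this diff) to the C-side alg functions, which take the seed in *h and return the hash through that same pointer. A hedged Go model of the adaptation, using Go func types to stand in for the C-calling-convention targets (the real code must stay in assembly; all names here are illustrative):

// Models of the C-side signatures.
type cHashFunc func(h *uintptr, size uintptr, p unsafe.Pointer)
type cEqualFunc func(eq *bool, size uintptr, p, q unsafe.Pointer)

func gohashModel(hash cHashFunc, p unsafe.Pointer, size, seed uintptr) uintptr {
	ret := seed         // seed goes in through *h...
	hash(&ret, size, p) // ...and the result comes back out of *h
	return ret
}

func goeqModel(equal cEqualFunc, p, q unsafe.Pointer, size uintptr) bool {
	var ret bool
	equal(&ret, size, p, q)
	return ret
}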
@@ -1075,6 +1075,14 @@ TEXT runtime·memeq(SB),NOSPLIT,$0-24
MOVQ count+16(FP), BX
JMP runtime·memeqbody(SB)
TEXT runtime·gomemeq(SB),NOSPLIT,$0-25
MOVQ a+0(FP), SI
MOVQ b+8(FP), DI
MOVQ size+16(FP), BX
CALL runtime·memeqbody(SB)
MOVB AX, ret+24(FP)
RET
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
@@ -2236,3 +2244,81 @@ TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
TEXT runtime·timenow(SB), NOSPLIT, $0-0
JMP time·now(SB)
TEXT runtime·fastrand2(SB), NOSPLIT, $0-4
get_tls(CX)
MOVQ g(CX), AX
MOVQ g_m(AX), AX
MOVL m_fastrand(AX), DX
ADDL DX, DX
MOVL DX, BX
XORL $0x88888eef, DX
CMOVLMI BX, DX
MOVL DX, m_fastrand(AX)
MOVL DX, ret+0(FP)
RET
// The gohash and goeq trampolines are necessary while we have
// both Go and C calls to alg functions. Once we move all call
// sites to Go, we can redo the hash/eq functions to use the
// Go calling convention and remove these.
// convert call to:
// func (alg unsafe.Pointer, p unsafe.Pointer, size uintptr, seed uintptr) uintptr
// to:
// func (hash *uintptr, size uintptr, p unsafe.Pointer)
TEXT runtime·gohash(SB), NOSPLIT, $24-40
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_gohash<>(SB)
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_gohash<>(SB)
MOVQ a+0(FP), AX
MOVQ alg_hash(AX), AX
MOVQ p+8(FP), CX
MOVQ size+16(FP), DX
MOVQ seed+24(FP), DI
MOVQ DI, ret+32(FP)
LEAQ ret+32(FP), SI // TODO: go vet complains here: "invalid LEAQ of ret+32(FP); bool is 1-byte value"
MOVQ SI, 0(SP)
MOVQ DX, 8(SP)
MOVQ CX, 16(SP)
PCDATA $PCDATA_StackMapIndex, $0
CALL *AX
RET
DATA gcargs_gohash<>+0x00(SB)/4, $1 // 1 stackmap
DATA gcargs_gohash<>+0x04(SB)/4, $10 // 5 args
DATA gcargs_gohash<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2))
GLOBL gcargs_gohash<>(SB),RODATA,$12
DATA gclocals_gohash<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_gohash<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_gohash<>(SB),RODATA,$8
// convert call to:
// func (alg unsafe.Pointer, p, q unsafe.Pointer, size uintptr) bool
// to:
// func (eq *bool, size uintptr, p, q unsafe.Pointer)
TEXT runtime·goeq(SB), NOSPLIT, $32-33
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_goeq<>(SB)
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_goeq<>(SB)
MOVQ alg+0(FP), AX
MOVQ alg_equal(AX), AX
MOVQ p+8(FP), CX
MOVQ q+16(FP), DX
MOVQ size+24(FP), DI
LEAQ ret+32(FP), SI
MOVQ SI, 0(SP)
MOVQ DI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DX, 24(SP)
PCDATA $PCDATA_StackMapIndex, $0
CALL *AX
RET
DATA gcargs_goeq<>+0x00(SB)/4, $1 // 1 stackmap
DATA gcargs_goeq<>+0x04(SB)/4, $10 // 5 args
DATA gcargs_goeq<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsPointer<<4))
GLOBL gcargs_goeq<>(SB),RODATA,$12
DATA gclocals_goeq<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_goeq<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_goeq<>(SB),RODATA,$8
@@ -653,8 +653,8 @@ TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
RET
TEXT runtime·memclr(SB),NOSPLIT,$0-8
-MOVL addr+0(FP), DI
-MOVL count+4(FP), CX
+MOVL ptr+0(FP), DI
+MOVL n+4(FP), CX
MOVQ CX, BX
ANDQ $7, BX
SHRQ $3, CX
@@ -730,6 +730,14 @@ TEXT runtime·memeq(SB),NOSPLIT,$0-12
MOVL count+8(FP), BX
JMP runtime·memeqbody(SB)
TEXT runtime·gomemeq(SB),NOSPLIT,$0-13
MOVL a+0(FP), SI
MOVL b+4(FP), DI
MOVL size+8(FP), BX
CALL runtime·memeqbody(SB)
MOVB AX, ret+12(FP)
RET
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
@@ -1108,3 +1116,81 @@ eqret:
TEXT runtime·timenow(SB), NOSPLIT, $0-0
JMP time·now(SB)
TEXT runtime·fastrand2(SB), NOSPLIT, $0-4
get_tls(CX)
MOVL g(CX), AX
MOVL g_m(AX), AX
MOVL m_fastrand(AX), DX
ADDL DX, DX
MOVL DX, BX
XORL $0x88888eef, DX
CMOVLMI BX, DX
MOVL DX, m_fastrand(AX)
MOVL DX, ret+0(FP)
RET
// The gohash and goeq trampolines are necessary while we have
// both Go and C calls to alg functions. Once we move all call
// sites to Go, we can redo the hash/eq functions to use the
// Go calling convention and remove these.
// convert call to:
// func (alg unsafe.Pointer, p unsafe.Pointer, size uintptr, seed uintptr) uintptr
// to:
// func (hash *uintptr, size uintptr, p unsafe.Pointer)
TEXT runtime·gohash(SB), NOSPLIT, $12-20
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_gohash<>(SB)
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_gohash<>(SB)
MOVL a+0(FP), AX
MOVL alg_hash(AX), AX
MOVL p+4(FP), CX
MOVL size+8(FP), DX
MOVL seed+12(FP), DI
MOVL DI, ret+16(FP)
LEAL ret+16(FP), SI
MOVL SI, 0(SP)
MOVL DX, 4(SP)
MOVL CX, 8(SP)
PCDATA $PCDATA_StackMapIndex, $0
CALL *AX
RET
DATA gcargs_gohash<>+0x00(SB)/4, $1 // 1 stackmap
DATA gcargs_gohash<>+0x04(SB)/4, $10 // 5 args
DATA gcargs_gohash<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2))
GLOBL gcargs_gohash<>(SB),RODATA,$12
DATA gclocals_gohash<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_gohash<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_gohash<>(SB),RODATA,$8
// convert call to:
// func (alg unsafe.Pointer, p, q unsafe.Pointer, size uintptr) bool
// to:
// func (eq *bool, size uintptr, p, q unsafe.Pointer)
TEXT runtime·goeq(SB), NOSPLIT, $16-17
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_goeq<>(SB)
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_goeq<>(SB)
MOVL alg+0(FP), AX
MOVL alg_equal(AX), AX
MOVL p+4(FP), CX
MOVL q+8(FP), DX
MOVL size+12(FP), DI
LEAL ret+16(FP), SI
MOVL SI, 0(SP)
MOVL DI, 4(SP)
MOVL CX, 8(SP)
MOVL DX, 12(SP)
PCDATA $PCDATA_StackMapIndex, $0
CALL *AX
RET
DATA gcargs_goeq<>+0x00(SB)/4, $1 // 1 stackmap
DATA gcargs_goeq<>+0x04(SB)/4, $10 // 5 args
DATA gcargs_goeq<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsPointer<<4))
GLOBL gcargs_goeq<>(SB),RODATA,$12
DATA gclocals_goeq<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_goeq<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_goeq<>(SB),RODATA,$8
@@ -670,6 +670,25 @@ _next:
MOVW $0, R0
RET
TEXT runtime·gomemeq(SB),NOSPLIT,$-4-13
MOVW a+0(FP), R1
MOVW b+4(FP), R2
MOVW size+8(FP), R3
ADD R1, R3, R6
MOVW $1, R0
MOVB R0, ret+12(FP)
_next2:
CMP R1, R6
RET.EQ
MOVBU.P 1(R1), R4
MOVBU.P 1(R2), R5
CMP R4, R5
BEQ _next2
MOVW $0, R0
MOVB R0, ret+12(FP)
RET
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
@@ -1190,3 +1209,77 @@ TEXT runtime·duffcopy(SB), NOSPLIT, $0-0
MOVW.P 4(R1), R0
MOVW.P R0, 4(R2)
RET
TEXT runtime·fastrand2(SB), NOSPLIT, $-4-4
MOVW g_m(g), R1
MOVW m_fastrand(R1), R0
ADD.S R0, R0
EOR.MI $0x88888eef, R0
MOVW R0, m_fastrand(R1)
MOVW R0, ret+0(FP)
RET
// The gohash and goeq trampolines are necessary while we have
// both Go and C calls to alg functions. Once we move all call
// sites to Go, we can redo the hash/eq functions to use the
// Go calling convention and remove these.
// convert call to:
// func (alg unsafe.Pointer, p unsafe.Pointer, size uintptr, seed uintptr) uintptr
// to:
// func (hash *uintptr, size uintptr, p unsafe.Pointer)
TEXT runtime·gohash(SB), NOSPLIT, $12-20
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_gohash<>(SB)
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_gohash<>(SB)
MOVW a+0(FP), R0
MOVW alg_hash(R0), R0
MOVW p+4(FP), R1
MOVW size+8(FP), R2
MOVW seed+12(FP), R3
MOVW R3, ret+16(FP)
ADD $36, R13, R4
MOVW R4, 4(R13)
MOVW R2, 8(R13)
MOVW R1, 12(R13)
PCDATA $PCDATA_StackMapIndex, $0
BL (R0)
RET
DATA gcargs_gohash<>+0x00(SB)/4, $1 // 1 stackmap
DATA gcargs_gohash<>+0x04(SB)/4, $10 // 5 args
DATA gcargs_gohash<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2))
GLOBL gcargs_gohash<>(SB),RODATA,$12
DATA gclocals_gohash<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_gohash<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_gohash<>(SB),RODATA,$8
// convert call to:
// func (alg unsafe.Pointer, p, q unsafe.Pointer, size uintptr) bool
// to:
// func (eq *bool, size uintptr, p, q unsafe.Pointer)
TEXT runtime·goeq(SB), NOSPLIT, $16-17
FUNCDATA $FUNCDATA_ArgsPointerMaps,gcargs_goeq<>(SB)
FUNCDATA $FUNCDATA_LocalsPointerMaps,gclocals_goeq<>(SB)
MOVW alg+0(FP), R0
MOVW alg_equal(R0), R0
MOVW p+4(FP), R1
MOVW q+8(FP), R2
MOVW size+12(FP), R3
ADD $40, R13, R4
MOVW R4, 4(R13)
MOVW R3, 8(R13)
MOVW R2, 12(R13)
MOVW R1, 16(R13)
PCDATA $PCDATA_StackMapIndex, $0
BL (R0)
RET
DATA gcargs_goeq<>+0x00(SB)/4, $1 // 1 stackmap
DATA gcargs_goeq<>+0x04(SB)/4, $10 // 5 args
DATA gcargs_goeq<>+0x08(SB)/4, $(const_BitsPointer+(const_BitsPointer<<2)+(const_BitsPointer<<4))
GLOBL gcargs_goeq<>(SB),RODATA,$12
DATA gclocals_goeq<>+0x00(SB)/4, $1 // 1 stackmap
DATA gclocals_goeq<>+0x04(SB)/4, $0 // 0 locals
GLOBL gclocals_goeq<>(SB),RODATA,$8
@@ -10,5 +10,5 @@
#include "malloc.h"
#include "type.h"
#include "race.h"
#include "hashmap.h"
#include "chan.h"
#include "mprof.h"
@@ -80,7 +80,6 @@ var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
-var hashLoad float64 // declared in hashmap.c
var HashLoad = &hashLoad
func memclrBytes(b []byte)
This diff is collapsed.
This diff is collapsed.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the implementation of Go's map type.
//
// The map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
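To make the bucket/tophash split concrete, here is a hedged Go sketch of the selection step (constant names follow the new Go code's lower-case spelling and are assumptions here; ptrSize is computed inline):

// bucketAndTop splits a hash: the low-order bits pick one of 2^B
// buckets, and the top byte becomes the entry's tophash, bumped past
// the reserved marks (Empty, Evacuated*).
func bucketAndTop(hash uintptr, B uint8) (bucket uintptr, top uint8) {
	const ptrSize = 4 << (^uintptr(0) >> 63) // 4 on 32-bit, 8 on 64-bit
	const minTopHash = 4
	bucket = hash & (uintptr(1)<<B - 1)
	top = uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return bucket, top
}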
// Maximum number of key/value pairs a bucket can hold.
#define BUCKETSIZE 8
// Maximum average load of a bucket that triggers growth.
#define LOAD 6.5
// Picking LOAD: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
// LOAD %overflow bytes/entry hitprobe missprobe
// 4.00 2.13 20.77 3.00 4.00
// 4.50 4.05 17.30 3.25 4.50
// 5.00 6.85 14.77 3.50 5.00
// 5.50 10.55 12.94 3.75 5.50
// 6.00 15.27 11.67 4.00 6.00
// 6.50 20.90 10.79 4.25 6.50
// 7.00 27.14 10.15 4.50 7.00
// 7.50 34.03 9.73 4.75 7.50
// 8.00 41.10 9.40 5.00 8.00
//
// %overflow = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe = # of entries to check when looking up a present key
// missprobe = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
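The Go side of this CL spells LOAD as loadFactor (stubs.go below exports it via hashLoad). As a hedged sketch, the table above is tuning the growth trigger: grow when the live count would exceed LOAD entries per bucket on average (growthNeeded is an illustrative name, not the CL's):

const loadFactor = 6.5

// growthNeeded reports whether the table should grow from 2^B to
// 2^(B+1) buckets, i.e. average occupancy would pass the load factor.
func growthNeeded(count int, B uint8) bool {
	return float64(count) > loadFactor*float64(uintptr(1)<<B)
}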
// Maximum key or value size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Fast versions cannot handle big values - the cutoff size for
// fast versions in ../../cmd/gc/walk.c must be at most this value.
#define MAXKEYSIZE 128
#define MAXVALUESIZE 128
typedef struct Bucket Bucket;
struct Bucket
{
// Note: the format of the Bucket is encoded in ../../cmd/gc/reflect.c and
// ../reflect/type.go. Don't change this structure without also changing that code!
uint8 tophash[BUCKETSIZE]; // top 8 bits of hash of each entry (or special mark below)
Bucket *overflow; // overflow bucket, if any
uint64 data[1]; // BUCKETSIZE keys followed by BUCKETSIZE values
};
// NOTE: packing all the keys together and then all the values together makes the
// code a bit more complicated than alternating key/value/key/value/... but it allows
// us to eliminate padding which would be needed for, e.g., map[int64]int8.
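A quick worked example of that padding claim for map[int64]int8 on a 64-bit platform:

// Interleaved storage pads every value up to the key's alignment:
type pair struct {
	k int64
	v int8 // 7 bytes of padding follow
}

// unsafe.Sizeof(pair{}) == 16, so 8 interleaved pairs cost 128 bytes.
// Packed storage has no per-pair padding: [8]int64 keys (64 bytes)
// followed by [8]int8 values (8 bytes) is 72 bytes of bucket data.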
// tophash values. We reserve a few possibilities for special marks.
// Each bucket (including its overflow buckets, if any) will have either all or none of its
// entries in the Evacuated* states (except during the evacuate() method, which only happens
// during map writes and thus no one else can observe the map during that time).
enum
{
Empty = 0, // cell is empty
EvacuatedEmpty = 1, // cell is empty, bucket is evacuated.
EvacuatedX = 2, // key/value is valid. Entry has been evacuated to first half of larger table.
EvacuatedY = 3, // same as above, but evacuated to second half of larger table.
MinTopHash = 4, // minimum tophash for a normal filled cell.
};
#define evacuated(b) ((b)->tophash[0] > Empty && (b)->tophash[0] < MinTopHash)
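The Go counterpart in the new hashmap.go is along these lines (Bucket is spelled bmap on the Go side, as the DWARF change above shows; the constants mirror Empty/MinTopHash):

const (
	empty      = 0 // matches Empty
	minTopHash = 4 // matches MinTopHash
)

type bmap struct {
	tophash [8]uint8
	// overflow pointer and packed keys/values follow
}

// evacuated reports whether a bucket has been moved to the new table:
// its first tophash slot holds one of the Evacuated* marks.
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}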
struct Hmap
{
// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
// ../reflect/type.go. Don't change this structure without also changing that code!
uintgo count; // # live cells == size of map. Must be first (used by len() builtin)
uint32 flags;
uint32 hash0; // hash seed
uint8 B; // log_2 of # of buckets (can hold up to LOAD * 2^B items)
uint8 keysize; // key size in bytes
uint8 valuesize; // value size in bytes
uint16 bucketsize; // bucket size in bytes
byte *buckets; // array of 2^B Buckets. may be nil if count==0.
byte *oldbuckets; // previous bucket array of half the size, non-nil only when growing
uintptr nevacuate; // progress counter for evacuation (buckets less than this have been evacuated)
};
// possible flags
enum
{
IndirectKey = 1, // storing pointers to keys
IndirectValue = 2, // storing pointers to values
Iterator = 4, // there may be an iterator using buckets
OldIterator = 8, // there may be an iterator using oldbuckets
};
// Macros for dereferencing indirect keys
#define IK(h, p) (((h)->flags & IndirectKey) != 0 ? *(byte**)(p) : (p))
#define IV(h, p) (((h)->flags & IndirectValue) != 0 ? *(byte**)(p) : (p))
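A hedged Go sketch of the IK/IV dereference (the flag value matches IndirectKey above; helper name is illustrative and the snippet assumes import "unsafe"):

const indirectKey = 1

// keySlot returns the key's address: when keys are too big to store
// inline (> MAXKEYSIZE), the slot holds a pointer that must be
// followed; otherwise the slot is the key itself.
func keySlot(flags uint32, slot unsafe.Pointer) unsafe.Pointer {
	if flags&indirectKey != 0 {
		return *(*unsafe.Pointer)(slot)
	}
	return slot
}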
// If you modify Hiter, also change cmd/gc/reflect.c to indicate
// the layout of this structure.
struct Hiter
{
uint8* key; // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c).
uint8* value; // Must be in second position (see cmd/gc/range.c).
MapType *t;
Hmap *h;
byte *buckets; // bucket ptr at hash_iter initialization time
struct Bucket *bptr; // current bucket
uint8 offset; // intra-bucket offset to start from during iteration (should be big enough to hold BUCKETSIZE-1)
bool done;
// state of table at time iterator is initialized
uint8 B;
// iter state
uintptr bucket;
uintptr i;
intptr check_bucket;
};
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Fast hashmap lookup specialized to a specific key type.
// Included by hashmap.c once for each specialized type.
// +build ignore
// Because this file is #included, it cannot be processed by goc2c,
// so we have to handle the Go results ourselves.
#pragma textflag NOSPLIT
void
HASH_LOOKUP1(MapType *t, Hmap *h, KEYTYPE key, GoOutput base, ...)
{
uintptr bucket, i;
Bucket *b;
KEYTYPE *k;
byte *v, **valueptr;
uint8 top;
int8 keymaybe;
valueptr = (byte**)&base;
if(debug) {
runtime·prints("runtime.mapaccess1_fastXXX: map=");
runtime·printpointer(h);
runtime·prints("; key=");
t->key->alg->print(t->key->size, &key);
runtime·prints("\n");
}
if(h == nil || h->count == 0) {
*valueptr = t->elem->zero;
return;
}
if(raceenabled)
runtime·racereadpc(h, runtime·getcallerpc(&t), HASH_LOOKUP1);
if(docheck)
check(t, h);
if(h->B == 0) {
// One-bucket table. Don't hash, just check each bucket entry.
b = (Bucket*)h->buckets;
if(FASTKEY(key)) {
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
if(b->tophash[i] == Empty)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k) || SLOW_EQ(key, *k)) {
*valueptr = v;
return;
}
}
} else {
keymaybe = -1;
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
if(b->tophash[i] == Empty)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k)) {
*valueptr = v;
return;
}
if(MAYBE_EQ(key, *k)) {
if(keymaybe >= 0) {
// Two same-length strings in this bucket.
// use slow path.
// TODO: keep track of more than just 1. We could
// afford about 3 equals calls before it would be more
// expensive than 1 hash + 1 equals.
goto dohash;
}
keymaybe = i;
}
}
if(keymaybe >= 0) {
k = (KEYTYPE*)b->data + keymaybe;
if(SLOW_EQ(key, *k)) {
*valueptr = (byte*)((KEYTYPE*)b->data + BUCKETSIZE) + keymaybe * h->valuesize;
return;
}
}
}
} else {
dohash:
bucket = h->hash0;
HASHFUNC(&bucket, sizeof(KEYTYPE), &key);
top = bucket >> (sizeof(uintptr)*8 - 8);
if(top < MinTopHash)
top += MinTopHash;
bucket &= (((uintptr)1 << h->B) - 1);
if(h->oldbuckets != nil) {
i = bucket & (((uintptr)1 << (h->B - 1)) - 1);
b = (Bucket*)(h->oldbuckets + i * h->bucketsize);
if(evacuated(b)) {
b = (Bucket*)(h->buckets + bucket * h->bucketsize);
}
} else {
b = (Bucket*)(h->buckets + bucket * h->bucketsize);
}
do {
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
if(b->tophash[i] != top)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k) || SLOW_EQ(key, *k)) {
*valueptr = v;
return;
}
}
b = b->overflow;
} while(b != nil);
}
*valueptr = t->elem->zero;
}
#pragma textflag NOSPLIT
void
HASH_LOOKUP2(MapType *t, Hmap *h, KEYTYPE key, GoOutput base, ...)
{
uintptr bucket, i;
Bucket *b;
KEYTYPE *k;
byte *v, **valueptr;
uint8 top;
int8 keymaybe;
bool *okptr;
valueptr = (byte**)&base;
okptr = (bool*)(valueptr+1);
if(debug) {
runtime·prints("runtime.mapaccess2_fastXXX: map=");
runtime·printpointer(h);
runtime·prints("; key=");
t->key->alg->print(t->key->size, &key);
runtime·prints("\n");
}
if(h == nil || h->count == 0) {
*valueptr = t->elem->zero;
*okptr = false;
return;
}
if(raceenabled)
runtime·racereadpc(h, runtime·getcallerpc(&t), HASH_LOOKUP2);
if(docheck)
check(t, h);
if(h->B == 0) {
// One-bucket table. Don't hash, just check each bucket entry.
b = (Bucket*)h->buckets;
if(FASTKEY(key)) {
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
if(b->tophash[i] == Empty)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k) || SLOW_EQ(key, *k)) {
*valueptr = v;
*okptr = true;
return;
}
}
} else {
keymaybe = -1;
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
if(b->tophash[i] == Empty)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k)) {
*valueptr = v;
*okptr = true;
return;
}
if(MAYBE_EQ(key, *k)) {
if(keymaybe >= 0) {
// Two same-length strings in this bucket.
// use slow path.
// TODO: keep track of more than just 1. We could
// afford about 3 equals calls before it would be more
// expensive than 1 hash + 1 equals.
goto dohash;
}
keymaybe = i;
}
}
if(keymaybe >= 0) {
k = (KEYTYPE*)b->data + keymaybe;
if(SLOW_EQ(key, *k)) {
*valueptr = (byte*)((KEYTYPE*)b->data + BUCKETSIZE) + keymaybe * h->valuesize;
*okptr = true;
return;
}
}
}
} else {
dohash:
bucket = h->hash0;
HASHFUNC(&bucket, sizeof(KEYTYPE), &key);
top = bucket >> (sizeof(uintptr)*8 - 8);
if(top < MinTopHash)
top += MinTopHash;
bucket &= (((uintptr)1 << h->B) - 1);
if(h->oldbuckets != nil) {
i = bucket & (((uintptr)1 << (h->B - 1)) - 1);
b = (Bucket*)(h->oldbuckets + i * h->bucketsize);
if(evacuated(b)) {
b = (Bucket*)(h->buckets + bucket * h->bucketsize);
}
} else {
b = (Bucket*)(h->buckets + bucket * h->bucketsize);
}
do {
for(i = 0, k = (KEYTYPE*)b->data, v = (byte*)(k + BUCKETSIZE); i < BUCKETSIZE; i++, k++, v += h->valuesize) {
if(b->tophash[i] != top)
continue;
if(QUICK_NE(key, *k))
continue;
if(QUICK_EQ(key, *k) || SLOW_EQ(key, *k)) {
*valueptr = v;
*okptr = true;
return;
}
}
b = b->overflow;
} while(b != nil);
}
*valueptr = t->elem->zero;
*okptr = false;
}
This diff is collapsed.
@@ -398,7 +398,7 @@ struct SpecialFinalizer
};
// The described object is being heap profiled.
-typedef struct Bucket Bucket; // from mprof.goc
+typedef struct Bucket Bucket; // from mprof.h
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
@@ -29,7 +29,7 @@
TEXT runtime·memmove(SB), NOSPLIT, $0-12
MOVL to+0(FP), DI
-MOVL fr+4(FP), SI
+MOVL from+4(FP), SI
MOVL n+8(FP), BX
// REP instructions have a high startup cost, so we handle small sizes
@@ -31,7 +31,7 @@
TEXT runtime·memmove(SB), NOSPLIT, $0-24
MOVQ to+0(FP), DI
-MOVQ fr+8(FP), SI
+MOVQ from+8(FP), SI
MOVQ n+16(FP), BX
// REP instructions have a high startup cost, so we handle small sizes
@@ -9,6 +9,7 @@ package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "mprof.h"
#include "defs_GOOS_GOARCH.h"
#include "type.h"
@@ -20,58 +21,6 @@ static Lock proflock;
enum { MProf, BProf }; // profile types
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
struct Bucket
{
Bucket *next; // next in hash list
Bucket *allnext; // next in list of all mbuckets/bbuckets
int32 typ;
// Generally unions can break precise GC,
// this one is fine because it does not contain pointers.
union
{
struct // typ == MProf
{
// The following complex 3-stage scheme of stats accumulation
// is required to obtain a consistent picture of mallocs and frees
// for some point in time.
// The problem is that mallocs come in real time, while frees
// come only after a GC during concurrent sweeping. So if we would
// naively count them, we would get a skew toward mallocs.
//
// Mallocs are accounted in recent stats.
// Explicit frees are accounted in recent stats.
// GC frees are accounted in prev stats.
// After GC prev stats are added to final stats and
// recent stats are moved into prev stats.
uintptr allocs;
uintptr frees;
uintptr alloc_bytes;
uintptr free_bytes;
uintptr prev_allocs; // since last but one till last gc
uintptr prev_frees;
uintptr prev_alloc_bytes;
uintptr prev_free_bytes;
uintptr recent_allocs; // since last gc till now
uintptr recent_frees;
uintptr recent_alloc_bytes;
uintptr recent_free_bytes;
};
struct // typ == BProf
{
int64 count;
int64 cycles;
};
};
uintptr hash; // hash of size + stk
uintptr size;
uintptr nstk;
uintptr stk[1];
};
enum {
BuckHashSize = 179999,
};
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
struct Bucket
{
Bucket *next; // next in hash list
Bucket *allnext; // next in list of all mbuckets/bbuckets
int32 typ;
// Generally unions can break precise GC,
// this one is fine because it does not contain pointers.
union
{
struct // typ == MProf
{
// The following complex 3-stage scheme of stats accumulation
// is required to obtain a consistent picture of mallocs and frees
// for some point in time.
// The problem is that mallocs come in real time, while frees
// come only after a GC during concurrent sweeping. So if we would
// naively count them, we would get a skew toward mallocs.
//
// Mallocs are accounted in recent stats.
// Explicit frees are accounted in recent stats.
// GC frees are accounted in prev stats.
// After GC prev stats are added to final stats and
// recent stats are moved into prev stats.
uintptr allocs;
uintptr frees;
uintptr alloc_bytes;
uintptr free_bytes;
uintptr prev_allocs; // since last but one till last gc
uintptr prev_frees;
uintptr prev_alloc_bytes;
uintptr prev_free_bytes;
uintptr recent_allocs; // since last gc till now
uintptr recent_frees;
uintptr recent_alloc_bytes;
uintptr recent_free_bytes;
};
struct // typ == BProf
{
int64 count;
int64 cycles;
};
};
uintptr hash; // hash of size + stk
uintptr size;
uintptr nstk;
uintptr stk[1];
};
@@ -12,6 +12,13 @@ import (
"unsafe"
)
const (
// TODO: where should these live?
kindNoPointers = 1 << 7
kindArray = 17
kindStruct = 25
)
// RaceDisable disables handling of race events in the current goroutine.
func RaceDisable()
@@ -32,3 +39,16 @@ func RaceSemrelease(s *uint32)
// private interface for the runtime
const raceenabled = true
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
kind := t.kind &^ kindNoPointers
if kind == kindArray || kind == kindStruct {
// for composite objects we have to read every address
// because a write might happen to any subobject.
racereadrangepc(addr, int(t.size), callerpc, pc)
} else {
// for non-composite objects we can read just the start
// address, as any write must write the first byte.
racereadpc(addr, callerpc, pc)
}
}
@@ -8,4 +8,11 @@
package runtime
import (
"unsafe"
)
const raceenabled = false
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
}
@@ -144,7 +144,7 @@ func slicerunetostring(a []rune) string {
}
type stringStruct struct {
-str *byte
+str unsafe.Pointer
len int
}
@@ -156,7 +156,7 @@ func cstringToGo(str uintptr) (s string) {
}
}
t := (*stringStruct)(unsafe.Pointer(&s))
-t.str = (*byte)(unsafe.Pointer(str))
+t.str = unsafe.Pointer(str)
t.len = i
return
}
@@ -11,6 +11,10 @@ import "unsafe"
// Assembly implementations are in various files, see comments with
// each function.
const (
ptrSize = unsafe.Sizeof((*byte)(nil))
)
// rawstring allocates storage for a new string. The returned
// string and byte slice both refer to the same storage.
// The storage is not zeroed. Callers should use
@@ -26,5 +30,55 @@ func rawruneslice(size int) []rune
//go:noescape
func gogetcallerpc(p unsafe.Pointer) uintptr
//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
//go:noescape
func racereadrangepc(addr unsafe.Pointer, len int, callpc, pc uintptr)
// Should be a built-in for unsafe.Pointer?
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(p) + x)
}
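A typical use of add in the new map code, sketched with the Hmap field names shown earlier in this diff (a hedged illustration, not copied verbatim from the CL):

// bucketAt steps from the bucket array base to bucket i, e.g.
// bucketAt(h.buckets, i, uintptr(h.bucketsize)).
func bucketAt(buckets unsafe.Pointer, i, bucketsize uintptr) unsafe.Pointer {
	return add(buckets, i*bucketsize)
}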
// Make a new object of the given type
// in stubs.goc
func unsafe_New(t *_type) unsafe.Pointer
func unsafe_NewArray(t *_type, n uintptr) unsafe.Pointer
// memclr clears n bytes starting at ptr.
// in memclr_*.s
func memclr(ptr unsafe.Pointer, n uintptr)
// memmove copies n bytes from "from" to "to".
// in memmove_*.s
func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
// in asm_*.s
func fastrand2() uint32
// in asm_*.s
// if *p == x { *p = y; return true } else { return false }, atomically
//go:noescape
func gocas(p *uint32, x uint32, y uint32) bool
// in asm_*.s
//go:noescape
func gohash(a *alg, p unsafe.Pointer, size uintptr, seed uintptr) uintptr
// in asm_*.s
//go:noescape
func goeq(alg *alg, p, q unsafe.Pointer, size uintptr) bool
// exported value for testing
var hashLoad = loadFactor
// in asm_*.s
//go:noescape
func gomemeq(a, b unsafe.Pointer, size uintptr) bool
// Code pointer for the nohash algorithm. Used for producing better error messages.
var nohashcode uintptr
@@ -75,3 +75,18 @@ func rawruneslice(size intgo) (b Slice) {
func gostringW(str Slice) (s String) {
s = runtime·gostringw((uint16*)str.array);
}
#pragma textflag NOSPLIT
func runtime·unsafe_New(t *Type) (ret *byte) {
ret = runtime·cnew(t);
}
#pragma textflag NOSPLIT
func runtime·unsafe_NewArray(t *Type, n int) (ret *byte) {
ret = runtime·cnewarray(t, n);
}
#pragma textflag NOSPLIT
func runtime·gocas(p *uint32, x uint32, y uint32) (ret bool) {
ret = runtime·cas(p, x, y);
}