Commit 36f30ba2 authored by Keith Randall, committed by Keith Randall

cmd/compile,runtime: generate hash functions only for types which are map keys

Right now we generate hash functions for all types, just in case they
are used as map keys. That's a lot of wasted effort and binary size
for types which will never be used as a map key. Instead, generate
hash functions only for types that we know are map keys.

Just doing that is a bit too simple, since maps with an interface type
as a key might have to hash any concrete key type that implements that
interface. So for that case, implement hashing of such types at
runtime (instead of with generated code). It will be slower, but only
for maps with interface types as keys, and maybe only a bit slower as
the aeshash time probably dominates the dispatch time.

Reorg where we keep the equals and hash functions. Move the hash function
from the key type to the map type, saving a field in every non-map type.
That leaves only one function in the alg structure, so get rid of that and
just keep the equal function in the type descriptor itself.

cmd/go now has 10 generated hash functions, instead of 504. Makes
cmd/go 1.0% smaller. Update #6853.

Speed on non-interface keys is unchanged. Speed on interface keys
is ~20% slower:

name                  old time/op  new time/op  delta
MapInterfaceString-8  23.0ns ±21%  27.6ns ±14%  +20.01%  (p=0.002 n=10+10)
MapInterfacePtr-8     19.4ns ±16%  23.7ns ± 7%  +22.48%   (p=0.000 n=10+8)

Change-Id: I7c2e42292a46b5d4e288aaec4029bdbb01089263
Reviewed-on: https://go-review.googlesource.com/c/go/+/191198
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
parent 671bcb59
...@@ -6,6 +6,7 @@ package gc ...@@ -6,6 +6,7 @@ package gc
import ( import (
"cmd/compile/internal/types" "cmd/compile/internal/types"
"cmd/internal/obj"
"fmt" "fmt"
) )
...@@ -183,10 +184,82 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) { ...@@ -183,10 +184,82 @@ func algtype1(t *types.Type) (AlgKind, *types.Type) {
return 0, nil return 0, nil
} }
// Generate a helper function to compute the hash of a value of type t. // genhash returns a symbol which is the closure used to compute
func genhash(sym *types.Sym, t *types.Type) { // the hash of a value of type t.
func genhash(t *types.Type) *obj.LSym {
switch algtype(t) {
default:
// genhash is only called for types that have equality
Fatalf("genhash %v", t)
case AMEM0:
return sysClosure("memhash0")
case AMEM8:
return sysClosure("memhash8")
case AMEM16:
return sysClosure("memhash16")
case AMEM32:
return sysClosure("memhash32")
case AMEM64:
return sysClosure("memhash64")
case AMEM128:
return sysClosure("memhash128")
case ASTRING:
return sysClosure("strhash")
case AINTER:
return sysClosure("interhash")
case ANILINTER:
return sysClosure("nilinterhash")
case AFLOAT32:
return sysClosure("f32hash")
case AFLOAT64:
return sysClosure("f64hash")
case ACPLX64:
return sysClosure("c64hash")
case ACPLX128:
return sysClosure("c128hash")
case AMEM:
// For other sizes of plain memory, we build a closure
// that calls memhash_varlen. The size of the memory is
// encoded in the first slot of the closure.
closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
if memhashvarlen == nil {
memhashvarlen = sysfunc("memhash_varlen")
}
ot := 0
ot = dsymptr(closure, ot, memhashvarlen, 0)
ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
case ASPECIAL:
break
}
closure := typesymprefix(".hashfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
// Generate hash functions for subtypes.
// There are cases where we might not use these hashes,
// but in that case they will get dead-code eliminated.
// (And the closure generated by genhash will also get
// dead-code eliminated, as we call the subtype hashers
// directly.)
switch t.Etype {
case types.TARRAY:
genhash(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
genhash(f.Type)
}
}
sym := typesymprefix(".hash", t)
if Debug['r'] != 0 { if Debug['r'] != 0 {
fmt.Printf("genhash %v %v\n", sym, t) fmt.Printf("genhash %v %v %v\n", closure, sym, t)
} }
lineno = autogeneratedPos // less confusing than end of input lineno = autogeneratedPos // less confusing than end of input
...@@ -204,13 +277,7 @@ func genhash(sym *types.Sym, t *types.Type) { ...@@ -204,13 +277,7 @@ func genhash(sym *types.Sym, t *types.Type) {
np := asNode(tfn.Type.Params().Field(0).Nname) np := asNode(tfn.Type.Params().Field(0).Nname)
nh := asNode(tfn.Type.Params().Field(1).Nname) nh := asNode(tfn.Type.Params().Field(1).Nname)
// genhash is only called for types that have equality but
// cannot be handled by the standard algorithms,
// so t must be either an array or a struct.
switch t.Etype { switch t.Etype {
default:
Fatalf("genhash %v", t)
case types.TARRAY: case types.TARRAY:
// An array of pure memory would be handled by the // An array of pure memory would be handled by the
// standard algorithm, so the element type must not be // standard algorithm, so the element type must not be
...@@ -302,6 +369,13 @@ func genhash(sym *types.Sym, t *types.Type) { ...@@ -302,6 +369,13 @@ func genhash(sym *types.Sym, t *types.Type) {
fn.Func.SetNilCheckDisabled(true) fn.Func.SetNilCheckDisabled(true)
funccompile(fn) funccompile(fn)
// Build closure. It doesn't close over any variables, so
// it contains just the function pointer.
dsymptr(closure, 0, sym.Linksym(), 0)
ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
return closure
} }
func hashfor(t *types.Type) *Node { func hashfor(t *types.Type) *Node {
...@@ -325,6 +399,8 @@ func hashfor(t *types.Type) *Node { ...@@ -325,6 +399,8 @@ func hashfor(t *types.Type) *Node {
case ACPLX128: case ACPLX128:
sym = Runtimepkg.Lookup("c128hash") sym = Runtimepkg.Lookup("c128hash")
default: default:
// Note: the caller of hashfor ensured that this symbol
// exists and has a body by calling genhash for t.
sym = typesymprefix(".hash", t) sym = typesymprefix(".hash", t)
} }
...@@ -340,13 +416,82 @@ func hashfor(t *types.Type) *Node { ...@@ -340,13 +416,82 @@ func hashfor(t *types.Type) *Node {
return n return n
} }
// geneq generates a helper function to // sysClosure returns a closure which will call the
// check equality of two values of type t. // given runtime function (with no closed-over variables).
func geneq(sym *types.Sym, t *types.Type) { func sysClosure(name string) *obj.LSym {
s := sysvar(name + "·f")
if len(s.P) == 0 {
f := sysfunc(name)
dsymptr(s, 0, f, 0)
ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
return s
}
// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
switch algtype(t) {
case ANOEQ:
// The runtime will panic if it tries to compare
// a type with a nil equality function.
return nil
case AMEM0:
return sysClosure("memequal0")
case AMEM8:
return sysClosure("memequal8")
case AMEM16:
return sysClosure("memequal16")
case AMEM32:
return sysClosure("memequal32")
case AMEM64:
return sysClosure("memequal64")
case AMEM128:
return sysClosure("memequal128")
case ASTRING:
return sysClosure("strequal")
case AINTER:
return sysClosure("interequal")
case ANILINTER:
return sysClosure("nilinterequal")
case AFLOAT32:
return sysClosure("f32equal")
case AFLOAT64:
return sysClosure("f64equal")
case ACPLX64:
return sysClosure("c64equal")
case ACPLX128:
return sysClosure("c128equal")
case AMEM:
// make equality closure. The size of the type
// is encoded in the closure.
closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
if len(closure.P) != 0 {
return closure
}
if memequalvarlen == nil {
memequalvarlen = sysvar("memequal_varlen") // asm func
}
ot := 0
ot = dsymptr(closure, ot, memequalvarlen, 0)
ot = duintptr(closure, ot, uint64(t.Width))
ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
return closure
case ASPECIAL:
break
}
closure := typesymprefix(".eqfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
sym := typesymprefix(".eq", t)
if Debug['r'] != 0 { if Debug['r'] != 0 {
fmt.Printf("geneq %v %v\n", sym, t) fmt.Printf("geneq %v\n", t)
} }
// Autogenerate code for equality of structs and arrays.
lineno = autogeneratedPos // less confusing than end of input lineno = autogeneratedPos // less confusing than end of input
dclcontext = PEXTERN dclcontext = PEXTERN
...@@ -362,7 +507,7 @@ func geneq(sym *types.Sym, t *types.Type) { ...@@ -362,7 +507,7 @@ func geneq(sym *types.Sym, t *types.Type) {
np := asNode(tfn.Type.Params().Field(0).Nname) np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname) nq := asNode(tfn.Type.Params().Field(1).Nname)
// geneq is only called for types that have equality but // We reach here only for types that have equality but
// cannot be handled by the standard algorithms, // cannot be handled by the standard algorithms,
// so t must be either an array or a struct. // so t must be either an array or a struct.
switch t.Etype { switch t.Etype {
...@@ -481,6 +626,11 @@ func geneq(sym *types.Sym, t *types.Type) { ...@@ -481,6 +626,11 @@ func geneq(sym *types.Sym, t *types.Type) {
// are shallow. // are shallow.
fn.Func.SetNilCheckDisabled(true) fn.Func.SetNilCheckDisabled(true)
funccompile(fn) funccompile(fn)
// Generate a closure which points at the function we just generated.
dsymptr(closure, 0, sym.Linksym(), 0)
ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
return closure
} }
// eqfield returns the node // eqfield returns the node
......
...@@ -134,38 +134,60 @@ var runtimeDecls = [...]struct { ...@@ -134,38 +134,60 @@ var runtimeDecls = [...]struct {
{"memclrNoHeapPointers", funcTag, 103}, {"memclrNoHeapPointers", funcTag, 103},
{"memclrHasPointers", funcTag, 103}, {"memclrHasPointers", funcTag, 103},
{"memequal", funcTag, 104}, {"memequal", funcTag, 104},
{"memequal0", funcTag, 105},
{"memequal8", funcTag, 105}, {"memequal8", funcTag, 105},
{"memequal16", funcTag, 105}, {"memequal16", funcTag, 105},
{"memequal32", funcTag, 105}, {"memequal32", funcTag, 105},
{"memequal64", funcTag, 105}, {"memequal64", funcTag, 105},
{"memequal128", funcTag, 105}, {"memequal128", funcTag, 105},
{"int64div", funcTag, 106}, {"f32equal", funcTag, 106},
{"uint64div", funcTag, 107}, {"f64equal", funcTag, 106},
{"int64mod", funcTag, 106}, {"c64equal", funcTag, 106},
{"uint64mod", funcTag, 107}, {"c128equal", funcTag, 106},
{"float64toint64", funcTag, 108}, {"strequal", funcTag, 106},
{"float64touint64", funcTag, 109}, {"interequal", funcTag, 106},
{"float64touint32", funcTag, 110}, {"nilinterequal", funcTag, 106},
{"int64tofloat64", funcTag, 111}, {"memhash", funcTag, 107},
{"uint64tofloat64", funcTag, 112}, {"memhash0", funcTag, 108},
{"uint32tofloat64", funcTag, 113}, {"memhash8", funcTag, 108},
{"complex128div", funcTag, 114}, {"memhash16", funcTag, 108},
{"racefuncenter", funcTag, 115}, {"memhash32", funcTag, 108},
{"memhash64", funcTag, 108},
{"memhash128", funcTag, 108},
{"f32hash", funcTag, 108},
{"f64hash", funcTag, 108},
{"c64hash", funcTag, 108},
{"c128hash", funcTag, 108},
{"strhash", funcTag, 108},
{"interhash", funcTag, 108},
{"nilinterhash", funcTag, 108},
{"int64div", funcTag, 109},
{"uint64div", funcTag, 110},
{"int64mod", funcTag, 109},
{"uint64mod", funcTag, 110},
{"float64toint64", funcTag, 111},
{"float64touint64", funcTag, 112},
{"float64touint32", funcTag, 113},
{"int64tofloat64", funcTag, 114},
{"uint64tofloat64", funcTag, 115},
{"uint32tofloat64", funcTag, 116},
{"complex128div", funcTag, 117},
{"racefuncenter", funcTag, 118},
{"racefuncenterfp", funcTag, 5}, {"racefuncenterfp", funcTag, 5},
{"racefuncexit", funcTag, 5}, {"racefuncexit", funcTag, 5},
{"raceread", funcTag, 115}, {"raceread", funcTag, 118},
{"racewrite", funcTag, 115}, {"racewrite", funcTag, 118},
{"racereadrange", funcTag, 116}, {"racereadrange", funcTag, 119},
{"racewriterange", funcTag, 116}, {"racewriterange", funcTag, 119},
{"msanread", funcTag, 116}, {"msanread", funcTag, 119},
{"msanwrite", funcTag, 116}, {"msanwrite", funcTag, 119},
{"x86HasPOPCNT", varTag, 15}, {"x86HasPOPCNT", varTag, 15},
{"x86HasSSE41", varTag, 15}, {"x86HasSSE41", varTag, 15},
{"arm64HasATOMICS", varTag, 15}, {"arm64HasATOMICS", varTag, 15},
} }
func runtimeTypes() []*types.Type { func runtimeTypes() []*types.Type {
var typs [117]*types.Type var typs [120]*types.Type
typs[0] = types.Bytetype typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0]) typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[TANY] typs[2] = types.Types[TANY]
...@@ -272,16 +294,19 @@ func runtimeTypes() []*types.Type { ...@@ -272,16 +294,19 @@ func runtimeTypes() []*types.Type {
typs[103] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[50])}, nil) typs[103] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[50])}, nil)
typs[104] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, []*Node{anonfield(typs[15])}) typs[104] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, []*Node{anonfield(typs[15])})
typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[15])}) typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[15])})
typs[106] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])}) typs[106] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[56])}, []*Node{anonfield(typs[15])})
typs[107] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])}) typs[107] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[50]), anonfield(typs[50])}, []*Node{anonfield(typs[50])})
typs[108] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[19])}) typs[108] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[50])}, []*Node{anonfield(typs[50])})
typs[109] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[21])}) typs[109] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])})
typs[110] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[64])}) typs[110] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])})
typs[111] = functype(nil, []*Node{anonfield(typs[19])}, []*Node{anonfield(typs[17])}) typs[111] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[19])})
typs[112] = functype(nil, []*Node{anonfield(typs[21])}, []*Node{anonfield(typs[17])}) typs[112] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[21])})
typs[113] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[17])}) typs[113] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[64])})
typs[114] = functype(nil, []*Node{anonfield(typs[23]), anonfield(typs[23])}, []*Node{anonfield(typs[23])}) typs[114] = functype(nil, []*Node{anonfield(typs[19])}, []*Node{anonfield(typs[17])})
typs[115] = functype(nil, []*Node{anonfield(typs[50])}, nil) typs[115] = functype(nil, []*Node{anonfield(typs[21])}, []*Node{anonfield(typs[17])})
typs[116] = functype(nil, []*Node{anonfield(typs[50]), anonfield(typs[50])}, nil) typs[116] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[17])})
typs[117] = functype(nil, []*Node{anonfield(typs[23]), anonfield(typs[23])}, []*Node{anonfield(typs[23])})
typs[118] = functype(nil, []*Node{anonfield(typs[50])}, nil)
typs[119] = functype(nil, []*Node{anonfield(typs[50]), anonfield(typs[50])}, nil)
return typs[:] return typs[:]
} }
...@@ -179,11 +179,34 @@ func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) ...@@ -179,11 +179,34 @@ func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
func memequal(x, y *any, size uintptr) bool func memequal(x, y *any, size uintptr) bool
func memequal0(x, y *any) bool
func memequal8(x, y *any) bool func memequal8(x, y *any) bool
func memequal16(x, y *any) bool func memequal16(x, y *any) bool
func memequal32(x, y *any) bool func memequal32(x, y *any) bool
func memequal64(x, y *any) bool func memequal64(x, y *any) bool
func memequal128(x, y *any) bool func memequal128(x, y *any) bool
func f32equal(p, q unsafe.Pointer) bool
func f64equal(p, q unsafe.Pointer) bool
func c64equal(p, q unsafe.Pointer) bool
func c128equal(p, q unsafe.Pointer) bool
func strequal(p, q unsafe.Pointer) bool
func interequal(p, q unsafe.Pointer) bool
func nilinterequal(p, q unsafe.Pointer) bool
func memhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr
func memhash0(p unsafe.Pointer, h uintptr) uintptr
func memhash8(p unsafe.Pointer, h uintptr) uintptr
func memhash16(p unsafe.Pointer, h uintptr) uintptr
func memhash32(p unsafe.Pointer, h uintptr) uintptr
func memhash64(p unsafe.Pointer, h uintptr) uintptr
func memhash128(p unsafe.Pointer, h uintptr) uintptr
func f32hash(p unsafe.Pointer, h uintptr) uintptr
func f64hash(p unsafe.Pointer, h uintptr) uintptr
func c64hash(p unsafe.Pointer, h uintptr) uintptr
func c128hash(p unsafe.Pointer, h uintptr) uintptr
func strhash(a unsafe.Pointer, h uintptr) uintptr
func interhash(p unsafe.Pointer, h uintptr) uintptr
func nilinterhash(p unsafe.Pointer, h uintptr) uintptr
// only used on 32-bit // only used on 32-bit
func int64div(int64, int64) int64 func int64div(int64, int64) int64
......
...@@ -825,29 +825,21 @@ func typeptrdata(t *types.Type) int64 { ...@@ -825,29 +825,21 @@ func typeptrdata(t *types.Type) int64 {
// reflect/type.go // reflect/type.go
// runtime/type.go // runtime/type.go
const ( const (
tflagUncommon = 1 << 0 tflagUncommon = 1 << 0
tflagExtraStar = 1 << 1 tflagExtraStar = 1 << 1
tflagNamed = 1 << 2 tflagNamed = 1 << 2
tflagRegularMemory = 1 << 3
) )
var ( var (
algarray *obj.LSym
memhashvarlen *obj.LSym memhashvarlen *obj.LSym
memequalvarlen *obj.LSym memequalvarlen *obj.LSym
) )
// dcommontype dumps the contents of a reflect.rtype (runtime._type). // dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, t *types.Type) int { func dcommontype(lsym *obj.LSym, t *types.Type) int {
sizeofAlg := 2 * Widthptr
if algarray == nil {
algarray = sysvar("algarray")
}
dowidth(t) dowidth(t)
alg := algtype(t) eqfunc := geneq(t)
var algsym *obj.LSym
if alg == ASPECIAL || alg == AMEM {
algsym = dalgsym(t)
}
sptrWeak := true sptrWeak := true
var sptr *obj.LSym var sptr *obj.LSym
...@@ -871,7 +863,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { ...@@ -871,7 +863,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
// align uint8 // align uint8
// fieldAlign uint8 // fieldAlign uint8
// kind uint8 // kind uint8
// alg *typeAlg // equal func(unsafe.Pointer, unsafe.Pointer) bool
// gcdata *byte // gcdata *byte
// str nameOff // str nameOff
// ptrToThis typeOff // ptrToThis typeOff
...@@ -888,6 +880,9 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { ...@@ -888,6 +880,9 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if t.Sym != nil && t.Sym.Name != "" { if t.Sym != nil && t.Sym.Name != "" {
tflag |= tflagNamed tflag |= tflagNamed
} }
if IsRegularMemory(t) {
tflag |= tflagRegularMemory
}
exported := false exported := false
p := t.LongString() p := t.LongString()
...@@ -930,10 +925,10 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int { ...@@ -930,10 +925,10 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
i |= objabi.KindGCProg i |= objabi.KindGCProg
} }
ot = duint8(lsym, ot, uint8(i)) // kind ot = duint8(lsym, ot, uint8(i)) // kind
if algsym == nil { if eqfunc != nil {
ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg) ot = dsymptr(lsym, ot, eqfunc, 0) // equality function
} else { } else {
ot = dsymptr(lsym, ot, algsym, 0) ot = duintptr(lsym, ot, 0) // type we can't do == with
} }
ot = dsymptr(lsym, ot, gcsym, 0) // gcdata ot = dsymptr(lsym, ot, gcsym, 0) // gcdata
...@@ -1311,10 +1306,13 @@ func dtypesym(t *types.Type) *obj.LSym { ...@@ -1311,10 +1306,13 @@ func dtypesym(t *types.Type) *obj.LSym {
s1 := dtypesym(t.Key()) s1 := dtypesym(t.Key())
s2 := dtypesym(t.Elem()) s2 := dtypesym(t.Elem())
s3 := dtypesym(bmap(t)) s3 := dtypesym(bmap(t))
hasher := genhash(t.Key())
ot = dcommontype(lsym, t) ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0) ot = dsymptr(lsym, ot, s1, 0)
ot = dsymptr(lsym, ot, s2, 0) ot = dsymptr(lsym, ot, s2, 0)
ot = dsymptr(lsym, ot, s3, 0) ot = dsymptr(lsym, ot, s3, 0)
ot = dsymptr(lsym, ot, hasher, 0)
var flags uint32 var flags uint32
// Note: flags must match maptype accessors in ../../../../runtime/type.go // Note: flags must match maptype accessors in ../../../../runtime/type.go
// and maptype builder in ../../../../reflect/type.go:MapOf. // and maptype builder in ../../../../reflect/type.go:MapOf.
...@@ -1673,78 +1671,6 @@ func (a typesByString) Less(i, j int) bool { ...@@ -1673,78 +1671,6 @@ func (a typesByString) Less(i, j int) bool {
} }
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func dalgsym(t *types.Type) *obj.LSym {
var lsym *obj.LSym
var hashfunc *obj.LSym
var eqfunc *obj.LSym
// dalgsym is only called for a type that needs an algorithm table,
// which implies that the type is comparable (or else it would use ANOEQ).
if algtype(t) == AMEM {
// we use one algorithm table for all AMEM types of a given size
p := fmt.Sprintf(".alg%d", t.Width)
s := typeLookup(p)
lsym = s.Linksym()
if s.AlgGen() {
return lsym
}
s.SetAlgGen(true)
if memhashvarlen == nil {
memhashvarlen = sysfunc("memhash_varlen")
memequalvarlen = sysvar("memequal_varlen") // asm func
}
// make hash closure
p = fmt.Sprintf(".hashfunc%d", t.Width)
hashfunc = typeLookup(p).Linksym()
ot := 0
ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)
// make equality closure
p = fmt.Sprintf(".eqfunc%d", t.Width)
eqfunc = typeLookup(p).Linksym()
ot = 0
ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
ot = duintptr(eqfunc, ot, uint64(t.Width))
ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
} else {
// generate an alg table specific to this type
s := typesymprefix(".alg", t)
lsym = s.Linksym()
hash := typesymprefix(".hash", t)
eq := typesymprefix(".eq", t)
hashfunc = typesymprefix(".hashfunc", t).Linksym()
eqfunc = typesymprefix(".eqfunc", t).Linksym()
genhash(hash, t)
geneq(eq, t)
// make Go funcs (closures) for calling hash and equal from Go
dsymptr(hashfunc, 0, hash.Linksym(), 0)
ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
dsymptr(eqfunc, 0, eq.Linksym(), 0)
ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
// ../../../../runtime/alg.go:/typeAlg
ot := 0
ot = dsymptr(lsym, ot, hashfunc, 0)
ot = dsymptr(lsym, ot, eqfunc, 0)
ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
return lsym
}
// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap, // maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type. // which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program, // Above this length, the GC information is recorded as a GC program,
......
...@@ -47,7 +47,6 @@ const ( ...@@ -47,7 +47,6 @@ const (
symUniq symUniq
symSiggen // type symbol has been generated symSiggen // type symbol has been generated
symAsm // on asmlist, for writing to -asmhdr symAsm // on asmlist, for writing to -asmhdr
symAlgGen // algorithm table has been generated
symFunc // function symbol; uses internal ABI symFunc // function symbol; uses internal ABI
) )
...@@ -55,14 +54,12 @@ func (sym *Sym) OnExportList() bool { return sym.flags&symOnExportList != 0 } ...@@ -55,14 +54,12 @@ func (sym *Sym) OnExportList() bool { return sym.flags&symOnExportList != 0 }
func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 } func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 }
func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 } func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 }
func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 } func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 }
func (sym *Sym) AlgGen() bool { return sym.flags&symAlgGen != 0 }
func (sym *Sym) Func() bool { return sym.flags&symFunc != 0 } func (sym *Sym) Func() bool { return sym.flags&symFunc != 0 }
func (sym *Sym) SetOnExportList(b bool) { sym.flags.set(symOnExportList, b) } func (sym *Sym) SetOnExportList(b bool) { sym.flags.set(symOnExportList, b) }
func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) } func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) }
func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) } func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) }
func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) } func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) }
func (sym *Sym) SetAlgGen(b bool) { sym.flags.set(symAlgGen, b) }
func (sym *Sym) SetFunc(b bool) { sym.flags.set(symFunc, b) } func (sym *Sym) SetFunc(b bool) { sym.flags.set(symFunc, b) }
func (sym *Sym) IsBlank() bool { func (sym *Sym) IsBlank() bool {
......
...@@ -359,7 +359,7 @@ func decodetypeMethods(arch *sys.Arch, s *sym.Symbol) []methodsig { ...@@ -359,7 +359,7 @@ func decodetypeMethods(arch *sys.Arch, s *sym.Symbol) []methodsig {
case kindChan: // reflect.chanType case kindChan: // reflect.chanType
off += 2 * arch.PtrSize off += 2 * arch.PtrSize
case kindMap: // reflect.mapType case kindMap: // reflect.mapType
off += 3*arch.PtrSize + 8 off += 4*arch.PtrSize + 8
case kindInterface: // reflect.interfaceType case kindInterface: // reflect.interfaceType
off += 3 * arch.PtrSize off += 3 * arch.PtrSize
default: default:
......
...@@ -1162,38 +1162,6 @@ func (ctxt *Link) doxcoff() { ...@@ -1162,38 +1162,6 @@ func (ctxt *Link) doxcoff() {
toc.Attr |= sym.AttrReachable toc.Attr |= sym.AttrReachable
toc.Attr |= sym.AttrVisibilityHidden toc.Attr |= sym.AttrVisibilityHidden
// XCOFF does not allow relocations of data symbol address to a text symbol.
// Such case occurs when a RODATA symbol retrieves a data symbol address.
// When it happens, this RODATA symbol is moved to .data section.
// runtime.algarray is a readonly symbol but stored inside .data section.
// If it stays in .data, all type symbols will be moved to .data which
// cannot be done.
algarray := ctxt.Syms.Lookup("runtime.algarray", 0)
algarray.Type = sym.SRODATA
for {
again := false
for _, s := range ctxt.Syms.Allsym {
if s.Type != sym.SRODATA {
continue
}
for ri := range s.R {
r := &s.R[ri]
if r.Type != objabi.R_ADDR {
continue
}
if r.Sym.Type != sym.Sxxx && r.Sym.Type != sym.STEXT && r.Sym.Type != sym.SRODATA {
s.Type = sym.SDATA
again = true
break
}
}
}
if !again {
break
}
}
// Add entry point to .loader symbols. // Add entry point to .loader symbols.
ep := ctxt.Syms.ROLookup(*flagEntrySymbol, 0) ep := ctxt.Syms.ROLookup(*flagEntrySymbol, 0)
if !ep.Attr.Reachable() { if !ep.Attr.Reachable() {
......
...@@ -290,6 +290,10 @@ const ( ...@@ -290,6 +290,10 @@ const (
// tflagNamed means the type has a name. // tflagNamed means the type has a name.
tflagNamed tflag = 1 << 2 tflagNamed tflag = 1 << 2
// tflagRegularMemory means that equal and hash functions can treat
// this type as a single region of t.size bytes.
tflagRegularMemory tflag = 1 << 3
) )
// rtype is the common implementation of most values. // rtype is the common implementation of most values.
...@@ -298,26 +302,18 @@ const ( ...@@ -298,26 +302,18 @@ const (
// rtype must be kept in sync with ../runtime/type.go:/^type._type. // rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct { type rtype struct {
size uintptr size uintptr
ptrdata uintptr // number of bytes in the type that can contain pointers ptrdata uintptr // number of bytes in the type that can contain pointers
hash uint32 // hash of type; avoids computation in hash tables hash uint32 // hash of type; avoids computation in hash tables
tflag tflag // extra type information flags tflag tflag // extra type information flags
align uint8 // alignment of variable with this type align uint8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type fieldAlign uint8 // alignment of struct field with this type
kind uint8 // enumeration for C kind uint8 // enumeration for C
alg *typeAlg // algorithm table
gcdata *byte // garbage collection data
str nameOff // string form
ptrToThis typeOff // type for pointer to this type, may be zero
}
// a copy of runtime.typeAlg
type typeAlg struct {
// function for hashing objects of this type
// (ptr to object, seed) -> hash
hash func(unsafe.Pointer, uintptr) uintptr
// function for comparing objects of this type // function for comparing objects of this type
// (ptr to object A, ptr to object B) -> ==? // (ptr to object A, ptr to object B) -> ==?
equal func(unsafe.Pointer, unsafe.Pointer) bool equal func(unsafe.Pointer, unsafe.Pointer) bool
gcdata *byte // garbage collection data
str nameOff // string form
ptrToThis typeOff // type for pointer to this type, may be zero
} }
// Method on non-interface type // Method on non-interface type
...@@ -397,9 +393,11 @@ type interfaceType struct { ...@@ -397,9 +393,11 @@ type interfaceType struct {
// mapType represents a map type. // mapType represents a map type.
type mapType struct { type mapType struct {
rtype rtype
key *rtype // map key type key *rtype // map key type
elem *rtype // map element (value) type elem *rtype // map element (value) type
bucket *rtype // internal bucket structure bucket *rtype // internal bucket structure
// function for hashing keys (ptr to key, seed) -> hash
hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot keysize uint8 // size of key slot
valuesize uint8 // size of value slot valuesize uint8 // size of value slot
bucketsize uint16 // size of bucket bucketsize uint16 // size of bucket
...@@ -1457,7 +1455,7 @@ func (t *rtype) ConvertibleTo(u Type) bool { ...@@ -1457,7 +1455,7 @@ func (t *rtype) ConvertibleTo(u Type) bool {
} }
func (t *rtype) Comparable() bool { func (t *rtype) Comparable() bool {
return t.alg != nil && t.alg.equal != nil return t.equal != nil
} }
// implements reports whether the type V implements the interface type T. // implements reports whether the type V implements the interface type T.
...@@ -1807,7 +1805,7 @@ func ChanOf(dir ChanDir, t Type) Type { ...@@ -1807,7 +1805,7 @@ func ChanOf(dir ChanDir, t Type) Type {
var ichan interface{} = (chan unsafe.Pointer)(nil) var ichan interface{} = (chan unsafe.Pointer)(nil)
prototype := *(**chanType)(unsafe.Pointer(&ichan)) prototype := *(**chanType)(unsafe.Pointer(&ichan))
ch := *prototype ch := *prototype
ch.tflag = 0 ch.tflag = tflagRegularMemory
ch.dir = uintptr(dir) ch.dir = uintptr(dir)
ch.str = resolveReflectName(newName(s, "", false)) ch.str = resolveReflectName(newName(s, "", false))
ch.hash = fnv1(typ.hash, 'c', byte(dir)) ch.hash = fnv1(typ.hash, 'c', byte(dir))
...@@ -1817,8 +1815,6 @@ func ChanOf(dir ChanDir, t Type) Type { ...@@ -1817,8 +1815,6 @@ func ChanOf(dir ChanDir, t Type) Type {
return ti.(Type) return ti.(Type)
} }
func ismapkey(*rtype) bool // implemented in runtime
// MapOf returns the map type with the given key and element types. // MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string, // For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string. // MapOf(k, e) represents map[int]string.
...@@ -1829,7 +1825,7 @@ func MapOf(key, elem Type) Type { ...@@ -1829,7 +1825,7 @@ func MapOf(key, elem Type) Type {
ktyp := key.(*rtype) ktyp := key.(*rtype)
etyp := elem.(*rtype) etyp := elem.(*rtype)
if !ismapkey(ktyp) { if ktyp.equal == nil {
panic("reflect.MapOf: invalid key type " + ktyp.String()) panic("reflect.MapOf: invalid key type " + ktyp.String())
} }
...@@ -1860,6 +1856,9 @@ func MapOf(key, elem Type) Type { ...@@ -1860,6 +1856,9 @@ func MapOf(key, elem Type) Type {
mt.key = ktyp mt.key = ktyp
mt.elem = etyp mt.elem = etyp
mt.bucket = bucketOf(ktyp, etyp) mt.bucket = bucketOf(ktyp, etyp)
mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
return typehash(ktyp, p, seed)
}
mt.flags = 0 mt.flags = 0
if ktyp.size > maxKeySize { if ktyp.size > maxKeySize {
mt.keysize = uint8(ptrSize) mt.keysize = uint8(ptrSize)
...@@ -2332,7 +2331,6 @@ func StructOf(fields []StructField) Type { ...@@ -2332,7 +2331,6 @@ func StructOf(fields []StructField) Type {
size uintptr size uintptr
typalign uint8 typalign uint8
comparable = true comparable = true
hashable = true
methods []method methods []method
fs = make([]structField, len(fields)) fs = make([]structField, len(fields))
...@@ -2518,8 +2516,7 @@ func StructOf(fields []StructField) Type { ...@@ -2518,8 +2516,7 @@ func StructOf(fields []StructField) Type {
repr = append(repr, ';') repr = append(repr, ';')
} }
comparable = comparable && (ft.alg.equal != nil) comparable = comparable && (ft.equal != nil)
hashable = hashable && (ft.alg.hash != nil)
offset := align(size, uintptr(ft.align)) offset := align(size, uintptr(ft.align))
if ft.align > typalign { if ft.align > typalign {
...@@ -2634,7 +2631,7 @@ func StructOf(fields []StructField) Type { ...@@ -2634,7 +2631,7 @@ func StructOf(fields []StructField) Type {
} }
typ.str = resolveReflectName(newName(str, "", false)) typ.str = resolveReflectName(newName(str, "", false))
typ.tflag = 0 typ.tflag = 0 // TODO: set tflagRegularMemory
typ.hash = hash typ.hash = hash
typ.size = size typ.size = size
typ.ptrdata = typeptrdata(typ.common()) typ.ptrdata = typeptrdata(typ.common())
...@@ -2708,24 +2705,13 @@ func StructOf(fields []StructField) Type { ...@@ -2708,24 +2705,13 @@ func StructOf(fields []StructField) Type {
typ.gcdata = &bv.data[0] typ.gcdata = &bv.data[0]
} }
} }
typ.alg = new(typeAlg) typ.equal = nil
if hashable {
typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr {
o := seed
for _, ft := range typ.fields {
pi := add(p, ft.offset(), "&x.field safe")
o = ft.typ.alg.hash(pi, o)
}
return o
}
}
if comparable { if comparable {
typ.alg.equal = func(p, q unsafe.Pointer) bool { typ.equal = func(p, q unsafe.Pointer) bool {
for _, ft := range typ.fields { for _, ft := range typ.fields {
pi := add(p, ft.offset(), "&x.field safe") pi := add(p, ft.offset(), "&x.field safe")
qi := add(q, ft.offset(), "&x.field safe") qi := add(q, ft.offset(), "&x.field safe")
if !ft.typ.alg.equal(pi, qi) { if !ft.typ.equal(pi, qi) {
return false return false
} }
} }
...@@ -2826,7 +2812,7 @@ func ArrayOf(count int, elem Type) Type { ...@@ -2826,7 +2812,7 @@ func ArrayOf(count int, elem Type) Type {
var iarray interface{} = [1]unsafe.Pointer{} var iarray interface{} = [1]unsafe.Pointer{}
prototype := *(**arrayType)(unsafe.Pointer(&iarray)) prototype := *(**arrayType)(unsafe.Pointer(&iarray))
array := *prototype array := *prototype
array.tflag = 0 array.tflag = typ.tflag & tflagRegularMemory
array.str = resolveReflectName(newName(s, "", false)) array.str = resolveReflectName(newName(s, "", false))
array.hash = fnv1(typ.hash, '[') array.hash = fnv1(typ.hash, '[')
for n := uint32(count); n > 0; n >>= 8 { for n := uint32(count); n > 0; n >>= 8 {
...@@ -2929,12 +2915,10 @@ func ArrayOf(count int, elem Type) Type { ...@@ -2929,12 +2915,10 @@ func ArrayOf(count int, elem Type) Type {
etyp := typ.common() etyp := typ.common()
esize := etyp.Size() esize := etyp.Size()
ealg := etyp.alg
array.alg = new(typeAlg) array.equal = nil
if ealg.equal != nil { if eequal := etyp.equal; eequal != nil {
eequal := ealg.equal array.equal = func(p, q unsafe.Pointer) bool {
array.alg.equal = func(p, q unsafe.Pointer) bool {
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
pi := arrayAt(p, i, esize, "i < count") pi := arrayAt(p, i, esize, "i < count")
qi := arrayAt(q, i, esize, "i < count") qi := arrayAt(q, i, esize, "i < count")
...@@ -2946,16 +2930,6 @@ func ArrayOf(count int, elem Type) Type { ...@@ -2946,16 +2930,6 @@ func ArrayOf(count int, elem Type) Type {
return true return true
} }
} }
if ealg.hash != nil {
ehash := ealg.hash
array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr {
o := seed
for i := 0; i < count; i++ {
o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
}
return o
}
}
switch { switch {
case count == 1 && !ifaceIndir(typ): case count == 1 && !ifaceIndir(typ):
......
...@@ -2765,6 +2765,9 @@ func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr) ...@@ -2765,6 +2765,9 @@ func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr)
//go:noescape //go:noescape
func typedslicecopy(elemType *rtype, dst, src sliceHeader) int func typedslicecopy(elemType *rtype, dst, src sliceHeader) int
//go:noescape
func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
// Dummy annotation marking that the value x escapes, // Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that // for use in cases where the reflect code is so clever that
// the compiler cannot follow. // the compiler cannot follow.
......
...@@ -34,17 +34,6 @@ const ( ...@@ -34,17 +34,6 @@ const (
alg_max alg_max
) )
// typeAlg is also copied/used in reflect/type.go.
// keep them in sync.
type typeAlg struct {
// function for hashing objects of this type
// (ptr to object, seed) -> hash
hash func(unsafe.Pointer, uintptr) uintptr
// function for comparing objects of this type
// (ptr to object A, ptr to object B) -> ==?
equal func(unsafe.Pointer, unsafe.Pointer) bool
}
func memhash0(p unsafe.Pointer, h uintptr) uintptr { func memhash0(p unsafe.Pointer, h uintptr) uintptr {
return h return h
} }
...@@ -68,23 +57,9 @@ func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr { ...@@ -68,23 +57,9 @@ func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, size) return memhash(p, h, size)
} }
var algarray = [alg_max]typeAlg{ // runtime variable to check if the processor we're running on
alg_NOEQ: {nil, nil}, // actually supports the instructions used by the AES-based
alg_MEM0: {memhash0, memequal0}, // hash implementation.
alg_MEM8: {memhash8, memequal8},
alg_MEM16: {memhash16, memequal16},
alg_MEM32: {memhash32, memequal32},
alg_MEM64: {memhash64, memequal64},
alg_MEM128: {memhash128, memequal128},
alg_STRING: {strhash, strequal},
alg_INTER: {interhash, interequal},
alg_NILINTER: {nilinterhash, nilinterequal},
alg_FLOAT32: {f32hash, f32equal},
alg_FLOAT64: {f64hash, f64equal},
alg_CPLX64: {c64hash, c64equal},
alg_CPLX128: {c128hash, c128equal},
}
var useAeshash bool var useAeshash bool
// in asm_*.s // in asm_*.s
...@@ -144,14 +119,17 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr { ...@@ -144,14 +119,17 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
return h return h
} }
t := tab._type t := tab._type
fn := t.alg.hash if t.equal == nil {
if fn == nil { // Check hashability here. We could do this check inside
// typehash, but we want to report the topmost type in
// the error text (e.g. in a struct with a field of slice type
// we want to report the struct, not the slice).
panic(errorString("hash of unhashable type " + t.string())) panic(errorString("hash of unhashable type " + t.string()))
} }
if isDirectIface(t) { if isDirectIface(t) {
return c1 * fn(unsafe.Pointer(&a.data), h^c0) return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else { } else {
return c1 * fn(a.data, h^c0) return c1 * typehash(t, a.data, h^c0)
} }
} }
...@@ -161,15 +139,72 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr { ...@@ -161,15 +139,72 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
if t == nil { if t == nil {
return h return h
} }
fn := t.alg.hash if t.equal == nil {
if fn == nil { // See comment in interhash above.
panic(errorString("hash of unhashable type " + t.string())) panic(errorString("hash of unhashable type " + t.string()))
} }
if isDirectIface(t) { if isDirectIface(t) {
return c1 * fn(unsafe.Pointer(&a.data), h^c0) return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else { } else {
return c1 * fn(a.data, h^c0) return c1 * typehash(t, a.data, h^c0)
}
}
// typehash computes the hash of the object of type t at address p.
// h is the seed.
// This function is seldom used. Most maps use for hashing either
// fixed functions (e.g. f32hash) or compiler-generated functions
// (e.g. for a type like struct { x, y string }). This implementation
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
if t.tflag&tflagRegularMemory != 0 {
return memhash(p, h, t.size)
} }
switch t.kind & kindMask {
case kindFloat32:
return f32hash(p, h)
case kindFloat64:
return f64hash(p, h)
case kindComplex64:
return c64hash(p, h)
case kindComplex128:
return c128hash(p, h)
case kindString:
return strhash(p, h)
case kindInterface:
i := (*interfacetype)(unsafe.Pointer(t))
if len(i.mhdr) == 0 {
return nilinterhash(p, h)
}
return interhash(p, h)
case kindArray:
a := (*arraytype)(unsafe.Pointer(t))
for i := uintptr(0); i < a.len; i++ {
h = typehash(a.elem, add(p, i*a.elem.size), h)
}
return h
case kindStruct:
s := (*structtype)(unsafe.Pointer(t))
for _, f := range s.fields {
// TODO: maybe we could hash several contiguous fields all at once.
if f.name.isBlank() {
continue
}
h = typehash(f.typ, add(p, f.offset()), h)
}
return h
default:
// Should never happen, as typehash should only be called
// with comparable types.
panic(errorString("hash of unhashable type " + t.string()))
}
}
//go:linkname reflect_typehash reflect.typehash
func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
return typehash(t, p, h)
} }
func memequal0(p, q unsafe.Pointer) bool { func memequal0(p, q unsafe.Pointer) bool {
...@@ -219,7 +254,7 @@ func efaceeq(t *_type, x, y unsafe.Pointer) bool { ...@@ -219,7 +254,7 @@ func efaceeq(t *_type, x, y unsafe.Pointer) bool {
if t == nil { if t == nil {
return true return true
} }
eq := t.alg.equal eq := t.equal
if eq == nil { if eq == nil {
panic(errorString("comparing uncomparable type " + t.string())) panic(errorString("comparing uncomparable type " + t.string()))
} }
...@@ -236,7 +271,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool { ...@@ -236,7 +271,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
return true return true
} }
t := tab._type t := tab._type
eq := t.alg.equal eq := t.equal
if eq == nil { if eq == nil {
panic(errorString("comparing uncomparable type " + t.string())) panic(errorString("comparing uncomparable type " + t.string()))
} }
...@@ -249,7 +284,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool { ...@@ -249,7 +284,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
// Testing adapters for hash quality tests (see hash_test.go) // Testing adapters for hash quality tests (see hash_test.go)
func stringHash(s string, seed uintptr) uintptr { func stringHash(s string, seed uintptr) uintptr {
return algarray[alg_STRING].hash(noescape(unsafe.Pointer(&s)), seed) return strhash(noescape(unsafe.Pointer(&s)), seed)
} }
func bytesHash(b []byte, seed uintptr) uintptr { func bytesHash(b []byte, seed uintptr) uintptr {
...@@ -258,21 +293,21 @@ func bytesHash(b []byte, seed uintptr) uintptr { ...@@ -258,21 +293,21 @@ func bytesHash(b []byte, seed uintptr) uintptr {
} }
func int32Hash(i uint32, seed uintptr) uintptr { func int32Hash(i uint32, seed uintptr) uintptr {
return algarray[alg_MEM32].hash(noescape(unsafe.Pointer(&i)), seed) return memhash32(noescape(unsafe.Pointer(&i)), seed)
} }
func int64Hash(i uint64, seed uintptr) uintptr { func int64Hash(i uint64, seed uintptr) uintptr {
return algarray[alg_MEM64].hash(noescape(unsafe.Pointer(&i)), seed) return memhash64(noescape(unsafe.Pointer(&i)), seed)
} }
func efaceHash(i interface{}, seed uintptr) uintptr { func efaceHash(i interface{}, seed uintptr) uintptr {
return algarray[alg_NILINTER].hash(noescape(unsafe.Pointer(&i)), seed) return nilinterhash(noescape(unsafe.Pointer(&i)), seed)
} }
func ifaceHash(i interface { func ifaceHash(i interface {
F() F()
}, seed uintptr) uintptr { }, seed uintptr) uintptr {
return algarray[alg_INTER].hash(noescape(unsafe.Pointer(&i)), seed) return interhash(noescape(unsafe.Pointer(&i)), seed)
} }
const hashRandomBytes = sys.PtrSize / 4 * 64 const hashRandomBytes = sys.PtrSize / 4 * 64
......
...@@ -111,7 +111,7 @@ func makechan(t *chantype, size int) *hchan { ...@@ -111,7 +111,7 @@ func makechan(t *chantype, size int) *hchan {
c.dataqsiz = uint(size) c.dataqsiz = uint(size)
if debugChan { if debugChan {
print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n") print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
} }
return c return c
} }
......
...@@ -403,15 +403,14 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { ...@@ -403,15 +403,14 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
} }
if h == nil || h.count == 0 { if h == nil || h.count == 0 {
if t.hashMightPanic() { if t.hashMightPanic() {
t.key.alg.hash(key, 0) // see issue 23734 t.hasher(key, 0) // see issue 23734
} }
return unsafe.Pointer(&zeroVal[0]) return unsafe.Pointer(&zeroVal[0])
} }
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map read and map write") throw("concurrent map read and map write")
} }
alg := t.key.alg hash := t.hasher(key, uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -438,7 +437,7 @@ bucketloop: ...@@ -438,7 +437,7 @@ bucketloop:
if t.indirectkey() { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
if alg.equal(key, k) { if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize)) e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() { if t.indirectelem() {
e = *((*unsafe.Pointer)(e)) e = *((*unsafe.Pointer)(e))
...@@ -462,15 +461,14 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) ...@@ -462,15 +461,14 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
} }
if h == nil || h.count == 0 { if h == nil || h.count == 0 {
if t.hashMightPanic() { if t.hashMightPanic() {
t.key.alg.hash(key, 0) // see issue 23734 t.hasher(key, 0) // see issue 23734
} }
return unsafe.Pointer(&zeroVal[0]), false return unsafe.Pointer(&zeroVal[0]), false
} }
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map read and map write") throw("concurrent map read and map write")
} }
alg := t.key.alg hash := t.hasher(key, uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -497,7 +495,7 @@ bucketloop: ...@@ -497,7 +495,7 @@ bucketloop:
if t.indirectkey() { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
if alg.equal(key, k) { if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize)) e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() { if t.indirectelem() {
e = *((*unsafe.Pointer)(e)) e = *((*unsafe.Pointer)(e))
...@@ -514,8 +512,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe ...@@ -514,8 +512,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if h == nil || h.count == 0 { if h == nil || h.count == 0 {
return nil, nil return nil, nil
} }
alg := t.key.alg hash := t.hasher(key, uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -542,7 +539,7 @@ bucketloop: ...@@ -542,7 +539,7 @@ bucketloop:
if t.indirectkey() { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
if alg.equal(key, k) { if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize)) e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() { if t.indirectelem() {
e = *((*unsafe.Pointer)(e)) e = *((*unsafe.Pointer)(e))
...@@ -587,10 +584,9 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { ...@@ -587,10 +584,9 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map writes") throw("concurrent map writes")
} }
alg := t.key.alg hash := t.hasher(key, uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
// Set hashWriting after calling alg.hash, since alg.hash may panic, // Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write. // in which case we have not actually done a write.
h.flags ^= hashWriting h.flags ^= hashWriting
...@@ -627,7 +623,7 @@ bucketloop: ...@@ -627,7 +623,7 @@ bucketloop:
if t.indirectkey() { if t.indirectkey() {
k = *((*unsafe.Pointer)(k)) k = *((*unsafe.Pointer)(k))
} }
if !alg.equal(key, k) { if !t.key.equal(key, k) {
continue continue
} }
// already have a mapping for key. Update it. // already have a mapping for key. Update it.
...@@ -698,7 +694,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { ...@@ -698,7 +694,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
} }
if h == nil || h.count == 0 { if h == nil || h.count == 0 {
if t.hashMightPanic() { if t.hashMightPanic() {
t.key.alg.hash(key, 0) // see issue 23734 t.hasher(key, 0) // see issue 23734
} }
return return
} }
...@@ -706,10 +702,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { ...@@ -706,10 +702,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
throw("concurrent map writes") throw("concurrent map writes")
} }
alg := t.key.alg hash := t.hasher(key, uintptr(h.hash0))
hash := alg.hash(key, uintptr(h.hash0))
// Set hashWriting after calling alg.hash, since alg.hash may panic, // Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write (delete). // in which case we have not actually done a write (delete).
h.flags ^= hashWriting h.flags ^= hashWriting
...@@ -734,7 +729,7 @@ search: ...@@ -734,7 +729,7 @@ search:
if t.indirectkey() { if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2)) k2 = *((*unsafe.Pointer)(k2))
} }
if !alg.equal(key, k2) { if !t.key.equal(key, k2) {
continue continue
} }
// Only clear key if there are pointers in it. // Only clear key if there are pointers in it.
...@@ -862,7 +857,6 @@ func mapiternext(it *hiter) { ...@@ -862,7 +857,6 @@ func mapiternext(it *hiter) {
b := it.bptr b := it.bptr
i := it.i i := it.i
checkBucket := it.checkBucket checkBucket := it.checkBucket
alg := t.key.alg
next: next:
if b == nil { if b == nil {
...@@ -916,10 +910,10 @@ next: ...@@ -916,10 +910,10 @@ next:
// through the oldbucket, skipping any keys that will go // through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two // to the other new bucket (each oldbucket expands to two
// buckets during a grow). // buckets during a grow).
if t.reflexivekey() || alg.equal(k, k) { if t.reflexivekey() || t.key.equal(k, k) {
// If the item in the oldbucket is not destined for // If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it. // the current new bucket in the iteration, skip it.
hash := alg.hash(k, uintptr(h.hash0)) hash := t.hasher(k, uintptr(h.hash0))
if hash&bucketMask(it.B) != checkBucket { if hash&bucketMask(it.B) != checkBucket {
continue continue
} }
...@@ -937,7 +931,7 @@ next: ...@@ -937,7 +931,7 @@ next:
} }
} }
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) || if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
!(t.reflexivekey() || alg.equal(k, k)) { !(t.reflexivekey() || t.key.equal(k, k)) {
// This is the golden data, we can return it. // This is the golden data, we can return it.
// OR // OR
// key!=key, so the entry can't be deleted or updated, so we can just return it. // key!=key, so the entry can't be deleted or updated, so we can just return it.
...@@ -1174,8 +1168,8 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { ...@@ -1174,8 +1168,8 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need // Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y). // to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k2, uintptr(h.hash0)) hash := t.hasher(k2, uintptr(h.hash0))
if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) { if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably // If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover, // will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the // it isn't reproducible. Reproducibility is required in the
...@@ -1269,16 +1263,12 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) { ...@@ -1269,16 +1263,12 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
} }
} }
func ismapkey(t *_type) bool {
return t.alg.hash != nil
}
// Reflect stubs. Called from ../reflect/asm_*.s // Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap //go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap { func reflect_makemap(t *maptype, cap int) *hmap {
// Check invariants and reflects math. // Check invariants and reflects math.
if !ismapkey(t.key) { if t.key.equal == nil {
throw("runtime.reflect_makemap: unsupported map key type") throw("runtime.reflect_makemap: unsupported map key type")
} }
if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) || if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
...@@ -1381,10 +1371,5 @@ func reflectlite_maplen(h *hmap) int { ...@@ -1381,10 +1371,5 @@ func reflectlite_maplen(h *hmap) int {
return h.count return h.count
} }
//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
return ismapkey(t)
}
const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go:zeroValSize const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go:zeroValSize
var zeroVal [maxZero]byte var zeroVal [maxZero]byte
...@@ -483,3 +483,33 @@ func BenchmarkMapStringConversion(b *testing.B) { ...@@ -483,3 +483,33 @@ func BenchmarkMapStringConversion(b *testing.B) {
}) })
} }
} }
var BoolSink bool
func BenchmarkMapInterfaceString(b *testing.B) {
m := map[interface{}]bool{}
for i := 0; i < 100; i++ {
m[fmt.Sprintf("%d", i)] = true
}
key := (interface{})("A")
b.ResetTimer()
for i := 0; i < b.N; i++ {
BoolSink = m[key]
}
}
func BenchmarkMapInterfacePtr(b *testing.B) {
m := map[interface{}]bool{}
for i := 0; i < 100; i++ {
i := i
m[&i] = true
}
key := new(int)
b.ResetTimer()
for i := 0; i < b.N; i++ {
BoolSink = m[key]
}
}
...@@ -25,7 +25,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { ...@@ -25,7 +25,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
// One-bucket table. No need to hash. // One-bucket table. No need to hash.
b = (*bmap)(h.buckets) b = (*bmap)(h.buckets)
} else { } else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -65,7 +65,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { ...@@ -65,7 +65,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash. // One-bucket table. No need to hash.
b = (*bmap)(h.buckets) b = (*bmap)(h.buckets)
} else { } else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -100,9 +100,9 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { ...@@ -100,9 +100,9 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map writes") throw("concurrent map writes")
} }
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
...@@ -190,9 +190,9 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer ...@@ -190,9 +190,9 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map writes") throw("concurrent map writes")
} }
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
...@@ -281,9 +281,9 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) { ...@@ -281,9 +281,9 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
throw("concurrent map writes") throw("concurrent map writes")
} }
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting h.flags ^= hashWriting
bucket := hash & bucketMask(h.B) bucket := hash & bucketMask(h.B)
...@@ -400,7 +400,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { ...@@ -400,7 +400,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need // Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y). // to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k, uintptr(h.hash0)) hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 { if hash&newbit != 0 {
useY = 1 useY = 1
} }
......
...@@ -25,7 +25,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { ...@@ -25,7 +25,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
// One-bucket table. No need to hash. // One-bucket table. No need to hash.
b = (*bmap)(h.buckets) b = (*bmap)(h.buckets)
} else { } else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -65,7 +65,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { ...@@ -65,7 +65,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash. // One-bucket table. No need to hash.
b = (*bmap)(h.buckets) b = (*bmap)(h.buckets)
} else { } else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -100,9 +100,9 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { ...@@ -100,9 +100,9 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map writes") throw("concurrent map writes")
} }
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
...@@ -190,9 +190,9 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer ...@@ -190,9 +190,9 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 { if h.flags&hashWriting != 0 {
throw("concurrent map writes") throw("concurrent map writes")
} }
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
...@@ -281,9 +281,9 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) { ...@@ -281,9 +281,9 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
throw("concurrent map writes") throw("concurrent map writes")
} }
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting h.flags ^= hashWriting
bucket := hash & bucketMask(h.B) bucket := hash & bucketMask(h.B)
...@@ -400,7 +400,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) { ...@@ -400,7 +400,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need // Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y). // to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k, uintptr(h.hash0)) hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 { if hash&newbit != 0 {
useY = 1 useY = 1
} }
......
...@@ -76,7 +76,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { ...@@ -76,7 +76,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return unsafe.Pointer(&zeroVal[0]) return unsafe.Pointer(&zeroVal[0])
} }
dohash: dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -171,7 +171,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { ...@@ -171,7 +171,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return unsafe.Pointer(&zeroVal[0]), false return unsafe.Pointer(&zeroVal[0]), false
} }
dohash: dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B) m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
...@@ -211,9 +211,9 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer { ...@@ -211,9 +211,9 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
throw("concurrent map writes") throw("concurrent map writes")
} }
key := stringStructOf(&s) key := stringStructOf(&s)
hash := t.key.alg.hash(noescape(unsafe.Pointer(&s)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign. // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting h.flags ^= hashWriting
if h.buckets == nil { if h.buckets == nil {
...@@ -307,9 +307,9 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) { ...@@ -307,9 +307,9 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
} }
key := stringStructOf(&ky) key := stringStructOf(&ky)
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting h.flags ^= hashWriting
bucket := hash & bucketMask(h.B) bucket := hash & bucketMask(h.B)
...@@ -429,7 +429,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { ...@@ -429,7 +429,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need // Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y). // to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k, uintptr(h.hash0)) hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 { if hash&newbit != 0 {
useY = 1 useY = 1
} }
......
...@@ -435,11 +435,11 @@ func TestEmptyKeyAndValue(t *testing.T) { ...@@ -435,11 +435,11 @@ func TestEmptyKeyAndValue(t *testing.T) {
// ("quick keys") as well as long keys. // ("quick keys") as well as long keys.
func TestSingleBucketMapStringKeys_DupLen(t *testing.T) { func TestSingleBucketMapStringKeys_DupLen(t *testing.T) {
testMapLookups(t, map[string]string{ testMapLookups(t, map[string]string{
"x": "x1val", "x": "x1val",
"xx": "x2val", "xx": "x2val",
"foo": "fooval", "foo": "fooval",
"bar": "barval", // same key length as "foo" "bar": "barval", // same key length as "foo"
"xxxx": "x4val", "xxxx": "x4val",
strings.Repeat("x", 128): "longval1", strings.Repeat("x", 128): "longval1",
strings.Repeat("y", 128): "longval2", strings.Repeat("y", 128): "longval2",
}) })
...@@ -1156,3 +1156,64 @@ func TestMapTombstones(t *testing.T) { ...@@ -1156,3 +1156,64 @@ func TestMapTombstones(t *testing.T) {
} }
runtime.MapTombstoneCheck(m) runtime.MapTombstoneCheck(m)
} }
type canString int
func (c canString) String() string {
return fmt.Sprintf("%d", int(c))
}
func TestMapInterfaceKey(t *testing.T) {
// Test all the special cases in runtime.typehash.
type GrabBag struct {
f32 float32
f64 float64
c64 complex64
c128 complex128
s string
i0 interface{}
i1 interface {
String() string
}
a [4]string
}
m := map[interface{}]bool{}
// Put a bunch of data in m, so that a bad hash is likely to
// lead to a bad bucket, which will lead to a missed lookup.
for i := 0; i < 1000; i++ {
m[i] = true
}
m[GrabBag{f32: 1.0}] = true
if !m[GrabBag{f32: 1.0}] {
panic("f32 not found")
}
m[GrabBag{f64: 1.0}] = true
if !m[GrabBag{f64: 1.0}] {
panic("f64 not found")
}
m[GrabBag{c64: 1.0i}] = true
if !m[GrabBag{c64: 1.0i}] {
panic("c64 not found")
}
m[GrabBag{c128: 1.0i}] = true
if !m[GrabBag{c128: 1.0i}] {
panic("c128 not found")
}
m[GrabBag{s: "foo"}] = true
if !m[GrabBag{s: "foo"}] {
panic("string not found")
}
m[GrabBag{i0: "foo"}] = true
if !m[GrabBag{i0: "foo"}] {
panic("interface{} not found")
}
m[GrabBag{i1: canString(5)}] = true
if !m[GrabBag{i1: canString(5)}] {
panic("interface{String() string} not found")
}
m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true
if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] {
panic("array not found")
}
}
...@@ -17,9 +17,10 @@ import "unsafe" ...@@ -17,9 +17,10 @@ import "unsafe"
type tflag uint8 type tflag uint8
const ( const (
tflagUncommon tflag = 1 << 0 tflagUncommon tflag = 1 << 0
tflagExtraStar tflag = 1 << 1 tflagExtraStar tflag = 1 << 1
tflagNamed tflag = 1 << 2 tflagNamed tflag = 1 << 2
tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
) )
// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize, // Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
...@@ -33,7 +34,9 @@ type _type struct { ...@@ -33,7 +34,9 @@ type _type struct {
align uint8 align uint8
fieldalign uint8 fieldalign uint8
kind uint8 kind uint8
alg *typeAlg // function for comparing objects of this type
// (ptr to object A, ptr to object B) -> ==?
equal func(unsafe.Pointer, unsafe.Pointer) bool
// gcdata stores the GC type data for the garbage collector. // gcdata stores the GC type data for the garbage collector.
// If the KindGCProg bit is set in kind, gcdata is a GC program. // If the KindGCProg bit is set in kind, gcdata is a GC program.
// Otherwise it is a ptrmask bitmap. See mbitmap.go for details. // Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
...@@ -358,10 +361,12 @@ type interfacetype struct { ...@@ -358,10 +361,12 @@ type interfacetype struct {
} }
type maptype struct { type maptype struct {
typ _type typ _type
key *_type key *_type
elem *_type elem *_type
bucket *_type // internal type representing a hash bucket bucket *_type // internal type representing a hash bucket
// function for hashing keys (ptr to key, seed) -> hash
hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot keysize uint8 // size of key slot
elemsize uint8 // size of elem slot elemsize uint8 // size of elem slot
bucketsize uint16 // size of bucket bucketsize uint16 // size of bucket
...@@ -497,6 +502,16 @@ func (n name) pkgPath() string { ...@@ -497,6 +502,16 @@ func (n name) pkgPath() string {
return pkgPathName.name() return pkgPathName.name()
} }
func (n name) isBlank() bool {
if n.bytes == nil {
return false
}
if n.nameLen() != 1 {
return false
}
return *n.data(3) == '_'
}
// typelinksinit scans the types from extra modules and builds the // typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers. // moduledata typemap used to de-duplicate type pointers.
func typelinksinit() { func typelinksinit() {
......
...@@ -71,7 +71,7 @@ func test5() { ...@@ -71,7 +71,7 @@ func test5() {
} }
func test6() { func test6() {
defer mustRecover("unhashable") defer mustRecover("unhashable type main.T")
var x T var x T
var z interface{} = x var z interface{} = x
m := make(map[interface{}]int) m := make(map[interface{}]int)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment