Commit 38519e69 authored by Michael Hudson-Doyle, committed by Ian Lance Taylor

cmd/compile, runtime: stop returning t.zero on hashmap miss

Previously t.zero always pointed to runtime.zerovalue. Change the hashmap code
to always return a runtime pointer directly, and change that pointer to point
to a larger buffer if one is needed.

(It might be better to only copy from the pointer returned by the mapaccess
functions when the value type is small enough and have the compiler insert
explicit zeroing for larger value types, but I tried and failed to do this).

This removes all uses of the zero field of the type data; the field itself can
be removed in a separate change.

Fixes #11491

Change-Id: I5b81752ff4067d74a5a281c41e88f151bae0171e
Reviewed-on: https://go-review.googlesource.com/13784
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 79a3d239
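Before the diff: a minimal, self-contained sketch of the scheme this change adopts, for readers who want to see it outside the runtime. One zero buffer is shared by every map type; misses read it through an atomic pointer load on the fast path, and the buffer is grown under a lock the first time a larger value type shows up. The sketch uses sync/atomic and make in place of the runtime's internal atomics and persistentalloc, and all names (ensureZero, zeroValuePtr, and so on) are hypothetical; the real code is in the src/runtime/hashmap.go hunk below.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"unsafe"
)

const initialZeroSize = 1024

var zeroInitial [initialZeroSize]byte

var (
	zeroLock sync.Mutex
	// zeroPtr always points at zeroSize bytes of zeros; both are
	// read and written atomically so the fast path needs no lock.
	zeroPtr  unsafe.Pointer = unsafe.Pointer(&zeroInitial)
	zeroSize uintptr        = initialZeroSize
)

// ensureZero guarantees the shared buffer can serve as a zero value of
// the given size, mirroring what mapzero does for t.elem.size.
func ensureZero(size uintptr) {
	// Fast path: existing buffer is already big enough.
	if size <= atomic.LoadUintptr(&zeroSize) {
		return
	}
	zeroLock.Lock()
	defer zeroLock.Unlock()
	cursize := atomic.LoadUintptr(&zeroSize)
	if cursize < size {
		for cursize < size {
			cursize *= 2
			if cursize == 0 {
				panic("map element too large") // uintptr overflow on 32-bit
			}
		}
		buf := make([]byte, cursize) // the runtime uses persistentalloc
		// Publish the buffer before the size, so no reader can pass the
		// size check and then load a pointer to the smaller old buffer.
		atomic.StorePointer(&zeroPtr, unsafe.Pointer(&buf[0]))
		atomic.StoreUintptr(&zeroSize, cursize)
	}
}

// zeroValuePtr is what a map miss conceptually returns: a pointer the
// caller copies its value type's worth of zero bytes from.
func zeroValuePtr() unsafe.Pointer {
	return atomic.LoadPointer(&zeroPtr)
}

func main() {
	ensureZero(4096)                      // as makemap does for a large element type
	fmt.Println(*(*byte)(zeroValuePtr())) // 0
}
```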
src/cmd/compile/internal/gc/go.go
@@ -685,8 +685,6 @@ var nodfp *Node
 var Disable_checknil int
 
-var zerosize int64
-
 type Flow struct {
 	Prog *obj.Prog // actual instruction
 	P1   *Flow     // predecessors of this instruction: p1,
src/cmd/compile/internal/gc/obj.go
@@ -89,9 +89,6 @@ func dumpobj() {
 	dumpglobls()
 	externdcl = tmp
 
-	zero := Pkglookup("zerovalue", Runtimepkg)
-	ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
-
 	dumpdata()
 	obj.Writeobjdirect(Ctxt, bout)
src/cmd/compile/internal/gc/reflect.go
@@ -794,13 +794,6 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 		sptr = weaktypesym(tptr)
 	}
 
-	// All (non-reflect-allocated) Types share the same zero object.
-	// Each place in the compiler where a pointer to the zero object
-	// might be returned by a runtime call (map access return value,
-	// 2-arg type cast) declares the size of the zerovalue it needs.
-	// The linker magically takes the max of all the sizes.
-	zero := Pkglookup("zerovalue", Runtimepkg)
-
 	gcsym, useGCProg, ptrdata := dgcsym(t)
 
 	// We use size 0 here so we get the pointer to the zero value,
@@ -876,7 +869,7 @@ func dcommontype(s *Sym, ot int, t *Type) int {
 	ot += Widthptr
 	ot = dsymptr(s, ot, sptr, 0) // ptrto type
-	ot = dsymptr(s, ot, zero, 0) // ptr to zero value
+	ot = duintptr(s, ot, 0)      // ptr to zero value (unused)
 	return ot
 }
src/cmd/compile/internal/gc/walk.go
@@ -872,11 +872,6 @@ func walkexpr(np **Node, init **NodeList) {
 		typecheck(&n, Etop)
 		walkexpr(&n, init)
 
-		// mapaccess needs a zero value to be at least this big.
-		if zerosize < t.Type.Width {
-			zerosize = t.Type.Width
-		}
-
 		// TODO: ptr is always non-nil, so disable nil check for this OIND op.
 		goto ret
 
@@ -1285,10 +1280,6 @@ func walkexpr(np **Node, init **NodeList) {
 		n.Type = t.Type
 		n.Typecheck = 1
 
-		// mapaccess needs a zero value to be at least this big.
-		if zerosize < t.Type.Width {
-			zerosize = t.Type.Width
-		}
-
 		goto ret
 
 	case ORECV:
src/reflect/type.go
@@ -257,7 +257,7 @@ type rtype struct {
 	string        *string        // string form; unnecessary but undeniably useful
 	*uncommonType                // (relatively) uncommon fields
 	ptrToThis     *rtype         // type for pointer to this type, if used in binary or has methods
-	zero          unsafe.Pointer // pointer to zero value
+	zero          unsafe.Pointer // unused
 }
 
 // a copy of runtime.typeAlg
src/runtime/hashmap.go
@@ -233,7 +233,7 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
 		throw("need padding in bucket (value)")
 	}
 
-	// make sure zero of element type is available.
+	// make sure zeroptr is large enough
 	mapzero(t.elem)
 
 	// find size parameter which will hold the requested # of elements
@@ -277,7 +277,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		raceReadObjectPC(t.key, key, callerpc, pc)
 	}
 	if h == nil || h.count == 0 {
-		return unsafe.Pointer(t.elem.zero)
+		return atomicloadp(unsafe.Pointer(&zeroptr))
 	}
 	alg := t.key.alg
 	hash := alg.hash(key, uintptr(h.hash0))
@@ -312,7 +312,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return unsafe.Pointer(t.elem.zero)
+			return atomicloadp(unsafe.Pointer(&zeroptr))
 		}
 	}
 }
@@ -325,7 +325,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 		raceReadObjectPC(t.key, key, callerpc, pc)
 	}
 	if h == nil || h.count == 0 {
-		return unsafe.Pointer(t.elem.zero), false
+		return atomicloadp(unsafe.Pointer(&zeroptr)), false
 	}
 	alg := t.key.alg
 	hash := alg.hash(key, uintptr(h.hash0))
@@ -360,7 +360,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return unsafe.Pointer(t.elem.zero), false
+			return atomicloadp(unsafe.Pointer(&zeroptr)), false
 		}
 	}
 }
@@ -994,59 +994,39 @@ func reflect_ismapkey(t *_type) bool {
 	return ismapkey(t)
 }
 
-var zerobuf struct {
-	lock mutex
-	p    *byte
-	size uintptr
-}
-
-var zerotiny [1024]byte
-
-// mapzero ensures that t.zero points at a zero value for type t.
-// Types known to the compiler are in read-only memory and all point
-// to a single zero in the bss of a large enough size.
-// Types allocated by package reflect are in writable memory and
-// start out with zero set to nil; we initialize those on demand.
-func mapzero(t *_type) {
-	// On ARM, atomicloadp is implemented as xadd(p, 0),
-	// so we cannot use atomicloadp on read-only memory.
-	// Check whether the pointer is in the heap; if not, it's not writable
-	// so the zero value must already be set.
-	if GOARCH == "arm" && !inheap(uintptr(unsafe.Pointer(t))) {
-		if t.zero == nil {
-			print("runtime: map element ", *t._string, " missing zero value\n")
-			throw("mapzero")
-		}
-		return
-	}
-
-	// Already done?
-	// Check without lock, so must use atomicload to sync with atomicstore in allocation case below.
-	if atomicloadp(unsafe.Pointer(&t.zero)) != nil {
-		return
-	}
-
-	// Small enough for static buffer?
-	if t.size <= uintptr(len(zerotiny)) {
-		atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(&zerotiny[0]))
-		return
-	}
-
-	// Use allocated buffer.
-	lock(&zerobuf.lock)
-	if zerobuf.size < t.size {
-		if zerobuf.size == 0 {
-			zerobuf.size = 4 * 1024
-		}
-		for zerobuf.size < t.size {
-			zerobuf.size *= 2
-			if zerobuf.size == 0 {
-				// need >2GB zero on 32-bit machine
-				throw("map element too large")
-			}
-		}
-		zerobuf.p = (*byte)(persistentalloc(zerobuf.size, 64, &memstats.other_sys))
-	}
-	atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(zerobuf.p))
-	unlock(&zerobuf.lock)
-}
+var zerolock mutex
+
+const initialZeroSize = 1024
+
+var zeroinitial [initialZeroSize]byte
+
+// All accesses to zeroptr and zerosize must be atomic so that they
+// can be accessed without locks in the common case.
+var zeroptr unsafe.Pointer = unsafe.Pointer(&zeroinitial)
+var zerosize uintptr = initialZeroSize
+
+// mapzero ensures that zeroptr points to a buffer large enough to
+// serve as the zero value for t.
+func mapzero(t *_type) {
+	// Is the type small enough for existing buffer?
+	cursize := uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+	if t.size <= cursize {
+		return
+	}
+
+	// Allocate a new buffer.
+	lock(&zerolock)
+	cursize = uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
+	if cursize < t.size {
+		for cursize < t.size {
+			cursize *= 2
+			if cursize == 0 {
+				// need >2GB zero on 32-bit machine
+				throw("map element too large")
+			}
+		}
+		atomicstorep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
+		atomicstorep1(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize))
+	}
+	unlock(&zerolock)
+}
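As a user-level sanity check of the path above: makemap calls mapzero for the element type, so merely creating a map whose values are larger than the 1024-byte initial buffer forces the shared buffer to grow, and a miss must still come back fully zeroed. A small illustrative program (hypothetical, not part of the change):

```go
package main

import "fmt"

// big is larger than the 1024-byte initial zero buffer, so makemap's
// call to mapzero has to grow the shared buffer for this element type.
type big struct {
	payload [4096]byte
}

func main() {
	m := make(map[string]big)
	v, ok := m["missing"]                          // miss: v is copied from the shared zero buffer
	fmt.Println(ok, v.payload[0], v.payload[4095]) // false 0 0
}
```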
src/runtime/hashmap_fast.go
@@ -14,7 +14,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
 	}
 	if h == nil || h.count == 0 {
-		return unsafe.Pointer(t.elem.zero)
+		return atomicloadp(unsafe.Pointer(&zeroptr))
 	}
 	var b *bmap
 	if h.B == 0 {
@@ -45,7 +45,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return unsafe.Pointer(t.elem.zero)
+			return atomicloadp(unsafe.Pointer(&zeroptr))
 		}
 	}
 }
@@ -56,7 +56,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
 		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
 	}
 	if h == nil || h.count == 0 {
-		return unsafe.Pointer(t.elem.zero), false
+		return atomicloadp(unsafe.Pointer(&zeroptr)), false
 	}
 	var b *bmap
 	if h.B == 0 {
@@ -87,7 +87,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return unsafe.Pointer(t.elem.zero), false
+			return atomicloadp(unsafe.Pointer(&zeroptr)), false
 		}
 	}
 }
@@ -98,7 +98,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
 	}
 	if h == nil || h.count == 0 {
-		return unsafe.Pointer(t.elem.zero)
+		return atomicloadp(unsafe.Pointer(&zeroptr))
 	}
 	var b *bmap
 	if h.B == 0 {
@@ -129,7 +129,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return unsafe.Pointer(t.elem.zero)
+			return atomicloadp(unsafe.Pointer(&zeroptr))
 		}
 	}
 }
@@ -140,7 +140,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
 		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
 	}
 	if h == nil || h.count == 0 {
-		return unsafe.Pointer(t.elem.zero), false
+		return atomicloadp(unsafe.Pointer(&zeroptr)), false
 	}
 	var b *bmap
 	if h.B == 0 {
@@ -171,7 +171,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return unsafe.Pointer(t.elem.zero), false
+			return atomicloadp(unsafe.Pointer(&zeroptr)), false
 		}
 	}
 }
@@ -182,7 +182,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
 	}
 	if h == nil || h.count == 0 {
-		return unsafe.Pointer(t.elem.zero)
+		return atomicloadp(unsafe.Pointer(&zeroptr))
 	}
 	key := (*stringStruct)(unsafe.Pointer(&ky))
 	if h.B == 0 {
@@ -203,7 +203,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
 				}
 			}
-			return unsafe.Pointer(t.elem.zero)
+			return atomicloadp(unsafe.Pointer(&zeroptr))
 		}
 		// long key, try not to do more comparisons than necessary
 		keymaybe := uintptr(bucketCnt)
@@ -241,7 +241,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
 			}
 		}
-		return unsafe.Pointer(t.elem.zero)
+		return atomicloadp(unsafe.Pointer(&zeroptr))
 	}
 dohash:
 	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
@@ -273,7 +273,7 @@ dohash:
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return unsafe.Pointer(t.elem.zero)
+			return atomicloadp(unsafe.Pointer(&zeroptr))
 		}
 	}
 }
@@ -284,7 +284,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
 	}
 	if h == nil || h.count == 0 {
-		return unsafe.Pointer(t.elem.zero), false
+		return atomicloadp(unsafe.Pointer(&zeroptr)), false
 	}
 	key := (*stringStruct)(unsafe.Pointer(&ky))
 	if h.B == 0 {
@@ -305,7 +305,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
 				}
 			}
-			return unsafe.Pointer(t.elem.zero), false
+			return atomicloadp(unsafe.Pointer(&zeroptr)), false
 		}
 		// long key, try not to do more comparisons than necessary
 		keymaybe := uintptr(bucketCnt)
@@ -341,7 +341,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
 			}
 		}
-		return unsafe.Pointer(t.elem.zero), false
+		return atomicloadp(unsafe.Pointer(&zeroptr)), false
 	}
 dohash:
 	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
@@ -373,7 +373,7 @@ dohash:
 		}
 		b = b.overflow(t)
 		if b == nil {
-			return unsafe.Pointer(t.elem.zero), false
+			return atomicloadp(unsafe.Pointer(&zeroptr)), false
		}
 	}
 }
src/runtime/type.go
@@ -27,7 +27,7 @@ type _type struct {
 	_string *string
 	x       *uncommontype
 	ptrto   *_type
-	zero    *byte // ptr to the zero value for this type
+	zero    *byte // unused
 }
 
 type method struct {