Commit 73cb9a1c authored by Josh Bleecher Snyder

all: refer to map elements as elements instead of values

The spec carefully and consistently uses "key" and "element"
as map terminology. The implementation, not so much.

This change attempts to make the implementation consistently
hew to the spec's terminology. Beyond consistency, this has
the advantage of avoiding some confusion and naming collisions,
since v and value are very generic and commonly used terms.

I believe that I found everything, but there are a lot of
non-obvious places for these to hide, and grepping for them is hard.
Hopefully this change fixes enough of them that we will start using
elem going forward. Any remaining hidden cases can be cleaned up ad hoc
as they are discovered.
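
For reference, here is the spec's terminology in one illustrative
snippet (example code, not part of this change):

    // For a map of type map[K]E, the spec calls K the key type
    // and E the element type.
    m := map[string]int{"a": 1}
    for key, elem := range m { // ranging yields key/element pairs
        _, _ = key, elem
    }
    elem, ok := m["a"] // indexing yields the element for a key
    _, _ = elem, ok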

The only externally-facing part of this change is in package reflect,
where there is a minor doc change and a function parameter name change.

Updates #27167

Change-Id: I2f2d78f16c360dc39007b9966d5c2046a29d3701
Reviewed-on: https://go-review.googlesource.com/c/go/+/174523
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent a8d0047e
@@ -15,7 +15,7 @@ func typecheckrange(n *Node) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
// 1. typcheck produced values,
// 1. typecheck produced values,
// this part can declare new vars and so it must be typechecked before body,
// because body can contain a closure that captures the vars.
// 2. decldepth++ to denote loop body.
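
A hypothetical loop showing why the produced vars must be typechecked
before the body (the body's closure captures them):

    m := map[string]int{"a": 1}
    for k, e := range m { // k and e declared and typechecked in step 1
        f := func() (string, int) { return k, e } // closure captures k, e
        _ = f
    }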
@@ -298,8 +298,8 @@ func walkrange(n *Node) *Node {
hit := prealloc[n]
th := hit.Type
n.Left = nil
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
valsym := th.Field(1).Sym // ditto
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
elemsym := th.Field(1).Sym // ditto
fn := syslook("mapiterinit")
@@ -318,11 +318,11 @@ func walkrange(n *Node) *Node {
} else if v2 == nil {
body = []*Node{nod(OAS, v1, key)}
} else {
val := nodSym(ODOT, hit, valsym)
val = nod(ODEREF, val, nil)
elem := nodSym(ODOT, hit, elemsym)
elem = nod(ODEREF, elem, nil)
a := nod(OAS2, nil, nil)
a.List.Set2(v1, v2)
a.Rlist.Set2(key, val)
a.Rlist.Set2(key, elem)
body = []*Node{a}
}
......
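
For orientation, the map case of walkrange lowers a loop like
for k, e := range m into roughly this pseudocode (a sketch using the
hiter field names; not literal compiler output):

    var it hiter              // iterator struct; see reflect.go:hiter
    mapiterinit(typ, m, &it)  // typ: the *maptype descriptor (assumed name)
    for ; it.key != nil; mapiternext(&it) {
        k := *it.key
        e := *it.elem         // field renamed from val by this change
        // ... loop body ...
    }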
@@ -56,9 +56,9 @@ type Sig struct {
// program for it.
// Make sure this stays in sync with runtime/map.go.
const (
BUCKETSIZE = 8
MAXKEYSIZE = 128
MAXVALSIZE = 128
BUCKETSIZE = 8
MAXKEYSIZE = 128
MAXELEMSIZE = 128
)
func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
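
One consequence of these limits, as a sketch: any hypothetical key or
elem type wider than 128 bytes is stored indirectly.

    type big [200]byte   // 200 > MAXELEMSIZE (128)
    m := map[int]big{}   // bucket slots hold *big; the runtime follows
    _ = m                // the pointer transparently on access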
@@ -86,14 +86,14 @@ func bmap(t *types.Type) *types.Type {
bucket := types.New(TSTRUCT)
keytype := t.Key()
valtype := t.Elem()
elemtype := t.Elem()
dowidth(keytype)
dowidth(valtype)
dowidth(elemtype)
if keytype.Width > MAXKEYSIZE {
keytype = types.NewPtr(keytype)
}
if valtype.Width > MAXVALSIZE {
valtype = types.NewPtr(valtype)
if elemtype.Width > MAXELEMSIZE {
elemtype = types.NewPtr(elemtype)
}
field := make([]*types.Field, 0, 5)
@@ -107,10 +107,10 @@ func bmap(t *types.Type) *types.Type {
keys := makefield("keys", arr)
field = append(field, keys)
arr = types.NewArray(valtype, BUCKETSIZE)
arr = types.NewArray(elemtype, BUCKETSIZE)
arr.SetNoalg(true)
values := makefield("values", arr)
field = append(field, values)
elems := makefield("elems", arr)
field = append(field, elems)
// Make sure the overflow pointer is the last memory in the struct,
// because the runtime assumes it can use size-ptrSize as the
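
Concretely, for a hypothetical map[int32]int64 this function builds a
bucket type roughly like the sketch below (runtime/map.go names the
first field tophash):

    type bucket struct {
        topbits  [8]uint8  // top bits of each entry's hash
        keys     [8]int32  // all keys together, then all elems,
        elems    [8]int64  // avoiding per-pair key/elem padding
        overflow *bucket   // kept last; see the comment above
    }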
@@ -126,21 +126,21 @@ func bmap(t *types.Type) *types.Type {
// will end with no padding.
// On nacl/amd64p32, however, the max alignment is 64-bit,
// but the overflow pointer will add only a 32-bit field,
// so if the struct needs 64-bit padding (because a key or value does)
// so if the struct needs 64-bit padding (because a key or elem does)
// then it would end with an extra 32-bit padding field.
// Preempt that by emitting the padding here.
if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr {
if int(elemtype.Align) > Widthptr || int(keytype.Align) > Widthptr {
field = append(field, makefield("pad", types.Types[TUINTPTR]))
}
// If keys and values have no pointers, the map implementation
// If keys and elems have no pointers, the map implementation
// can keep a list of overflow pointers on the side so that
// buckets can be marked as having no pointers.
// Arrange for the bucket to have no pointers by changing
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in runtime/map.go.
otyp := types.NewPtr(bucket)
if !types.Haspointers(valtype) && !types.Haspointers(keytype) {
if !types.Haspointers(elemtype) && !types.Haspointers(keytype) {
otyp = types.Types[TUINTPTR]
}
overflow := makefield("overflow", otyp)
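
As a sketch of the rule above:

    // map[int]int    -> neither side has pointers: overflow is uintptr,
    //                   and the whole bucket scans as pointer-free
    // map[string]int -> key contains a pointer: overflow stays *bmap
    // map[int]*T     -> elem contains a pointer: overflow stays *bmap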
@@ -161,38 +161,38 @@ func bmap(t *types.Type) *types.Type {
if keytype.Align > BUCKETSIZE {
Fatalf("key align too big for %v", t)
}
if valtype.Align > BUCKETSIZE {
Fatalf("value align too big for %v", t)
if elemtype.Align > BUCKETSIZE {
Fatalf("elem align too big for %v", t)
}
if keytype.Width > MAXKEYSIZE {
Fatalf("key size to large for %v", t)
}
if valtype.Width > MAXVALSIZE {
Fatalf("value size to large for %v", t)
if elemtype.Width > MAXELEMSIZE {
Fatalf("elem size to large for %v", t)
}
if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
Fatalf("key indirect incorrect for %v", t)
}
if t.Elem().Width > MAXVALSIZE && !valtype.IsPtr() {
Fatalf("value indirect incorrect for %v", t)
if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() {
Fatalf("elem indirect incorrect for %v", t)
}
if keytype.Width%int64(keytype.Align) != 0 {
Fatalf("key size not a multiple of key align for %v", t)
}
if valtype.Width%int64(valtype.Align) != 0 {
Fatalf("value size not a multiple of value align for %v", t)
if elemtype.Width%int64(elemtype.Align) != 0 {
Fatalf("elem size not a multiple of elem align for %v", t)
}
if bucket.Align%keytype.Align != 0 {
Fatalf("bucket align not multiple of key align %v", t)
}
if bucket.Align%valtype.Align != 0 {
Fatalf("bucket align not multiple of value align %v", t)
if bucket.Align%elemtype.Align != 0 {
Fatalf("bucket align not multiple of elem align %v", t)
}
if keys.Offset%int64(keytype.Align) != 0 {
Fatalf("bad alignment of keys in bmap for %v", t)
}
if values.Offset%int64(valtype.Align) != 0 {
Fatalf("bad alignment of values in bmap for %v", t)
if elems.Offset%int64(elemtype.Align) != 0 {
Fatalf("bad alignment of elems in bmap for %v", t)
}
// Double-check that overflow field is final memory in struct,
@@ -270,7 +270,7 @@ func hiter(t *types.Type) *types.Type {
// build a struct:
// type hiter struct {
// key *Key
// val *Value
// elem *Elem
// t unsafe.Pointer // *MapType
// h *hmap
// buckets *bmap
@@ -287,8 +287,8 @@ func hiter(t *types.Type) *types.Type {
// }
// must match runtime/map.go:hiter.
fields := []*types.Field{
makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
makefield("val", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
makefield("t", types.Types[TUNSAFEPTR]),
makefield("h", types.NewPtr(hmap)),
makefield("buckets", types.NewPtr(bmap)),
@@ -1284,7 +1284,7 @@ func dtypesym(t *types.Type) *obj.LSym {
ot = duint8(lsym, ot, uint8(t.Key().Width))
}
if t.Elem().Width > MAXVALSIZE {
if t.Elem().Width > MAXELEMSIZE {
ot = duint8(lsym, ot, uint8(Widthptr))
flags |= 2 // indirect value
} else {
@@ -1894,7 +1894,7 @@ func (p *GCProg) emit(t *types.Type, offset int64) {
// size bytes of zeros.
func zeroaddr(size int64) *Node {
if size >= 1<<31 {
Fatalf("map value too big %d", size)
Fatalf("map elem too big %d", size)
}
if zerosize < size {
zerosize = size
......
@@ -973,33 +973,33 @@ func maplit(n *Node, m *Node, init *Nodes) {
// build types [count]Tindex and [count]Tvalue
tk := types.NewArray(n.Type.Key(), int64(len(stat)))
tv := types.NewArray(n.Type.Elem(), int64(len(stat)))
te := types.NewArray(n.Type.Elem(), int64(len(stat)))
// TODO(josharian): suppress alg generation for these types?
dowidth(tk)
dowidth(tv)
dowidth(te)
// make and initialize static arrays
vstatk := staticname(tk)
vstatk.Name.SetReadonly(true)
vstatv := staticname(tv)
vstatv.Name.SetReadonly(true)
vstate := staticname(te)
vstate.Name.SetReadonly(true)
datak := nod(OARRAYLIT, nil, nil)
datav := nod(OARRAYLIT, nil, nil)
datae := nod(OARRAYLIT, nil, nil)
for _, r := range stat {
datak.List.Append(r.Left)
datav.List.Append(r.Right)
datae.List.Append(r.Right)
}
fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
fixedlit(inInitFunction, initKindStatic, datav, vstatv, init)
fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
// loop adding structure elements to map
// for i = 0; i < len(vstatk); i++ {
// map[vstatk[i]] = vstatv[i]
// map[vstatk[i]] = vstate[i]
// }
i := temp(types.Types[TINT])
rhs := nod(OINDEX, vstatv, i)
rhs := nod(OINDEX, vstate, i)
rhs.SetBounded(true)
kidx := nod(OINDEX, vstatk, i)
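
As a sketch, a literal such as m := map[string]int{"a": 1, "b": 2}
with all-static entries is lowered roughly to:

    var statk = [2]string{"a", "b"} // vstatk, read-only
    var state = [2]int{1, 2}        // vstate (formerly vstatv), read-only
    for i := 0; i < len(statk); i++ {
        m[statk[i]] = state[i]
    }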
@@ -1035,28 +1035,28 @@ func addMapEntries(m *Node, dyn []*Node, init *Nodes) {
nerr := nerrors
// Build list of var[c] = expr.
// Use temporaries so that mapassign1 can have addressable key, val.
// Use temporaries so that mapassign1 can have addressable key, elem.
// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
key := temp(m.Type.Key())
val := temp(m.Type.Elem())
tmpkey := temp(m.Type.Key())
tmpelem := temp(m.Type.Elem())
for _, r := range dyn {
index, value := r.Left, r.Right
index, elem := r.Left, r.Right
setlineno(index)
a := nod(OAS, key, index)
a := nod(OAS, tmpkey, index)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
setlineno(value)
a = nod(OAS, val, value)
setlineno(elem)
a = nod(OAS, tmpelem, elem)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
setlineno(val)
a = nod(OAS, nod(OINDEX, m, key), val)
setlineno(tmpelem)
a = nod(OAS, nod(OINDEX, m, tmpkey), tmpelem)
a = typecheck(a, ctxStmt)
a = walkstmt(a)
init.Append(a)
@@ -1066,10 +1066,10 @@ func addMapEntries(m *Node, dyn []*Node, init *Nodes) {
}
}
a := nod(OVARKILL, key, nil)
a := nod(OVARKILL, tmpkey, nil)
a = typecheck(a, ctxStmt)
init.Append(a)
a = nod(OVARKILL, val, nil)
a = nod(OVARKILL, tmpelem, nil)
a = typecheck(a, ctxStmt)
init.Append(a)
}
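
Each dynamic entry is thus lowered roughly as follows (keyExpr and
elemExpr are placeholders for the entry's expressions):

    tmpkey := keyExpr   // addressable copy of the key
    tmpelem := elemExpr // addressable copy of the elem
    m[tmpkey] = tmpelem // mapassign receives &tmpkey, &tmpelem
    // ... repeat per entry, then VARKILL tmpkey and tmpelem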
......
@@ -1210,7 +1210,7 @@ opswitch:
// Allocate one bucket pointed to by hmap.buckets on stack if hint
// is not larger than BUCKETSIZE. In case hint is larger than
// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
// Maximum key and value size is 128 bytes, larger objects
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !Isconst(hint, CTINT) ||
hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
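
A usage-level sketch of the distinction (BUCKETSIZE is 8, per the
constants above):

    m1 := make(map[int]int, 8)  // hint <= BUCKETSIZE: the one bucket can
    _ = m1                      // be stack-allocated
    m2 := make(map[int]int, 64) // hint > BUCKETSIZE: runtime.makemap
    _ = m2                      // allocates buckets on the heap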
@@ -2462,7 +2462,7 @@ var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")
func mapfast(t *types.Type) int {
// Check runtime/map.go:maxValueSize before changing.
// Check runtime/map.go:maxElemSize before changing.
if t.Elem().Width > 128 {
return mapslow
}
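
For example (the 128 must stay in sync with the runtime constant):

    m1 := map[int32][128]byte{} // elem width 128: fast routines apply
    m2 := map[int32][129]byte{} // elem width > 128: generic mapassign
    _, _ = m1, m2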
......
@@ -1237,7 +1237,7 @@ func (it *MapIter) Value() Value {
t := (*mapType)(unsafe.Pointer(it.m.typ))
vtype := t.elem
return copyVal(vtype, it.m.flag.ro()|flag(vtype.Kind()), mapitervalue(it.it))
return copyVal(vtype, it.m.flag.ro()|flag(vtype.Kind()), mapiterelem(it.it))
}
// Next advances the map iterator and reports whether there is another
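
For reference, the exported iteration API that sits on top of this
(behavior unchanged by this CL):

    m := map[string]int{"a": 1}
    it := reflect.ValueOf(m).MapRange()
    for it.Next() {
        k, e := it.Key(), it.Value() // Value returns the map element
        _, _ = k, e
    }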
@@ -1635,13 +1635,13 @@ func (v Value) SetCap(n int) {
s.Cap = n
}
// SetMapIndex sets the value associated with key in the map v to val.
// SetMapIndex sets the element associated with key in the map v to elem.
// It panics if v's Kind is not Map.
// If val is the zero Value, SetMapIndex deletes the key from the map.
// If elem is the zero Value, SetMapIndex deletes the key from the map.
// Otherwise if v holds a nil map, SetMapIndex will panic.
// As in Go, key's value must be assignable to the map's key type,
// and val's value must be assignable to the map's value type.
func (v Value) SetMapIndex(key, val Value) {
// As in Go, key's elem must be assignable to the map's key type,
// and elem's value must be assignable to the map's elem type.
func (v Value) SetMapIndex(key, elem Value) {
v.mustBe(Map)
v.mustBeExported()
key.mustBeExported()
@@ -1653,17 +1653,17 @@ func (v Value) SetMapIndex(key, val Value) {
} else {
k = unsafe.Pointer(&key.ptr)
}
if val.typ == nil {
if elem.typ == nil {
mapdelete(v.typ, v.pointer(), k)
return
}
val.mustBeExported()
val = val.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
elem.mustBeExported()
elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
var e unsafe.Pointer
if val.flag&flagIndir != 0 {
e = val.ptr
if elem.flag&flagIndir != 0 {
e = elem.ptr
} else {
e = unsafe.Pointer(&val.ptr)
e = unsafe.Pointer(&elem.ptr)
}
mapassign(v.typ, v.pointer(), k, e)
}
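
A short usage sketch of the documented behavior:

    m := map[string]int{}
    v := reflect.ValueOf(m)
    v.SetMapIndex(reflect.ValueOf("k"), reflect.ValueOf(1)) // m["k"] = 1
    v.SetMapIndex(reflect.ValueOf("k"), reflect.Value{})    // delete(m, "k")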
@@ -2708,7 +2708,7 @@ func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer
func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer)
//go:noescape
func mapitervalue(it unsafe.Pointer) (value unsafe.Pointer)
func mapiterelem(it unsafe.Pointer) (elem unsafe.Pointer)
//go:noescape
func mapiternext(it unsafe.Pointer)
......
@@ -42,7 +42,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
}
}
}
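
The offset arithmetic above falls out of the bucket layout (tophash,
then all keys, then all elems); for map[uint32]E, elem i of bucket b
lives at:

    // unsafe.Pointer(b) + dataOffset // skip the bmap header (tophash)
    //   + bucketCnt*4                // skip the 8 uint32 keys
    //   + i*uintptr(t.elemsize)      // index into the elems array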
@@ -82,7 +82,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
}
}
}
@@ -171,12 +171,12 @@ bucketloop:
h.count++
done:
val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
return elem
}
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
@@ -261,12 +261,12 @@ bucketloop:
h.count++
done:
val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
return elem
}
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
@@ -302,11 +302,11 @@ search:
if t.key.ptrdata != 0 {
memclrHasPointers(k, t.key.size)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
if t.elem.ptrdata != 0 {
memclrHasPointers(v, t.elem.size)
memclrHasPointers(e, t.elem.size)
} else {
memclrNoHeapPointers(v, t.elem.size)
memclrNoHeapPointers(e, t.elem.size)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
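
The clearing branch above is chosen by whether the elem type contains
pointers, e.g.:

    // map[uint32]*T  -> t.elem.ptrdata != 0: memclrHasPointers (GC-visible)
    // map[uint32]int -> no pointers: memclrNoHeapPointers (cheaper)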
@@ -373,7 +373,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.v = add(x.k, bucketCnt*4)
x.e = add(x.k, bucketCnt*4)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@@ -381,13 +381,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.v = add(y.k, bucketCnt*4)
y.e = add(y.k, bucketCnt*4)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
v := add(k, bucketCnt*4)
for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
e := add(k, bucketCnt*4)
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@@ -399,7 +399,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
// to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
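
A worked example of this decision: growing doubles the bucket count, so
newbit is the old bucket count and each key lands in one of exactly two
buckets:

    // oldB := hash & (newbit - 1)   // bucket index before growing
    // newB := hash & (2*newbit - 1) // bucket index after growing
    // hash&newbit == 0  =>  newB == oldB          (stays in x)
    // hash&newbit != 0  =>  newB == oldB + newbit (moves to y)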
@@ -413,7 +413,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.v = add(dst.k, bucketCnt*4)
dst.e = add(dst.k, bucketCnt*4)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
@@ -425,17 +425,17 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
*(*uint32)(dst.k) = *(*uint32)(k)
}
typedmemmove(t.elem, dst.v, v)
typedmemmove(t.elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
// key or value arrays. That's ok, as we have the overflow pointer
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 4)
dst.v = add(dst.v, uintptr(t.valuesize))
dst.e = add(dst.e, uintptr(t.elemsize))
}
}
// Unlink the overflow buckets & clear key/value to help GC.
// Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation
......
@@ -42,7 +42,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
}
}
}
@@ -82,7 +82,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
for ; b != nil; b = b.overflow(t) {
for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
}
}
}
@@ -171,12 +171,12 @@ bucketloop:
h.count++
done:
val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
return elem
}
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
@@ -261,12 +261,12 @@ bucketloop:
h.count++
done:
val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
return elem
}
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
@@ -302,11 +302,11 @@ search:
if t.key.ptrdata != 0 {
memclrHasPointers(k, t.key.size)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
if t.elem.ptrdata != 0 {
memclrHasPointers(v, t.elem.size)
memclrHasPointers(e, t.elem.size)
} else {
memclrNoHeapPointers(v, t.elem.size)
memclrNoHeapPointers(e, t.elem.size)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
@@ -373,7 +373,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.v = add(x.k, bucketCnt*8)
x.e = add(x.k, bucketCnt*8)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@@ -381,13 +381,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.v = add(y.k, bucketCnt*8)
y.e = add(y.k, bucketCnt*8)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
v := add(k, bucketCnt*8)
for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
e := add(k, bucketCnt*8)
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@@ -399,7 +399,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
// to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
@@ -413,7 +413,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.v = add(dst.k, bucketCnt*8)
dst.e = add(dst.k, bucketCnt*8)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
@@ -431,17 +431,17 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
*(*uint64)(dst.k) = *(*uint64)(k)
}
typedmemmove(t.elem, dst.v, v)
typedmemmove(t.elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
// key or value arrays. That's ok, as we have the overflow pointer
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 8)
dst.v = add(dst.v, uintptr(t.valuesize))
dst.e = add(dst.e, uintptr(t.elemsize))
}
}
// Unlink the overflow buckets & clear key/value to help GC.
// Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation
......
@@ -35,7 +35,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
@@ -51,7 +51,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
continue
}
if k.str == key.str {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -70,7 +70,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if keymaybe != bucketCnt {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
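
For orientation, the lookup above tries cheap checks before a full
memequal, roughly:

    // if k.len != key.len     -> continue (length check)
    // if k.str == key.str     -> hit (same backing bytes)
    // if first 4 bytes differ -> continue
    // otherwise remember i in keymaybe and memequal once at the end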
@@ -97,7 +97,7 @@ dohash:
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
}
}
}
@@ -130,7 +130,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
@@ -146,7 +146,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
continue
}
if k.str == key.str {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
@@ -165,7 +165,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if keymaybe != bucketCnt {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
@@ -192,7 +192,7 @@ dohash:
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
}
}
}
@@ -286,12 +286,12 @@ bucketloop:
h.count++
done:
val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.valuesize))
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
return elem
}
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
@@ -331,11 +331,11 @@ search:
}
// Clear key's pointer.
k.str = nil
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
if t.elem.ptrdata != 0 {
memclrHasPointers(v, t.elem.size)
memclrHasPointers(e, t.elem.size)
} else {
memclrNoHeapPointers(v, t.elem.size)
memclrNoHeapPointers(e, t.elem.size)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
@@ -402,7 +402,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.v = add(x.k, bucketCnt*2*sys.PtrSize)
x.e = add(x.k, bucketCnt*2*sys.PtrSize)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
@@ -410,13 +410,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.v = add(y.k, bucketCnt*2*sys.PtrSize)
y.e = add(y.k, bucketCnt*2*sys.PtrSize)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
v := add(k, bucketCnt*2*sys.PtrSize)
for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
e := add(k, bucketCnt*2*sys.PtrSize)
for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
@@ -428,7 +428,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
// to send this key/elem to bucket x or bucket y).
hash := t.key.alg.hash(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
@@ -442,25 +442,24 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
*(*string)(dst.k) = *(*string)(k)
typedmemmove(t.elem, dst.v, v)
typedmemmove(t.elem, dst.e, e)
dst.i++
// These updates might push these pointers past the end of the
// key or value arrays. That's ok, as we have the overflow pointer
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
dst.k = add(dst.k, 2*sys.PtrSize)
dst.v = add(dst.v, uintptr(t.valuesize))
dst.e = add(dst.e, uintptr(t.elemsize))
}
}
// Unlink the overflow buckets & clear key/value to help GC.
// Unlink the overflow buckets & clear key/value to help GC.
// Unlink the overflow buckets & clear key/elem to help GC.
if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation
......
@@ -363,7 +363,7 @@ type maptype struct {
elem *_type
bucket *_type // internal type representing a hash bucket
keysize uint8 // size of key slot
valuesize uint8 // size of value slot
elemsize uint8 // size of elem slot
bucketsize uint16 // size of bucket
flags uint32
}
@@ -373,7 +373,7 @@ type maptype struct {
func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
return mt.flags&1 != 0
}
func (mt *maptype) indirectvalue() bool { // store ptr to value instead of value itself
func (mt *maptype) indirectelem() bool { // store ptr to elem instead of elem itself
return mt.flags&2 != 0
}
func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
......
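
Tying the runtime back to the compiler constants: a key or elem wider
than 128 bytes is stored as a pointer, so the per-slot sizes always fit
in the uint8 fields above. A sketch of how the flag is consumed on the
access path (per the generic map routines):

    // if t.indirectelem() {
    //     e = *((*unsafe.Pointer)(e)) // bucket slot holds *E; load it
    // }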