Commit db16de92 authored by Keith Randall, committed by Keith Randall

runtime: remove kindNoPointers

We already have the ptrdata field in a type, which encodes exactly
the same information that kindNoPointers does.

My problem with kindNoPointers is that it often leads to
double-negative code like:

   t.kind & kindNoPointers != 0

Much clearer is:

   t.ptrdata == 0

Update #27167

Change-Id: I92307d7f018a6bbe3daca4a4abb4225e359349b1
Reviewed-on: https://go-review.googlesource.com/c/go/+/169157
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent 50163233
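For readers outside the runtime: ptrdata is the size in bytes of the prefix of a type's memory that can contain pointers, so ptrdata == 0 means the type is pointer-free. A minimal standalone sketch of the old and new checks; the struct below is an illustrative stand-in for runtime._type, not the real definition:

```go
package main

import "fmt"

// typeInfo is a toy stand-in for runtime._type.
type typeInfo struct {
	size    uintptr // total size in bytes
	ptrdata uintptr // bytes in the prefix that can contain pointers; 0 means none
	kind    uint8   // kind byte with flag bits
}

const kindNoPointers = 1 << 7 // the flag this CL deletes

// Old style: a double negative ("not (no pointers)").
func hasPointersOld(t *typeInfo) bool { return t.kind&kindNoPointers == 0 }

// New style: ptrdata already encodes the same fact directly.
func hasPointersNew(t *typeInfo) bool { return t.ptrdata != 0 }

func main() {
	intPair := &typeInfo{size: 16, ptrdata: 0, kind: kindNoPointers} // e.g. [2]int64
	strType := &typeInfo{size: 16, ptrdata: 8}                       // e.g. string on 64-bit: data pointer first
	fmt.Println(hasPointersOld(intPair), hasPointersNew(intPair)) // false false
	fmt.Println(hasPointersOld(strType), hasPointersNew(strType)) // true true
}
```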
@@ -882,9 +882,6 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
     ot = duint8(lsym, ot, t.Align) // fieldAlign
     i = kinds[t.Etype]
-    if !types.Haspointers(t) {
-        i |= objabi.KindNoPointers
-    }
     if isdirectiface(t) {
         i |= objabi.KindDirectIface
     }
...
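After the change the compiler only ORs the two surviving flag bits into the kind byte. A sketch of that assembly step; the constant names mirror cmd/internal/objabi, but the helper itself is hypothetical:

```go
package sketch

// Flag bits that remain in the kind byte after this CL.
const (
	KindDirectIface = 1 << 5       // value stored directly in interface words
	KindGCProg      = 1 << 6       // gcdata is a GC program, not a bitmap
	KindMask        = (1 << 5) - 1 // low bits hold the base kind
)

// kindByte assembles a kind byte the way dcommontype now does:
// base kind plus optional flags, with no pointer-freeness bit.
func kindByte(base uint8, directIface, gcProg bool) uint8 {
	i := base & KindMask
	if directIface {
		i |= KindDirectIface
	}
	if gcProg {
		i |= KindGCProg
	}
	return i
}
```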
@@ -36,6 +36,5 @@ const (
     KindUnsafePointer
     KindDirectIface = 1 << 5
     KindGCProg      = 1 << 6
-    KindNoPointers  = 1 << 7
     KindMask        = (1 << 5) - 1
 )
@@ -370,7 +370,6 @@ func (n name) pkgPath() string {
 const (
     kindDirectIface = 1 << 5
     kindGCProg      = 1 << 6 // Type.gc points to GC program
-    kindNoPointers  = 1 << 7
     kindMask        = (1 << 5) - 1
 )
...
@@ -40,7 +40,7 @@ func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr,
     for i := uintptr(0); i < ft.ptrdata/ptrSize; i++ {
         gc = append(gc, gcdata[i/8]>>(i%8)&1)
     }
-    ptrs = ft.kind&kindNoPointers == 0
+    ptrs = ft.ptrdata != 0
     return
 }
...
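The loop above expands gcdata, one bit per pointer-sized word of the ptrdata prefix, into a byte slice of 0s and 1s. The same decoding as a standalone helper (hypothetical name, same bit order as the test):

```go
package sketch

// decodePtrMask mirrors the loop in the FuncLayout test helper:
// gcdata packs one bit per pointer-sized word, least significant
// bit first within each byte.
func decodePtrMask(gcdata []byte, ptrdata, ptrSize uintptr) []byte {
	var gc []byte
	for i := uintptr(0); i < ptrdata/ptrSize; i++ {
		gc = append(gc, gcdata[i/8]>>(i%8)&1)
	}
	return gc
}
```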
@@ -29,7 +29,7 @@ func Swapper(slice interface{}) func(i, j int) {
     typ := v.Type().Elem().(*rtype)
     size := typ.Size()
-    hasPtr := typ.kind&kindNoPointers == 0
+    hasPtr := typ.ptrdata != 0
     // Some common & small cases, without using memmove:
     if hasPtr {
...
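hasPtr decides whether Swapper's swap paths must stay write-barrier aware. A plain usage example of the public API, which now consults ptrdata internally:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// string contains a data pointer, so the element type's
	// ptrdata is nonzero and the swapper takes the pointer-aware path.
	s := []string{"a", "b", "c"}
	swap := reflect.Swapper(s)
	swap(0, 2)
	fmt.Println(s) // [c b a]
}
```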
@@ -586,7 +586,6 @@ type Method struct {
 const (
     kindDirectIface = 1 << 5
     kindGCProg      = 1 << 6 // Type.gc points to GC program
-    kindNoPointers  = 1 << 7
     kindMask        = (1 << 5) - 1
 )
@@ -782,7 +781,7 @@ func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
-func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
+func (t *rtype) pointers() bool { return t.ptrdata != 0 }
 func (t *rtype) common() *rtype { return t }
@@ -2156,13 +2155,6 @@ const (
 )

 func bucketOf(ktyp, etyp *rtype) *rtype {
     // See comment on hmap.overflow in ../runtime/map.go.
-    var kind uint8
-    if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
-        ktyp.size <= maxKeySize && etyp.size <= maxValSize {
-        kind = kindNoPointers
-    }
     if ktyp.size > maxKeySize {
         ktyp = PtrTo(ktyp).(*rtype)
     }
@@ -2189,12 +2181,12 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
         panic("reflect: bad size computation in MapOf")
     }
-    if kind != kindNoPointers {
+    if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
         nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
         mask := make([]byte, (nptr+7)/8)
         base := bucketSize / ptrSize
-        if ktyp.kind&kindNoPointers == 0 {
+        if ktyp.ptrdata != 0 {
             if ktyp.kind&kindGCProg != 0 {
                 panic("reflect: unexpected GC program in MapOf")
             }
@@ -2210,7 +2202,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
         }
         base += bucketSize * ktyp.size / ptrSize
-        if etyp.kind&kindNoPointers == 0 {
+        if etyp.ptrdata != 0 {
             if etyp.kind&kindGCProg != 0 {
                 panic("reflect: unexpected GC program in MapOf")
             }
@@ -2241,7 +2233,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
     b := &rtype{
         align:   ptrSize,
         size:    size,
-        kind:    kind,
+        kind:    uint8(Struct),
         ptrdata: ptrdata,
         gcdata:  gcdata,
     }
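The mask sizing in the hunk above is worth unpacking: a bucket holds bucketSize tophash bytes, then bucketSize keys, then bucketSize values, then one overflow pointer. Just that arithmetic as an illustrative helper (not a reflect API):

```go
package sketch

// maskBytes computes how many bitmap bytes bucketOf allocates:
// one bit per pointer-sized word of the bucket, covering
// bucketSize tophash bytes (the "1"), the keys, the values, and
// the trailing overflow pointer (the "+ptrSize").
func maskBytes(bucketSize, ksize, esize, ptrSize uintptr) uintptr {
	nptr := (bucketSize*(1+ksize+esize) + ptrSize) / ptrSize
	return (nptr + 7) / 8 // round up to whole bytes
}
```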
@@ -2349,7 +2341,6 @@ func StructOf(fields []StructField) Type {
         repr = make([]byte, 0, 64)
         fset = map[string]struct{}{} // fields' names
-        hasPtr    = false // records whether at least one struct-field is a pointer
         hasGCProg = false // records whether a struct-field type has a GCProg
     )
@@ -2370,9 +2361,6 @@ func StructOf(fields []StructField) Type {
         if ft.kind&kindGCProg != 0 {
             hasGCProg = true
         }
-        if ft.pointers() {
-            hasPtr = true
-        }

         // Update string and hash
         name := f.name.name()
@@ -2657,11 +2645,6 @@ func StructOf(fields []StructField) Type {
     if len(methods) > 0 {
         typ.tflag |= tflagUncommon
     }
-    if !hasPtr {
-        typ.kind |= kindNoPointers
-    } else {
-        typ.kind &^= kindNoPointers
-    }

     if hasGCProg {
         lastPtrField := 0
@@ -2869,11 +2852,9 @@ func ArrayOf(count int, elem Type) Type {
     array.len = uintptr(count)
     array.slice = SliceOf(elem).(*rtype)

-    array.kind &^= kindNoPointers
     switch {
-    case typ.kind&kindNoPointers != 0 || array.size == 0:
+    case typ.ptrdata == 0 || array.size == 0:
         // No pointers.
-        array.kind |= kindNoPointers
         array.gcdata = nil
         array.ptrdata = 0
@@ -3087,8 +3068,6 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset
     }
     if ptrmap.n > 0 {
         x.gcdata = &ptrmap.data[0]
-    } else {
-        x.kind |= kindNoPointers
     }

     var s string
@@ -3135,7 +3114,7 @@ func (bv *bitVector) append(bit uint8) {
 }

 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
-    if t.kind&kindNoPointers != 0 {
+    if t.ptrdata == 0 {
         return
     }
...
@@ -460,7 +460,7 @@ const cgoResultFail = "cgo result has Go pointer"
 // depending on indir. The top parameter is whether we are at the top
 // level, where Go pointers are allowed.
 func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
-    if t.kind&kindNoPointers != 0 {
+    if t.ptrdata == 0 {
         // If the type has no pointers there is nothing to do.
         return
     }
@@ -523,7 +523,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
         if !top {
             panic(errorString(msg))
         }
-        if st.elem.kind&kindNoPointers != 0 {
+        if st.elem.ptrdata == 0 {
             return
         }
         for i := 0; i < s.cap; i++ {
...
@@ -64,7 +64,7 @@ func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
 //go:nosplit
 //go:nowritebarrier
 func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
-    if typ.kind&kindNoPointers != 0 {
+    if typ.ptrdata == 0 {
         return
     }
     if !cgoIsGoPointer(src) {
@@ -83,7 +83,7 @@ func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
 //go:nosplit
 //go:nowritebarrier
 func cgoCheckSliceCopy(typ *_type, dst, src slice, n int) {
-    if typ.kind&kindNoPointers != 0 {
+    if typ.ptrdata == 0 {
         return
     }
     if !cgoIsGoPointer(src.array) {
@@ -203,7 +203,7 @@ func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
 //go:nowritebarrier
 //go:systemstack
 func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
-    if typ.kind&kindNoPointers != 0 {
+    if typ.ptrdata == 0 {
         return
     }
...
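Every checker above starts with the same ptrdata == 0 guard. User code cannot see ptrdata, but the equivalent question can be asked with reflect; a hedged approximation (incomplete kind coverage, illustrative only, and conservative for corner cases like zero-length arrays of pointer types):

```go
package main

import (
	"fmt"
	"reflect"
)

// pointerFree reports whether values of type t can never contain Go
// pointers: the user-level analogue of the runtime's typ.ptrdata == 0
// guard. Kinds not listed (Ptr, Map, Chan, Func, Interface, Slice,
// String, UnsafePointer) all contain pointers.
func pointerFree(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
		return true
	case reflect.Array:
		return pointerFree(t.Elem())
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			if !pointerFree(t.Field(i).Type) {
				return false
			}
		}
		return true
	}
	return false
}

func main() {
	fmt.Println(pointerFree(reflect.TypeOf([4]int64{})))         // true: nothing to check
	fmt.Println(pointerFree(reflect.TypeOf(struct{ P *int }{}))) // false: must be checked
}
```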
@@ -95,7 +95,7 @@ func makechan(t *chantype, size int) *hchan {
         c = (*hchan)(mallocgc(hchanSize, nil, true))
         // Race detector uses this location for synchronization.
         c.buf = c.raceaddr()
-    case elem.kind&kindNoPointers != 0:
+    case elem.ptrdata == 0:
         // Elements do not contain pointers.
         // Allocate hchan and buf in one call.
         c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
...
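A reduced model of makechan's three-way allocation switch, shown in the hunk above. hchan and mallocgc are runtime internals; the returned strings stand in for the corresponding allocations:

```go
package sketch

// chanBufStrategy models makechan's choice: no buffer, a single
// allocation when elements are pointer-free (the GC need not scan
// the buffer), or a separately typed buffer otherwise.
func chanBufStrategy(mem, elemPtrdata uintptr) string {
	switch {
	case mem == 0:
		return "allocate hchan only; buf is only a race-detector sync address"
	case elemPtrdata == 0:
		return "allocate hchan and buf together in one untyped call"
	default:
		return "allocate hchan, then a typed buf that the GC will scan"
	}
}
```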
@@ -195,7 +195,7 @@ func dumptype(t *_type) {
         dwritebyte('.')
         dwrite(name.str, uintptr(name.len))
     }
-    dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
+    dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
 }

 // dump an object
...
@@ -858,7 +858,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
     dataSize := size
     c := gomcache()
     var x unsafe.Pointer
-    noscan := typ == nil || typ.kind&kindNoPointers != 0
+    noscan := typ == nil || typ.ptrdata == 0
     if size <= maxSmallSize {
         if noscan && size < maxTinySize {
             // Tiny allocator.
...
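The noscan test above feeds the tiny allocator, which packs small pointer-free allocations together because the GC never scans them. A sketch of just that eligibility check (the helper is hypothetical; maxTinySize = 16 matches the runtime's tiny size class):

```go
package sketch

const maxTinySize = 16 // the runtime's tiny-allocator size bound

// tinyEligible mirrors the guard in mallocgc: only "noscan"
// allocations (no type, or a pointer-free type) below maxTinySize
// may be packed together by the tiny allocator.
func tinyEligible(size, ptrdata uintptr, hasType bool) bool {
	noscan := !hasType || ptrdata == 0
	return noscan && size < maxTinySize
}
```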
@@ -264,7 +264,7 @@ func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
         ovf = (*bmap)(newobject(t.bucket))
     }
     h.incrnoverflow()
-    if t.bucket.kind&kindNoPointers != 0 {
+    if t.bucket.ptrdata == 0 {
         h.createOverflow()
         *h.extra.overflow = append(*h.extra.overflow, ovf)
     }
@@ -368,7 +368,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
         // but may not be empty.
         buckets = dirtyalloc
         size := t.bucket.size * nbuckets
-        if t.bucket.kind&kindNoPointers == 0 {
+        if t.bucket.ptrdata != 0 {
             memclrHasPointers(buckets, size)
         } else {
             memclrNoHeapPointers(buckets, size)
@@ -742,13 +742,13 @@ search:
             // Only clear key if there are pointers in it.
             if t.indirectkey() {
                 *(*unsafe.Pointer)(k) = nil
-            } else if t.key.kind&kindNoPointers == 0 {
+            } else if t.key.ptrdata != 0 {
                 memclrHasPointers(k, t.key.size)
             }
             v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
             if t.indirectvalue() {
                 *(*unsafe.Pointer)(v) = nil
-            } else if t.elem.kind&kindNoPointers == 0 {
+            } else if t.elem.ptrdata != 0 {
                 memclrHasPointers(v, t.elem.size)
             } else {
                 memclrNoHeapPointers(v, t.elem.size)
@@ -820,7 +820,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
     // grab snapshot of bucket state
     it.B = h.B
     it.buckets = h.buckets
-    if t.bucket.kind&kindNoPointers != 0 {
+    if t.bucket.ptrdata == 0 {
         // Allocate the current slice and remember pointers to both current and old.
         // This preserves all relevant overflow buckets alive even if
         // the table grows and/or overflow buckets are added to the table
@@ -1232,7 +1232,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
         }
     }
     // Unlink the overflow buckets & clear key/value to help GC.
-    if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+    if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
         b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
         // Preserve b.tophash because the evacuation
         // state is maintained there.
...
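The map hunks all make the same decision when wiping a dead key or value slot. A reduced model of that choice; memclrHasPointers and memclrNoHeapPointers are runtime-internal, so the strings here merely stand in for those calls:

```go
package sketch

// clearChoice models how mapdelete wipes a deleted slot: indirect
// slots just drop the pointer; inline slots that may hold pointers
// need the write-barrier-aware clear; everything else can use the
// plain clear.
func clearChoice(indirect bool, ptrdata uintptr) string {
	switch {
	case indirect:
		return "store nil through the slot"
	case ptrdata != 0:
		return "memclrHasPointers (GC sees the cleared pointers)"
	default:
		return "memclrNoHeapPointers (plain memory clear)"
	}
}
```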
@@ -299,11 +299,11 @@ search:
                 continue
             }
             // Only clear key if there are pointers in it.
-            if t.key.kind&kindNoPointers == 0 {
+            if t.key.ptrdata != 0 {
                 memclrHasPointers(k, t.key.size)
             }
             v := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
-            if t.elem.kind&kindNoPointers == 0 {
+            if t.elem.ptrdata != 0 {
                 memclrHasPointers(v, t.elem.size)
             } else {
                 memclrNoHeapPointers(v, t.elem.size)
@@ -418,7 +418,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
                 dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

                 // Copy key.
-                if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
+                if sys.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
                     // Write with a write barrier.
                     *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
                 } else {
@@ -436,7 +436,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
         }
     }
     // Unlink the overflow buckets & clear key/value to help GC.
-    if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+    if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
         b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
         // Preserve b.tophash because the evacuation
         // state is maintained there.
...
@@ -299,11 +299,11 @@ search:
                 continue
             }
             // Only clear key if there are pointers in it.
-            if t.key.kind&kindNoPointers == 0 {
+            if t.key.ptrdata != 0 {
                 memclrHasPointers(k, t.key.size)
             }
             v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
-            if t.elem.kind&kindNoPointers == 0 {
+            if t.elem.ptrdata != 0 {
                 memclrHasPointers(v, t.elem.size)
             } else {
                 memclrNoHeapPointers(v, t.elem.size)
@@ -418,7 +418,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
                 dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

                 // Copy key.
-                if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
+                if t.key.ptrdata != 0 && writeBarrier.enabled {
                     if sys.PtrSize == 8 {
                         // Write with a write barrier.
                         *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
@@ -442,7 +442,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
         }
     }
     // Unlink the overflow buckets & clear key/value to help GC.
-    if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+    if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
         b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
         // Preserve b.tophash because the evacuation
         // state is maintained there.
...
@@ -332,7 +332,7 @@ search:
             // Clear key's pointer.
             k.str = nil
             v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
-            if t.elem.kind&kindNoPointers == 0 {
+            if t.elem.ptrdata != 0 {
                 memclrHasPointers(v, t.elem.size)
             } else {
                 memclrNoHeapPointers(v, t.elem.size)
@@ -461,7 +461,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
     }
     // Unlink the overflow buckets & clear key/value to help GC.
-    if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
+    if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
         b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
         // Preserve b.tophash because the evacuation
         // state is maintained there.
...
@@ -157,7 +157,7 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
     if dst == src {
         return
     }
-    if typ.kind&kindNoPointers == 0 {
+    if typ.ptrdata != 0 {
         bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
     }
     // There's a race here: if some other goroutine can write to
@@ -195,7 +195,7 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 // dst and src point off bytes into the value and only copies size bytes.
 //go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
 func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
-    if writeBarrier.needed && typ.kind&kindNoPointers == 0 && size >= sys.PtrSize {
+    if writeBarrier.needed && typ.ptrdata != 0 && size >= sys.PtrSize {
         // Pointer-align start address for bulk barrier.
         adst, asrc, asize := dst, src, size
         if frag := -off & (sys.PtrSize - 1); frag != 0 {
@@ -223,7 +223,7 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
 //
 //go:nosplit
 func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr) {
-    if writeBarrier.needed && typ != nil && typ.kind&kindNoPointers == 0 && size >= sys.PtrSize {
+    if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
         bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
     }
     memmove(dst, src, size)
@@ -264,7 +264,7 @@ func typedslicecopy(typ *_type, dst, src slice) int {
         return n
     }

-    // Note: No point in checking typ.kind&kindNoPointers here:
+    // Note: No point in checking typ.ptrdata here:
     // compiler only emits calls to typedslicecopy for types with pointers,
     // and growslice and reflect_typedslicecopy check for pointers
     // before calling typedslicecopy.
@@ -280,7 +280,7 @@ func typedslicecopy(typ *_type, dst, src slice) int {
 //go:linkname reflect_typedslicecopy reflect.typedslicecopy
 func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
-    if elemType.kind&kindNoPointers != 0 {
+    if elemType.ptrdata == 0 {
         n := dst.len
         if n > src.len {
             n = src.len
@@ -317,7 +317,7 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
 //
 //go:nosplit
 func typedmemclr(typ *_type, ptr unsafe.Pointer) {
-    if typ.kind&kindNoPointers == 0 {
+    if typ.ptrdata != 0 {
         bulkBarrierPreWrite(uintptr(ptr), 0, typ.size)
     }
     memclrNoHeapPointers(ptr, typ.size)
@@ -330,7 +330,7 @@ func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
 //go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
 func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
-    if typ.kind&kindNoPointers == 0 {
+    if typ.ptrdata != 0 {
         bulkBarrierPreWrite(uintptr(ptr), 0, size)
     }
     memclrNoHeapPointers(ptr, size)
@@ -338,7 +338,7 @@ func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintpt
 // memclrHasPointers clears n bytes of typed memory starting at ptr.
 // The caller must ensure that the type of the object at ptr has
-// pointers, usually by checking typ.kind&kindNoPointers. However, ptr
+// pointers, usually by checking typ.ptrdata. However, ptr
 // does not have to point to the start of the allocation.
 //
 //go:nosplit
...
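All of the typed-memory hunks above share one shape: issue the bulk pre-write barrier only when the type can contain pointers, then do the raw operation. A reduced model of that shape; the function values stand in for the runtime's bulkBarrierPreWrite and memmove:

```go
package sketch

// typedCopy models the structure of typedmemmove and friends: the
// GC must observe the old pointer slots (via the barrier) before
// they are overwritten, but only if the type has any.
func typedCopy(ptrdata, size uintptr, barrier, copyMem func(dst, src, n uintptr), dst, src uintptr) {
	if dst == src {
		return
	}
	if ptrdata != 0 {
		barrier(dst, src, size) // pre-write barrier over the destination
	}
	copyMem(dst, src, size)
}
```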
@@ -581,7 +581,7 @@ func (h heapBits) setCheckmarked(size uintptr) {
 // The pointer bitmap is not maintained for allocations containing
 // no pointers at all; any caller of bulkBarrierPreWrite must first
 // make sure the underlying allocation contains pointers, usually
-// by checking typ.kind&kindNoPointers.
+// by checking typ.ptrdata.
 //
 // Callers must perform cgo checks if writeBarrier.cgo.
 //
...
@@ -356,7 +356,7 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
         if uintptr(e.data) != base {
             // As an implementation detail we allow to set finalizers for an inner byte
             // of an object if it could come from tiny alloc (see mallocgc for details).
-            if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
+            if ot.elem == nil || ot.elem.ptrdata != 0 || ot.elem.size >= maxTinySize {
                 throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
             }
         }
...
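The hunk above keeps a narrow exception: an inner pointer is tolerated only if it could have come from the tiny allocator, which requires a pointer-free element type (now ot.elem.ptrdata == 0) below maxTinySize. Ordinary use of the public API looks like this (finalizers run asynchronously, so the output is best-effort):

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	// The pointer passed to SetFinalizer must normally reference
	// the start of its allocation.
	p := new([64]byte)
	runtime.SetFinalizer(p, func(*[64]byte) { fmt.Println("finalized") })
	p = nil
	runtime.GC()
	time.Sleep(10 * time.Millisecond) // give the finalizer goroutine a chance to run
}
```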
@@ -171,7 +171,7 @@ func growslice(et *_type, old slice, cap int) slice {
     }

     var p unsafe.Pointer
-    if et.kind&kindNoPointers != 0 {
+    if et.ptrdata == 0 {
         p = mallocgc(capmem, nil, false)
         // The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
         // Only clear the part that will not be overwritten.
...
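A reduced model of the branch above: pointer-free element types can take an untyped, non-zeroed allocation with only the unwritten tail cleared, while types with pointers need a typed, zeroed allocation so the GC always sees valid pointer words. The strings stand in for the runtime's mallocgc calls:

```go
package sketch

// growsliceAlloc models growslice's allocation decision for the
// new backing array.
func growsliceAlloc(ptrdata uintptr) string {
	if ptrdata == 0 {
		return "mallocgc(capmem, nil, false); clear only [newlen:cap]"
	}
	return "mallocgc(capmem, et, true); write barriers during the copy"
}
```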
@@ -34,7 +34,6 @@ const (
     kindDirectIface = 1 << 5
     kindGCProg      = 1 << 6
-    kindNoPointers  = 1 << 7
     kindMask        = (1 << 5) - 1
 )
...