Commit 7d469179 authored by David Crawshaw

cmd/compile, etc: store method tables as offsets

This CL introduces the typeOff type and a lookup method of the same
name that can turn a typeOff offset into an *rtype.

In a typical Go binary (built with buildmode=exe, pie, c-archive, or
c-shared), there is one moduledata and all typeOff values are offsets
relative to firstmoduledata.types. This makes computing the pointer
cheap in typical programs.
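
In the single-module case the lookup is, in effect, one addition. A
standalone sketch of the idea (illustrative names, not the runtime's
actual code):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// typeOff mirrors the runtime's 32-bit offset; types stands in
	// for a module's read-only type data section.
	type typeOff int32

	var types [1 << 10]byte

	// resolve turns an offset into a pointer: section base plus offset.
	func resolve(off typeOff) unsafe.Pointer {
		return unsafe.Pointer(uintptr(unsafe.Pointer(&types[0])) + uintptr(off))
	}

	func main() {
		types[64] = 42
		fmt.Println(*(*byte)(resolve(64))) // 42
	}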

With buildmode=shared (and one day, buildmode=plugin) there are
multiple modules whose relative offset is determined at runtime.
We identify a type in the general case by the pair of the original
*rtype that references it and its typeOff value. We determine
the module from the original pointer, and then use the typeOff from
there to compute the final *rtype.
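
A sketch of that module lookup, reusing the field names from the
runtime diff below (simplified; the real functions also handle the
reflect-created case):

	type moduledata struct {
		types, etypes uintptr // address range of this module's type data
		next          *moduledata
	}

	var firstmoduledata moduledata

	// moduleOf reports which module's [types, etypes) range contains
	// the *rtype that an offset is relative to.
	func moduleOf(base uintptr) *moduledata {
		for md := &firstmoduledata; md != nil; md = md.next {
			if base >= md.types && base < md.etypes {
				return md
			}
		}
		return nil // in no module: a reflect-created type (see below)
	}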

To ensure there is only one *rtype representing each type, the
runtime initializes a typemap for each module, using any identical
type from an earlier module when resolving that offset. This means
that types computed from an offset match the type mapped by the
pointer dynamic relocations.
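
The heart of that mapping, excerpted from typelinksinit in the
runtime diff below:

	md.typemap = make(map[typeOff]*_type, len(md.typelinks))
	for _, tl := range md.typelinks {
		t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
		for _, candidate := range typehash[t.hash] {
			if typesEqual(t, candidate) {
				t = candidate // reuse the identical earlier type
				break
			}
		}
		md.typemap[typeOff(tl)] = t
	}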

A series of followup CLs will replace other *rtype values with typeOff
(and name/*string with nameOff).

For types created at runtime by reflect, type offsets are treated as
global IDs and reference into a reflect offset map kept by the runtime.
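
These IDs are negative, so they can never collide with real module
offsets, which are non-negative. From reflect_addReflectOff in the
runtime diff below:

	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}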

darwin/amd64:
	cmd/go:  -57KB (0.6%)
	jujud:  -557KB (0.8%)

linux/amd64 PIE:
	cmd/go: -361KB (3.0%)
	jujud:  -3.5MB (4.2%)

For #6853.

Change-Id: Icf096fd884a0a0cb9f280f46f7a26c70a9006c96
Reviewed-on: https://go-review.googlesource.com/21285
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Run-TryBot: David Crawshaw <crawshaw@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent e0611b16
@@ -75,7 +75,7 @@ func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{})
	if t.Sym == nil && len(methods(t)) == 0 {
		return 0
	}
-	return 2*Widthptr + 2*Widthint
+	return 2 * Widthptr
}

func makefield(name string, t *Type) *Field {
@@ -580,13 +580,23 @@ func dextratype(s *Sym, ot int, t *Type, dataAdd int) int {
	ot = dgopkgpath(s, ot, typePkg(t))

-	// slice header
-	ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+dataAdd)
-
-	n := len(m)
-	ot = duintxx(s, ot, uint64(n), Widthint)
-	ot = duintxx(s, ot, uint64(n), Widthint)
+	dataAdd += Widthptr + 2 + 2
+	if Widthptr == 8 {
+		dataAdd += 4
+	}
+	mcount := len(m)
+	if mcount != int(uint16(mcount)) {
+		Fatalf("too many methods on %s: %d", t, mcount)
+	}
+	if dataAdd != int(uint16(dataAdd)) {
+		Fatalf("methods are too far away on %s: %d", t, dataAdd)
+	}
+
+	ot = duint16(s, ot, uint16(mcount))
+	ot = duint16(s, ot, uint16(dataAdd))
+	if Widthptr == 8 {
+		ot = duint32(s, ot, 0) // align for following pointers
+	}

	return ot
}
@@ -609,6 +619,7 @@ func typePkg(t *Type) *Pkg {
// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(s *Sym, ot int, t *Type) int {
+	lsym := Linksym(s)
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
@@ -617,21 +628,24 @@ func dextratypeData(s *Sym, ot int, t *Type) int {
			pkg = a.pkg
		}
		ot = dname(s, ot, a.name, "", pkg, exported)
-		ot = dmethodptr(s, ot, dtypesym(a.mtype))
-		ot = dmethodptr(s, ot, a.isym)
-		ot = dmethodptr(s, ot, a.tsym)
+		ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype)))
+		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym))
+		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym))
+		if Widthptr == 8 {
+			ot = duintxxLSym(lsym, ot, 0, 4) // pad to reflect.method size
+		}
	}
	return ot
}

-func dmethodptr(s *Sym, off int, x *Sym) int {
-	duintptr(s, off, 0)
-	r := obj.Addrel(Linksym(s))
-	r.Off = int32(off)
-	r.Siz = uint8(Widthptr)
-	r.Sym = Linksym(x)
-	r.Type = obj.R_METHOD
-	return off + Widthptr
+func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int {
+	duintxxLSym(s, ot, 0, 4)
+	r := obj.Addrel(s)
+	r.Off = int32(ot)
+	r.Siz = 4
+	r.Sym = x
+	r.Type = obj.R_METHODOFF
+	return ot + 4
}

var kinds = []int{
@@ -1286,18 +1300,29 @@ ok:
	ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))

	// generate typelink.foo pointing at s = type.foo.
-	// The linker will leave a table of all the typelinks for
-	// types in the binary, so reflect can find them.
-	// We only need the link for unnamed composites that
-	// we want be able to find.
-	if t.Sym == nil {
+	//
+	// The linker will leave a table of all the typelinks for
+	// types in the binary, so the runtime can find them.
+	//
+	// When buildmode=shared, all types are in typelinks so the
+	// runtime can deduplicate type pointers.
+	keep := Ctxt.Flag_dynlink
+	if !keep && t.Sym == nil {
+		// For an unnamed type, we only need the link if the type can
+		// be created at run time by reflect.PtrTo and similar
+		// functions. If the type exists in the program, those
+		// functions must return the existing type structure rather
+		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSTRUCT:
-			slink := typelinkLSym(t)
-			dsymptrOffLSym(slink, 0, Linksym(s), 0)
-			ggloblLSym(slink, 4, int16(dupok|obj.RODATA))
+			keep = true
		}
	}
+	if keep {
+		slink := typelinkLSym(t)
+		dsymptrOffLSym(slink, 0, Linksym(s), 0)
+		ggloblLSym(slink, 4, int16(dupok|obj.RODATA))
+	}

	return s
}
......
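For orientation, the uncommontype record the compiler now emits works
out as follows on amd64 (my reading of the code above; uncommonSize
drops from 32 to 16 bytes):

	+0  pkgPath *string
	+8  mcount  uint16   // number of methods
	+10 moff    uint16   // uncommontype start to [mcount]method
	+12 pad     uint32   // align the method records that follow
	+16 [mcount]method   // 24 bytes each: name pointer, then
	                     // mtyp/ifn/tfn as int32 offsets, then pad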
@@ -457,8 +457,8 @@
	// R_ADDRMIPS (only used on mips64) resolves to a 32-bit external address,
	// by loading the address into a register with two instructions (lui, ori).
	R_ADDRMIPS
-	// R_ADDROFF resolves to an offset from the beginning of the section holding
-	// the data being relocated to the referenced symbol.
+	// R_ADDROFF resolves to a 32-bit offset from the beginning of the section
+	// holding the data being relocated to the referenced symbol.
	R_ADDROFF
	R_SIZE
	R_CALL
@@ -492,11 +492,12 @@
	// should be linked into the final binary, even if there are no other
	// direct references. (This is used for types reachable by reflection.)
	R_USETYPE
-	// R_METHOD resolves to an *rtype for a method.
-	// It is used when linking from the uncommonType of another *rtype, and
-	// may be set to zero by the linker if it determines the method text is
-	// unreachable by the linked program.
-	R_METHOD
+	// R_METHODOFF resolves to a 32-bit offset from the beginning of the section
+	// holding the data being relocated to the referenced symbol.
+	// It is a variant of R_ADDROFF used when linking from the uncommonType of a
+	// *rtype, and may be set to zero by the linker if it determines the method
+	// text is unreachable by the linked program.
+	R_METHODOFF
	R_POWER_TOC
	R_GOTPCREL
	// R_JMPMIPS (only used on mips64) resolves to non-PC-relative target address
......
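For reference, an R_ADDROFF-style relocation resolves to roughly the
following (a sketch of the semantics, not the linker's exact code;
Symaddr and Sect.Vaddr are the cmd/link names I am assuming):

	// 32-bit value written at r.Off: the target's address minus the
	// start of the section that holds the target at run time.
	o := Symaddr(r.Sym) + r.Add - int64(r.Sym.Sect.Vaddr)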
@@ -19,7 +19,7 @@ import (
//
// This flood fill is wrapped in logic for pruning unused methods.
// All methods are mentioned by relocations on their receiver's *rtype.
-// These relocations are specially defined as R_METHOD by the compiler
+// These relocations are specially defined as R_METHODOFF by the compiler
// so we can detect and manipulated them here.
//
// There are three ways a method of a reachable type can be invoked:
@@ -100,7 +100,7 @@ func deadcode(ctxt *Link) {
		d.flood()
	}

-	// Remove all remaining unreached R_METHOD relocations.
+	// Remove all remaining unreached R_METHODOFF relocations.
	for _, m := range d.markableMethods {
		for _, r := range m.r {
			d.cleanupReloc(r)
@@ -167,7 +167,7 @@ var markextra = []string{
type methodref struct {
	m   methodsig
	src *LSym     // receiver type symbol
-	r   [3]*Reloc // R_METHOD relocations to fields of runtime.method
+	r   [3]*Reloc // R_METHODOFF relocations to fields of runtime.method
}

func (m methodref) ifn() *LSym { return m.r[1].Sym }
@@ -190,7 +190,7 @@ type deadcodepass struct {
func (d *deadcodepass) cleanupReloc(r *Reloc) {
	if r.Sym.Attr.Reachable() {
-		r.Type = obj.R_ADDR
+		r.Type = obj.R_ADDROFF
	} else {
		if Debug['v'] > 1 {
			fmt.Fprintf(d.ctxt.Bso, "removing method %s\n", r.Sym.Name)
@@ -217,7 +217,7 @@ func (d *deadcodepass) mark(s, parent *LSym) {
func (d *deadcodepass) markMethod(m methodref) {
	for _, r := range m.r {
		d.mark(r.Sym, m.src)
-		r.Type = obj.R_ADDR
+		r.Type = obj.R_ADDROFF
	}
}
@@ -291,14 +291,14 @@ func (d *deadcodepass) flood() {
			}
		}

-		mpos := 0 // 0-3, the R_METHOD relocs of runtime.uncommontype
+		mpos := 0 // 0-3, the R_METHODOFF relocs of runtime.uncommontype
		var methods []methodref
		for i := 0; i < len(s.R); i++ {
			r := &s.R[i]
			if r.Sym == nil {
				continue
			}
-			if r.Type != obj.R_METHOD {
+			if r.Type != obj.R_METHODOFF {
				d.mark(r.Sym, s)
				continue
			}
......
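The net effect (my inference; the else branch is collapsed in the hunk
above): a kept method's relocations become ordinary R_ADDROFF, while a
pruned method's relocations are presumably emptied so its three 4-byte
fields stay zero, which is why the reflect side below guards method
use with p.mtyp != 0. A hypothetical sketch of that cleanup:

	if r.Sym.Attr.Reachable() {
		r.Type = obj.R_ADDROFF // keep: resolves like any other offset
	} else {
		r.Sym = nil // assumption: drop the reloc, leaving zeroes behind
		r.Siz = 0
	}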
@@ -47,9 +47,9 @@ func decode_inuxi(p []byte, sz int) uint64 {
	}
}

func commonsize() int      { return 6*SysArch.PtrSize + 8 } // runtime._type
func structfieldSize() int { return 3 * SysArch.PtrSize }   // runtime.structfield
-func uncommonSize() int    { return 2*SysArch.PtrSize + 2*SysArch.IntSize } // runtime.uncommontype
+func uncommonSize() int    { return 2 * SysArch.PtrSize } // runtime.uncommontype

// Type.commonType.kind
func decodetype_kind(s *LSym) uint8 {
@@ -341,12 +341,14 @@ func decodetype_methods(s *LSym) []methodsig {
		// just Sizeof(rtype)
	}

-	numMethods := int(decode_inuxi(s.P[off+2*SysArch.PtrSize:], SysArch.IntSize))
-	r := decode_reloc(s, int32(off+SysArch.PtrSize))
-	if r.Sym != s {
-		panic(fmt.Sprintf("method slice pointer in %s leads to a different symbol %s", s, r.Sym))
+	mcount := int(decode_inuxi(s.P[off+SysArch.PtrSize:], 2))
+	moff := int(decode_inuxi(s.P[off+SysArch.PtrSize+2:], 2))
+	off += moff          // offset to array of reflect.method values
+	var sizeofMethod int // sizeof reflect.method in program
+	if SysArch.PtrSize == 4 {
+		sizeofMethod = 4 * SysArch.PtrSize
+	} else {
+		sizeofMethod = 3 * SysArch.PtrSize
	}
-	off = int(r.Add) // array of reflect.method values
-	sizeofMethod := 4 * SysArch.PtrSize // sizeof reflect.method in program
-	return decode_methodsig(s, off, sizeofMethod, numMethods)
+	return decode_methodsig(s, off, sizeofMethod, mcount)
}
@@ -90,7 +90,7 @@ func FirstMethodNameBytes(t Type) *byte {
	if ut == nil {
		panic("type has no methods")
	}
-	m := ut.methods[0]
+	m := ut.methods()[0]
	if *m.name.data(0)&(1<<2) == 0 {
		panic("method name does not have pkgPath *string")
	}
......
@@ -288,10 +288,10 @@ type typeAlg struct {

// Method on non-interface type
type method struct {
	name name // name of method
-	mtyp *rtype         // method type (without receiver)
-	ifn  unsafe.Pointer // fn used in interface call (one-word receiver)
-	tfn  unsafe.Pointer // fn used for normal method call
+	mtyp typeOff // method type (without receiver)
+	ifn  textOff // fn used in interface call (one-word receiver)
+	tfn  textOff // fn used for normal method call
}

// uncommonType is present only for types with names or methods
@@ -299,8 +299,9 @@ type method struct {
// Using a pointer to this struct reduces the overall size required
// to describe an unnamed type with no methods.
type uncommonType struct {
	pkgPath *string // import path; nil for built-in types like int, string
-	methods []method // methods associated with type
+	mcount  uint16  // number of methods
+	moff    uint16  // offset from this uncommontype to [mcount]method
}

// ChanDir represents a channel type's direction.
@@ -589,6 +590,10 @@ var kindNames = []string{
	UnsafePointer: "unsafe.Pointer",
}

+func (t *uncommonType) methods() []method {
+	return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount]
+}
+
func (t *uncommonType) PkgPath() string {
	if t == nil || t.pkgPath == nil {
		return ""
@@ -596,13 +601,55 @@ func (t *uncommonType) PkgPath() string {
	return *t.pkgPath
}

+// resolveTypeOff resolves an *rtype offset from a base type.
+// The (*rtype).typeOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// resolveTextOff resolves an function pointer offset from a base type.
+// The (*rtype).textOff method is a convenience wrapper for this function.
+// Implemented in the runtime package.
+func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
+
+// addReflectOff adds a pointer to the reflection lookup map in the runtime.
+// It returns a new ID that can be used as a typeOff or textOff, and will
+// be resolved correctly. Implemented in the runtime package.
+func addReflectOff(ptr unsafe.Pointer) int32
+
+// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
+// It returns a new typeOff that can be used to refer to the pointer.
+func resolveReflectType(t *rtype) typeOff {
+	return typeOff(addReflectOff(unsafe.Pointer(t)))
+}
+
+// resolveReflectText adds a function pointer to the reflection lookup map in
+// the runtime. It returns a new textOff that can be used to refer to the
+// pointer.
+func resolveReflectText(ptr unsafe.Pointer) textOff {
+	return textOff(addReflectOff(ptr))
+}
+
+type typeOff int32 // offset to an *rtype
+type textOff int32 // offset from top of text section
+
+func (t *rtype) typeOff(off typeOff) *rtype {
+	if off == 0 {
+		return nil
+	}
+	return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
+}
+
+func (t *rtype) textOff(off textOff) unsafe.Pointer {
+	return resolveTextOff(unsafe.Pointer(t), int32(off))
+}
+
func (t *rtype) uncommon() *uncommonType {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.Kind() {
	case Struct:
-		return &(*structTypeWithMethods)(unsafe.Pointer(t)).u
+		return &(*structTypeUncommon)(unsafe.Pointer(t)).u
	case Ptr:
		type u struct {
			ptrType
@@ -688,7 +735,7 @@ func (t *rtype) NumMethod() int {
	if ut == nil {
		return 0
	}
-	return len(ut.methods)
+	return int(ut.mcount)
}

func (t *rtype) Method(i int) (m Method) {
@@ -698,10 +745,10 @@ func (t *rtype) Method(i int) (m Method) {
	}
	ut := t.uncommon()
-	if ut == nil || i < 0 || i >= len(ut.methods) {
+	if ut == nil || i < 0 || i >= int(ut.mcount) {
		panic("reflect: Method index out of range")
	}
-	p := &ut.methods[i]
+	p := ut.methods()[i]
	m.Name = p.name.name()
	fl := flag(Func)
	if !p.name.isExported() {
@@ -712,8 +759,9 @@ func (t *rtype) Method(i int) (m Method) {
		m.PkgPath = *pkgPath
		fl |= flagStickyRO
	}
-	if p.mtyp != nil {
-		ft := (*funcType)(unsafe.Pointer(p.mtyp))
+	if p.mtyp != 0 {
+		mtyp := t.typeOff(p.mtyp)
+		ft := (*funcType)(unsafe.Pointer(mtyp))
		in := make([]Type, 0, 1+len(ft.in()))
		in = append(in, t)
		for _, arg := range ft.in() {
@@ -723,9 +771,10 @@ func (t *rtype) Method(i int) (m Method) {
		for _, ret := range ft.out() {
			out = append(out, ret)
		}
-		mt := FuncOf(in, out, p.mtyp.IsVariadic())
+		mt := FuncOf(in, out, ft.IsVariadic())
		m.Type = mt
-		fn := unsafe.Pointer(&p.tfn)
+		tfn := t.textOff(p.tfn)
+		fn := unsafe.Pointer(&tfn)
		m.Func = Value{mt.(*rtype), fn, fl}
	}
	m.Index = i
@@ -741,8 +790,9 @@ func (t *rtype) MethodByName(name string) (m Method, ok bool) {
	if ut == nil {
		return Method{}, false
	}
-	for i := range ut.methods {
-		p := &ut.methods[i]
+	utmethods := ut.methods()
+	for i := 0; i < int(ut.mcount); i++ {
+		p := utmethods[i]
		if p.name.name() == name {
			return t.Method(i), true
		}
@@ -1430,10 +1480,11 @@ func implements(T, V *rtype) bool {
			return false
		}
		i := 0
-		for j := 0; j < len(v.methods); j++ {
+		vmethods := v.methods()
+		for j := 0; j < int(v.mcount); j++ {
			tm := &t.methods[i]
-			vm := &v.methods[j]
-			if vm.name.name() == tm.name.name() && vm.mtyp == tm.typ {
+			vm := vmethods[j]
+			if vm.name.name() == tm.name.name() && V.typeOff(vm.mtyp) == tm.typ {
				if i++; i >= len(t.methods) {
					return true
				}
@@ -2161,21 +2212,55 @@ func SliceOf(t Type) Type {
	return cachePut(ckey, &slice.rtype)
}

-// structTypeWithMethods is a structType created at runtime with StructOf.
-// It is needed to pin the []method slice from its associated uncommonType struct.
-// Keep in sync with the memory layout of structType.
-type structTypeWithMethods struct {
-	structType
-	u uncommonType
-}
-
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
-// the *structType and its associated *uncommonType (especially the
-// []method slice field of that uncommonType.)
+// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.RWMutex
-	m map[uint32][]*structTypeWithMethods // keyed by hash calculated in StructOf
+	m map[uint32][]interface {
+		common() *rtype
+	} // keyed by hash calculated in StructOf
}

+type structTypeUncommon struct {
+	structType
+	u uncommonType
+}
+
+// A *rtype representing a struct is followed directly in memory by an
+// array of method objects representing the methods attached to the
+// struct. To get the same layout for a run time generated type, we
+// need an array directly following the uncommonType memory. The types
+// structTypeFixed4, ...structTypeFixedN are used to do this.
+//
+// A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
+// TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs
+// have no methods, they could be defined at runtime using the StructOf
+// function.
+type structTypeFixed4 struct {
+	structType
+	u uncommonType
+	m [4]method
+}
+
+type structTypeFixed8 struct {
+	structType
+	u uncommonType
+	m [8]method
+}
+
+type structTypeFixed16 struct {
+	structType
+	u uncommonType
+	m [16]method
+}
+
+type structTypeFixed32 struct {
+	structType
+	u uncommonType
+	m [32]method
+}
// StructOf returns the struct type containing fields.
@@ -2192,7 +2277,7 @@ func StructOf(fields []StructField) Type {
		typalign   uint8
		comparable = true
		hashable   = true
-		typ        = new(structTypeWithMethods)
+		methods    []method

		fs   = make([]structField, len(fields))
		repr = make([]byte, 0, 64)
@@ -2269,7 +2354,6 @@ func StructOf(fields []StructField) Type {
				}
				return recv.Field(ifield).Method(imethod).Call(args)
			})
-
		} else {
			tfn = MakeFunc(m.typ, func(in []Value) []Value {
				var args []Value
@@ -2287,47 +2371,59 @@ func StructOf(fields []StructField) Type {
				}
				return recv.Field(ifield).Method(imethod).Call(args)
			})
		}
-			typ.u.methods = append(
-				typ.u.methods,
-				method{
-					name: m.name,
-					mtyp: m.typ,
-					ifn:  unsafe.Pointer(&ifn),
-					tfn:  unsafe.Pointer(&tfn),
-				},
-			)
+			methods = append(methods, method{
+				name: m.name,
+				mtyp: resolveReflectType(m.typ),
+				ifn:  resolveReflectText(unsafe.Pointer(&ifn)),
+				tfn:  resolveReflectText(unsafe.Pointer(&tfn)),
+			})
		}
	case Ptr:
		ptr := (*ptrType)(unsafe.Pointer(ft))
		if unt := ptr.uncommon(); unt != nil {
-			for _, m := range unt.methods {
+			for _, m := range unt.methods() {
				if m.name.pkgPath() != nil {
					// TODO(sbinet)
					panic("reflect: embedded interface with unexported method(s) not implemented")
				}
-				typ.u.methods = append(typ.u.methods, m)
+				methods = append(methods, method{
+					name: m.name,
+					mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
+					ifn:  resolveReflectText(ptr.textOff(m.ifn)),
+					tfn:  resolveReflectText(ptr.textOff(m.tfn)),
+				})
			}
		}
		if unt := ptr.elem.uncommon(); unt != nil {
-			for _, m := range unt.methods {
+			for _, m := range unt.methods() {
				if m.name.pkgPath() != nil {
					// TODO(sbinet)
					panic("reflect: embedded interface with unexported method(s) not implemented")
				}
-				typ.u.methods = append(typ.u.methods, m)
+				methods = append(methods, method{
+					name: m.name,
+					mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
+					ifn:  resolveReflectText(ptr.elem.textOff(m.ifn)),
+					tfn:  resolveReflectText(ptr.elem.textOff(m.tfn)),
+				})
			}
		}
	default:
		if unt := ft.uncommon(); unt != nil {
-			for _, m := range unt.methods {
+			for _, m := range unt.methods() {
				if m.name.pkgPath() != nil {
					// TODO(sbinet)
					panic("reflect: embedded interface with unexported method(s) not implemented")
				}
-				typ.u.methods = append(typ.u.methods, m)
+				methods = append(methods, method{
+					name: m.name,
+					mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
+					ifn:  resolveReflectText(ft.textOff(m.ifn)),
+					tfn:  resolveReflectText(ft.textOff(m.tfn)),
+				})
			}
		}
	}
@@ -2359,6 +2455,49 @@ func StructOf(fields []StructField) Type {
		fs[i] = f
	}

+	var typ *structType
+	var ut *uncommonType
+	var typPin interface {
+		common() *rtype
+	} // structTypeFixedN
+
+	switch {
+	case len(methods) == 0:
+		t := new(structTypeUncommon)
+		typ = &t.structType
+		ut = &t.u
+		typPin = t
+	case len(methods) <= 4:
+		t := new(structTypeFixed4)
+		typ = &t.structType
+		ut = &t.u
+		copy(t.m[:], methods)
+		typPin = t
+	case len(methods) <= 8:
+		t := new(structTypeFixed8)
+		typ = &t.structType
+		ut = &t.u
+		copy(t.m[:], methods)
+		typPin = t
+	case len(methods) <= 16:
+		t := new(structTypeFixed16)
+		typ = &t.structType
+		ut = &t.u
+		copy(t.m[:], methods)
+		typPin = t
+	case len(methods) <= 32:
+		t := new(structTypeFixed32)
+		typ = &t.structType
+		ut = &t.u
+		copy(t.m[:], methods)
+		typPin = t
+	default:
+		panic("reflect.StructOf: too many methods")
+	}
+	ut.mcount = uint16(len(methods))
+	ut.moff = uint16(unsafe.Sizeof(uncommonType{}))
+
	if len(fs) > 0 {
		repr = append(repr, ' ')
	}
@@ -2372,15 +2511,16 @@ func StructOf(fields []StructField) Type {
	// Make the struct type.
	var istruct interface{} = struct{}{}
	prototype := *(**structType)(unsafe.Pointer(&istruct))
-	typ.structType = *prototype
-	typ.structType.fields = fs
+	*typ = *prototype
+	typ.fields = fs

	// Look in cache
	structLookupCache.RLock()
-	for _, t := range structLookupCache.m[hash] {
-		if haveIdenticalUnderlyingType(&typ.rtype, &t.rtype) {
+	for _, st := range structLookupCache.m[hash] {
+		t := st.common()
+		if haveIdenticalUnderlyingType(&typ.rtype, t) {
			structLookupCache.RUnlock()
-			return &t.rtype
+			return t
		}
	}
	structLookupCache.RUnlock()
@@ -2389,11 +2529,14 @@ func StructOf(fields []StructField) Type {
	structLookupCache.Lock()
	defer structLookupCache.Unlock()
	if structLookupCache.m == nil {
-		structLookupCache.m = make(map[uint32][]*structTypeWithMethods)
+		structLookupCache.m = make(map[uint32][]interface {
+			common() *rtype
+		})
	}
-	for _, t := range structLookupCache.m[hash] {
-		if haveIdenticalUnderlyingType(&typ.rtype, &t.rtype) {
-			return &t.rtype
+	for _, st := range structLookupCache.m[hash] {
+		t := st.common()
+		if haveIdenticalUnderlyingType(&typ.rtype, t) {
+			return t
		}
	}
@@ -2403,9 +2546,8 @@ func StructOf(fields []StructField) Type {
		// even if 't' wasn't a structType with methods, we should be ok
		// as the 'u uncommonType' field won't be accessed except when
		// tflag&tflagUncommon is set.
-		tt := (*structTypeWithMethods)(unsafe.Pointer(t))
-		structLookupCache.m[hash] = append(structLookupCache.m[hash], tt)
-		return &tt.rtype
+		structLookupCache.m[hash] = append(structLookupCache.m[hash], t)
+		return t
	}
}
@@ -2414,7 +2556,7 @@ func StructOf(fields []StructField) Type {
	typ.size = size
	typ.align = typalign
	typ.fieldAlign = typalign
-	if len(typ.u.methods) > 0 {
+	if len(methods) > 0 {
		typ.tflag |= tflagUncommon
	}
	if !hasPtr {
@@ -2514,7 +2656,7 @@ func StructOf(fields []StructField) Type {
		typ.kind &^= kindDirectIface
	}

-	structLookupCache.m[hash] = append(structLookupCache.m[hash], typ)
+	structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin)
	return &typ.rtype
}
@@ -2533,6 +2675,7 @@ func runtimeStructField(field StructField) structField {
		}
	}

+	_ = resolveReflectType(field.Type.common())
	return structField{
		name: newName(field.Name, string(field.Tag), field.PkgPath, exported),
		typ:  field.Type.common(),
......
@@ -566,15 +566,16 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn
	} else {
		rcvrtype = v.typ
		ut := v.typ.uncommon()
-		if ut == nil || uint(i) >= uint(len(ut.methods)) {
+		if ut == nil || uint(i) >= uint(ut.mcount) {
			panic("reflect: internal error: invalid method index")
		}
-		m := &ut.methods[i]
+		m := ut.methods()[i]
		if !m.name.isExported() {
			panic("reflect: " + op + " of unexported method")
		}
-		fn = unsafe.Pointer(&m.ifn)
-		t = m.mtyp
+		ifn := v.typ.textOff(m.ifn)
+		fn = unsafe.Pointer(&ifn)
+		t = v.typ.typeOff(m.mtyp)
	}
	return
}
@@ -1687,11 +1688,11 @@ func (v Value) Type() Type {
	}
	// Method on concrete type.
	ut := v.typ.uncommon()
-	if ut == nil || uint(i) >= uint(len(ut.methods)) {
+	if ut == nil || uint(i) >= uint(ut.mcount) {
		panic("reflect: internal error: invalid method index")
	}
-	m := &ut.methods[i]
-	return m.mtyp
+	m := ut.methods()[i]
+	return v.typ.typeOff(m.mtyp)
}

// Uint returns v's underlying value, as a uint64.
......
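A quick way to see the reflect changes from the outside: StructOf
still behaves the same, but a run-time struct type now stores any
promoted methods in a trailing fixed-size array rather than a slice.
A minimal check (methods only arrive via embedded fields, so a plain
field gives NumMethod 0):

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		t := reflect.StructOf([]reflect.StructField{
			{Name: "X", Type: reflect.TypeOf(int(0))},
		})
		fmt.Println(t, t.NumMethod()) // struct { X int } 0
	}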
@@ -93,7 +93,8 @@ func additab(m *itab, locked, canfail bool) {
	// so can iterate over both in lock step;
	// the loop is O(ni+nt) not O(ni*nt).
	ni := len(inter.mhdr)
-	nt := len(x.mhdr)
+	nt := int(x.mcount)
+	xmhdr := (*[1 << 16]method)(add(unsafe.Pointer(x), uintptr(x.moff)))[:nt:nt]
	j := 0
	for k := 0; k < ni; k++ {
		i := &inter.mhdr[k]
@@ -104,15 +105,16 @@ func additab(m *itab, locked, canfail bool) {
			ipkg = inter.pkgpath
		}
		for ; j < nt; j++ {
-			t := &x.mhdr[j]
-			if t.mtyp == itype && t.name.name() == iname {
+			t := &xmhdr[j]
+			if typ.typeOff(t.mtyp) == itype && t.name.name() == iname {
				pkgPath := t.name.pkgPath()
				if pkgPath == nil {
					pkgPath = x.pkgpath
				}
				if t.name.isExported() || pkgPath == ipkg {
					if m != nil {
-						*(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn
+						ifn := typ.textOff(t.ifn)
+						*(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = ifn
					}
					goto nextimethod
				}
......
@@ -435,9 +435,10 @@ func schedinit() {
	tracebackinit()
	moduledataverify()
	stackinit()
-	itabsinit()
	mallocinit()
	mcommoninit(_g_.m)
+	typelinksinit()
+	itabsinit()
	msigsave(_g_.m)
	initSigmask = _g_.m.sigmask
......
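The reordering above matters (my inference): itab construction
compares method types by pointer, so the per-module typemaps that
canonicalize *_type values must be built first.

	typelinksinit() // build typemaps, dedup *_type across modules
	itabsinit()     // may now compare method types by pointer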
@@ -486,3 +486,36 @@ func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	}
	return sections, ret
}
+
+// reflect_resolveTypeOff resolves an *rtype offset from a base type.
+//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
+func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
+}
+
+// reflect_resolveTextOff resolves an function pointer offset from a base type.
+//go:linkname reflect_resolveTextOff reflect.resolveTextOff
+func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+	return (*_type)(rtype).textOff(textOff(off))
+}
+
+// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
+//go:linkname reflect_addReflectOff reflect.addReflectOff
+func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
+	lock(&reflectOffs.lock)
+	if reflectOffs.m == nil {
+		reflectOffs.m = make(map[int32]unsafe.Pointer)
+		reflectOffs.minv = make(map[unsafe.Pointer]int32)
+		reflectOffs.next = -1
+	}
+	id, found := reflectOffs.minv[ptr]
+	if !found {
+		id = reflectOffs.next
+		reflectOffs.next-- // use negative offsets as IDs to aid debugging
+		reflectOffs.m[id] = ptr
+		reflectOffs.minv[ptr] = id
+	}
+	unlock(&reflectOffs.lock)
+	return id
+}
@@ -137,6 +137,8 @@ type moduledata struct {
	gcdatamask, gcbssmask bitvector

+	typemap map[typeOff]*_type // offset to *_rtype in previous module
+
	next *moduledata
}
......
@@ -131,6 +131,92 @@ func (t *_type) name() string {
	return t._string[i+1:]
}

+// reflectOffs holds type offsets defined at run time by the reflect package.
+//
+// When a type is defined at run time, its *rtype data lives on the heap.
+// There are a wide range of possible addresses the heap may use, that
+// may not be representable as a 32-bit offset. Moreover the GC may
+// one day start moving heap memory, in which case there is no stable
+// offset that can be defined.
+//
+// To provide stable offsets, we add pin *rtype objects in a global map
+// and treat the offset as an identifier. We use negative offsets that
+// do not overlap with any compile-time module offsets.
+//
+// Entries are created by reflect.addReflectOff.
+var reflectOffs struct {
+	lock mutex
+	next int32
+	m    map[int32]unsafe.Pointer
+	minv map[unsafe.Pointer]int32
+}
+
+func (t *_type) typeOff(off typeOff) *_type {
+	if off == 0 {
+		return nil
+	}
+	base := uintptr(unsafe.Pointer(t))
+	var md *moduledata
+	for next := &firstmoduledata; next != nil; next = next.next {
+		if base >= next.types && base < next.etypes {
+			md = next
+			break
+		}
+	}
+	if md == nil {
+		lock(&reflectOffs.lock)
+		res := reflectOffs.m[int32(off)]
+		unlock(&reflectOffs.lock)
+		if res == nil {
+			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
+			for next := &firstmoduledata; next != nil; next = next.next {
+				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+			}
+			throw("runtime: type offset base pointer out of range")
+		}
+		return (*_type)(res)
+	}
+	if t := md.typemap[off]; t != nil {
+		return t
+	}
+	res := md.types + uintptr(off)
+	if res > md.etypes {
+		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
+		throw("runtime: type offset out of range")
+	}
+	return (*_type)(unsafe.Pointer(res))
+}
+
+func (t *_type) textOff(off textOff) unsafe.Pointer {
+	base := uintptr(unsafe.Pointer(t))
+	var md *moduledata
+	for next := &firstmoduledata; next != nil; next = next.next {
+		if base >= next.types && base < next.etypes {
+			md = next
+			break
+		}
+	}
+	if md == nil {
+		lock(&reflectOffs.lock)
+		res := reflectOffs.m[int32(off)]
+		unlock(&reflectOffs.lock)
+		if res == nil {
+			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
+			for next := &firstmoduledata; next != nil; next = next.next {
+				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
+			}
+			throw("runtime: text offset base pointer out of range")
+		}
+		return res
+	}
+	res := md.text + uintptr(off)
+	if res > md.etext {
+		println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
+		throw("runtime: text offset out of range")
+	}
+	return unsafe.Pointer(res)
+}
+
func (t *functype) in() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
@@ -154,16 +240,20 @@ func (t *functype) dotdotdot() bool {
	return t.outCount&(1<<15) != 0
}

+type typeOff int32
+type textOff int32
+
type method struct {
	name name
-	mtyp *_type
-	ifn  unsafe.Pointer
-	tfn  unsafe.Pointer
+	mtyp typeOff
+	ifn  textOff
+	tfn  textOff
}

type uncommontype struct {
	pkgpath *string
-	mhdr    []method
+	mcount  uint16 // number of methods
+	moff    uint16 // offset from this uncommontype to [mcount]method
}

type imethod struct {
@@ -270,6 +360,18 @@ func (n *name) name() (s string) {
	return s
}

+func (n *name) tag() (s string) {
+	tl := n.tagLen()
+	if tl == 0 {
+		return ""
+	}
+	nl := n.nameLen()
+	hdr := (*stringStruct)(unsafe.Pointer(&s))
+	hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
+	hdr.len = tl
+	return s
+}
+
func (n *name) pkgPath() *string {
	if *n.data(0)&(1<<2) == 0 {
		return nil
@@ -281,3 +383,200 @@ func (n *name) pkgPath() *string {
	off = int(round(uintptr(off), sys.PtrSize))
	return *(**string)(unsafe.Pointer(n.data(off)))
}
+
+// typelinksinit scans the types from extra modules and builds the
+// moduledata typemap used to de-duplicate type pointers.
+func typelinksinit() {
+	if firstmoduledata.next == nil {
+		return
+	}
+	typehash := make(map[uint32][]*_type)
+
+	modules := []*moduledata{}
+	for md := &firstmoduledata; md != nil; md = md.next {
+		modules = append(modules, md)
+	}
+	prev, modules := modules[len(modules)-1], modules[:len(modules)-1]
+	for len(modules) > 0 {
+		// Collect types from the previous module into typehash.
+	collect:
+		for _, tl := range prev.typelinks {
+			var t *_type
+			if prev.typemap == nil {
+				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
+			} else {
+				t = prev.typemap[typeOff(tl)]
+			}
+			// Add to typehash if not seen before.
+			tlist := typehash[t.hash]
+			for _, tcur := range tlist {
+				if tcur == t {
+					continue collect
+				}
+			}
+			typehash[t.hash] = append(tlist, t)
+		}
+
+		// If any of this module's typelinks match a type from a
+		// prior module, prefer that prior type by adding the offset
+		// to this module's typemap.
+		md := modules[len(modules)-1]
+		md.typemap = make(map[typeOff]*_type, len(md.typelinks))
+		for _, tl := range md.typelinks {
+			t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
+			for _, candidate := range typehash[t.hash] {
+				if typesEqual(t, candidate) {
+					t = candidate
+					break
+				}
+			}
+			md.typemap[typeOff(tl)] = t
+		}
+
+		prev, modules = md, modules[:len(modules)-1]
+	}
+}
+
+// typesEqual reports whether two types are equal.
+//
+// Everywhere in the runtime and reflect packages, it is assumed that
+// there is exactly one *_type per Go type, so that pointer equality
+// can be used to test if types are equal. There is one place that
+// breaks this assumption: buildmode=shared. In this case a type can
+// appear as two different pieces of memory. This is hidden from the
+// runtime and reflect package by the per-module typemap built in
+// typelinksinit. It uses typesEqual to map types from later modules
+// back into earlier ones.
+//
+// Only typelinksinit needs this function.
+func typesEqual(t, v *_type) bool {
+	if t == v {
+		return true
+	}
+	kind := t.kind & kindMask
+	if kind != v.kind&kindMask {
+		return false
+	}
+	if t._string != v._string {
+		return false
+	}
+	ut := t.uncommon()
+	uv := v.uncommon()
+	if ut != nil || uv != nil {
+		if ut == nil || uv == nil {
+			return false
+		}
+		if !pkgPathEqual(ut.pkgpath, uv.pkgpath) {
+			return false
+		}
+	}
+	if kindBool <= kind && kind <= kindComplex128 {
+		return true
+	}
+	switch kind {
+	case kindString, kindUnsafePointer:
+		return true
+	case kindArray:
+		at := (*arraytype)(unsafe.Pointer(t))
+		av := (*arraytype)(unsafe.Pointer(v))
+		return typesEqual(at.elem, av.elem) && at.len == av.len
+	case kindChan:
+		ct := (*chantype)(unsafe.Pointer(t))
+		cv := (*chantype)(unsafe.Pointer(v))
+		return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem)
+	case kindFunc:
+		ft := (*functype)(unsafe.Pointer(t))
+		fv := (*functype)(unsafe.Pointer(v))
+		if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
+			return false
+		}
+		tin, vin := ft.in(), fv.in()
+		for i := 0; i < len(tin); i++ {
+			if !typesEqual(tin[i], vin[i]) {
+				return false
+			}
+		}
+		tout, vout := ft.out(), fv.out()
+		for i := 0; i < len(tout); i++ {
+			if !typesEqual(tout[i], vout[i]) {
+				return false
+			}
+		}
+		return true
+	case kindInterface:
+		it := (*interfacetype)(unsafe.Pointer(t))
+		iv := (*interfacetype)(unsafe.Pointer(v))
+		if !pkgPathEqual(it.pkgpath, iv.pkgpath) {
+			return false
+		}
+		if len(it.mhdr) != len(iv.mhdr) {
+			return false
+		}
+		for i := range it.mhdr {
+			tm := &it.mhdr[i]
+			vm := &iv.mhdr[i]
+			if tm.name.name() != vm.name.name() {
+				return false
+			}
+			if !pkgPathEqual(tm.name.pkgPath(), vm.name.pkgPath()) {
+				return false
+			}
+			if !typesEqual(tm._type, vm._type) {
+				return false
+			}
+		}
+		return true
+	case kindMap:
+		mt := (*maptype)(unsafe.Pointer(t))
+		mv := (*maptype)(unsafe.Pointer(v))
+		return typesEqual(mt.key, mv.key) && typesEqual(mt.elem, mv.elem)
+	case kindPtr:
+		pt := (*ptrtype)(unsafe.Pointer(t))
+		pv := (*ptrtype)(unsafe.Pointer(v))
+		return typesEqual(pt.elem, pv.elem)
+	case kindSlice:
+		st := (*slicetype)(unsafe.Pointer(t))
+		sv := (*slicetype)(unsafe.Pointer(v))
+		return typesEqual(st.elem, sv.elem)
+	case kindStruct:
+		st := (*structtype)(unsafe.Pointer(t))
+		sv := (*structtype)(unsafe.Pointer(v))
+		if len(st.fields) != len(sv.fields) {
+			return false
+		}
+		for i := range st.fields {
+			tf := &st.fields[i]
+			vf := &sv.fields[i]
+			if tf.name.name() != vf.name.name() {
+				return false
+			}
+			if !pkgPathEqual(tf.name.pkgPath(), vf.name.pkgPath()) {
+				return false
+			}
+			if !typesEqual(tf.typ, vf.typ) {
+				return false
+			}
+			if tf.name.tag() != vf.name.tag() {
+				return false
+			}
+			if tf.offset != vf.offset {
+				return false
+			}
+		}
+		return true
+	default:
+		println("runtime: impossible type kind", kind)
+		throw("runtime: impossible type kind")
+		return false
+	}
+}
+
+func pkgPathEqual(p, q *string) bool {
+	if p == q {
+		return true
+	}
+	if p == nil || q == nil {
+		return false
+	}
+	return *p == *q
+}
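
To close, a self-contained demonstration of the mcount/moff pattern
that uncommontype.methods() and additab rely on above (illustrative
names, not runtime code):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// header plays the role of uncommontype: a count and a byte
	// offset to a trailing array replace the old slice header.
	type header struct {
		mcount uint16
		moff   uint16
	}

	// rec lays the array out directly after the header, as the
	// compiler now does for [mcount]method.
	type rec struct {
		header
		m [3]int32
	}

	func methods(h *header) []int32 {
		p := unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(h.moff))
		return (*[1 << 16]int32)(p)[:h.mcount:h.mcount]
	}

	func main() {
		r := rec{m: [3]int32{7, 8, 9}}
		r.mcount = 3
		r.moff = uint16(unsafe.Offsetof(r.m))
		fmt.Println(methods(&r.header)) // [7 8 9]
	}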