Commit 06b12e66 authored by Matthew Dempsky

cmd/compile: move some ONAME-specific flags from Node to Name

The IsClosureVar, IsOutputParamHeapAddr, Assigned, Addrtaken,
InlFormal, and InlLocal flags are only meaningful for ONAME nodes, so
it's better to store them in Name.flags instead of Node.flags.
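
For readers without the source handy, here is a minimal, self-contained sketch of the bitset-flag accessor pattern this refers to. The constant names and the two-flag subset are illustrative only, not the compiler's exact definitions:

```go
package main

import "fmt"

// bitset16 mirrors the compiler's tiny bitset helpers: each flag is one
// bit in a uint16, so sixteen booleans fit in two bytes.
type bitset16 uint16

func (f *bitset16) set(mask uint16, b bool) {
	if b {
		*(*uint16)(f) |= mask
	} else {
		*(*uint16)(f) &^= mask
	}
}

// Name stands in for the ONAME-specific side data; the flag constants
// below are invented for this example.
type Name struct {
	flags bitset16
}

const (
	nameAssigned  = 1 << iota // variable has been assigned to
	nameAddrtaken             // variable's address has been taken
)

func (n *Name) Assigned() bool      { return n.flags&nameAssigned != 0 }
func (n *Name) SetAssigned(b bool)  { n.flags.set(nameAssigned, b) }
func (n *Name) Addrtaken() bool     { return n.flags&nameAddrtaken != 0 }
func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }

func main() {
	var n Name
	n.SetAddrtaken(true)
	fmt.Println(n.Assigned(), n.Addrtaken()) // false true
}
```

The real Name type carries more flags than shown; the point is only that each flag is a single bit in a small integer field, so moving these six flags onto Name is cheap.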

Two caveats though:

1. Previously, we would set Assigned and Addrtaken on the entire
expression tree involved in an assignment or addressing operation.
However, the rest of the compiler only cares whether the underlying
ONAME (if any) was assigned or had its address taken; a toy sketch of
this appears after the list.

2. This actually requires bumping Name.flags from bitset8 to bitset16,
while it doesn't let us shrink Node.flags at all. However, Name has
some trailing padding bytes, so expanding Name.flags doesn't cost any
memory.
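
To make caveat 1 concrete, here is a toy model; the types and the markAssigned helper are invented for illustration and stand in for the compiler's outervalue walk. For an assignment like p.f[0] = x, only the base variable p needs the Assigned bit, not the intermediate field and index expressions:

```go
package main

import "fmt"

// A toy expression tree, just enough to model "p.f[0]".
type expr interface{ base() *name }

type name struct {
	ident    string
	assigned bool // the only place the flag needs to live
}

func (n *name) base() *name { return n }

type field struct {
	x expr
	f string
}

func (s *field) base() *name { return s.x.base() }

type index struct {
	x expr
	i int
}

func (ix *index) base() *name { return ix.x.base() }

// markAssigned models the new behaviour: rather than flagging every node
// on the path (p, p.f, p.f[0]), only the underlying name is flagged.
func markAssigned(e expr) {
	e.base().assigned = true
}

func main() {
	p := &name{ident: "p"}
	lhs := &index{x: &field{x: p, f: "f"}, i: 0} // p.f[0] = ...
	markAssigned(lhs)
	fmt.Println(p.ident, p.assigned) // p true
}
```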

Passes toolstash-check.

Change-Id: I7775d713566a38d5b9723360b1659b79391744c2
Reviewed-on: https://go-review.googlesource.com/c/go/+/200898
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent 9969c720
@@ -101,7 +101,7 @@ func typecheckclosure(clo *Node, top int) {
 // Ignore assignments to the variable in straightline code
 // preceding the first capturing by a closure.
 if n.Name.Decldepth == decldepth {
-n.SetAssigned(false)
+n.Name.SetAssigned(false)
 }
 }
 }
@@ -192,10 +192,10 @@ func capturevars(xfunc *Node) {
 outermost := v.Name.Defn
 // out parameters will be assigned to implicitly upon return.
-if outermost.Class() != PPARAMOUT && !outermost.Addrtaken() && !outermost.Assigned() && v.Type.Width <= 128 {
+if outermost.Class() != PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
 v.Name.SetByval(true)
 } else {
-outermost.SetAddrtaken(true)
+outermost.Name.SetAddrtaken(true)
 outer = nod(OADDR, outer, nil)
 }
@@ -208,7 +208,7 @@ func capturevars(xfunc *Node) {
 if v.Name.Byval() {
 how = "value"
 }
-Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Addrtaken(), outermost.Assigned(), int32(v.Type.Width))
+Warnl(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
 }
 outer = typecheck(outer, ctxExpr)
......
@@ -278,7 +278,7 @@ func oldname(s *types.Sym) *Node {
 // Do not have a closure var for the active closure yet; make one.
 c = newname(s)
 c.SetClass(PAUTOHEAP)
-c.SetIsClosureVar(true)
+c.Name.SetIsClosureVar(true)
 c.SetIsDDD(n.IsDDD())
 c.Name.Defn = n
......
@@ -201,7 +201,7 @@ func addrescapes(n *Node) {
 }
 // If a closure reference escapes, mark the outer variable as escaping.
-if n.IsClosureVar() {
+if n.Name.IsClosureVar() {
 addrescapes(n.Name.Defn)
 break
 }
@@ -293,7 +293,7 @@ func moveToHeap(n *Node) {
 // Thus, we need the pointer to the heap copy always available so the
 // post-deferreturn code can copy the return value back to the stack.
 // See issue 16095.
-heapaddr.SetIsOutputParamHeapAddr(true)
+heapaddr.Name.SetIsOutputParamHeapAddr(true)
 }
 n.Name.Param.Stackcopy = stackcopy
......
@@ -995,9 +995,9 @@ func (e *Escape) later(k EscHole) EscHole {
 // canonicalNode returns the canonical *Node that n logically
 // represents.
 func canonicalNode(n *Node) *Node {
-if n != nil && n.IsClosureVar() {
+if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
 n = n.Name.Defn
-if n.IsClosureVar() {
+if n.Name.IsClosureVar() {
 Fatalf("still closure var")
 }
 }
......
@@ -483,12 +483,13 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) {
 fmt.Fprintf(s, " embedded")
 }
-if n.Addrtaken() {
-fmt.Fprint(s, " addrtaken")
-}
-if n.Assigned() {
-fmt.Fprint(s, " assigned")
+if n.Op == ONAME {
+if n.Name.Addrtaken() {
+fmt.Fprint(s, " addrtaken")
+}
+if n.Name.Assigned() {
+fmt.Fprint(s, " assigned")
+}
 }
 if n.Bounded() {
 fmt.Fprint(s, " bounded")
......
@@ -655,7 +655,7 @@ func inlnode(n *Node, maxCost int32) *Node {
 // NB: this check is necessary to prevent indirect re-assignment of the variable
 // having the address taken after the invocation or only used for reads is actually fine
 // but we have no easy way to distinguish the safe cases
-if d.Left.Addrtaken() {
+if d.Left.Name.Addrtaken() {
 if Debug['m'] > 1 {
 fmt.Printf("%v: cannot inline escaping closure variable %v\n", n.Line(), n.Left)
 }
@@ -919,9 +919,9 @@ func mkinlcall(n, fn *Node, maxCost int32) *Node {
 if genDwarfInline > 0 {
 inlf := inlvars[ln]
 if ln.Class() == PPARAM {
-inlf.SetInlFormal(true)
+inlf.Name.SetInlFormal(true)
 } else {
-inlf.SetInlLocal(true)
+inlf.Name.SetInlLocal(true)
 }
 inlf.Pos = ln.Pos
 inlfvars = append(inlfvars, inlf)
@@ -947,7 +947,7 @@ func mkinlcall(n, fn *Node, maxCost int32) *Node {
 // was manufactured by the inliner (e.g. "~R2"); such vars
 // were not part of the original callee.
 if !strings.HasPrefix(m.Sym.Name, "~R") {
-m.SetInlFormal(true)
+m.Name.SetInlFormal(true)
 m.Pos = mpos
 inlfvars = append(inlfvars, m)
 }
@@ -1125,7 +1125,7 @@ func inlvar(var_ *Node) *Node {
 n.SetClass(PAUTO)
 n.Name.SetUsed(true)
 n.Name.Curfn = Curfn // the calling function, not the called one
-n.SetAddrtaken(var_.Addrtaken())
+n.Name.SetAddrtaken(var_.Name.Addrtaken())
 Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
 return n
......
@@ -298,7 +298,7 @@ func (o *Order) cleanTempNoPop(mark ordermarker) []*Node {
 n := o.temp[i]
 if n.Name.Keepalive() {
 n.Name.SetKeepalive(false)
-n.SetAddrtaken(true) // ensure SSA keeps the n variable
+n.Name.SetAddrtaken(true) // ensure SSA keeps the n variable
 live := nod(OVARLIVE, n, nil)
 live = typecheck(live, ctxStmt)
 out = append(out, live)
......
@@ -262,7 +262,7 @@ func compile(fn *Node) {
 for _, n := range fn.Func.Dcl {
 switch n.Class() {
 case PPARAM, PPARAMOUT, PAUTO:
-if livenessShouldTrack(n) && n.Addrtaken() {
+if livenessShouldTrack(n) && n.Name.Addrtaken() {
 dtypesym(n.Type)
 // Also make sure we allocate a linker symbol
 // for the stack object data, for the same reason.
@@ -498,9 +498,9 @@ func createSimpleVar(n *Node) *dwarf.Var {
 typename := dwarf.InfoPrefix + typesymname(n.Type)
 inlIndex := 0
 if genDwarfInline > 1 {
-if n.InlFormal() || n.InlLocal() {
+if n.Name.InlFormal() || n.Name.InlLocal() {
 inlIndex = posInlIndex(n.Pos) + 1
-if n.InlFormal() {
+if n.Name.InlFormal() {
 abbrev = dwarf.DW_ABRV_PARAM
 }
 }
@@ -509,7 +509,7 @@ func createSimpleVar(n *Node) *dwarf.Var {
 return &dwarf.Var{
 Name: n.Sym.Name,
 IsReturnValue: n.Class() == PPARAMOUT,
-IsInlFormal: n.InlFormal(),
+IsInlFormal: n.Name.InlFormal(),
 Abbrev: abbrev,
 StackOffset: int32(offs),
 Type: Ctxt.Lookup(typename),
@@ -619,9 +619,9 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
 }
 inlIndex := 0
 if genDwarfInline > 1 {
-if n.InlFormal() || n.InlLocal() {
+if n.Name.InlFormal() || n.Name.InlLocal() {
 inlIndex = posInlIndex(n.Pos) + 1
-if n.InlFormal() {
+if n.Name.InlFormal() {
 abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
 }
 }
@@ -707,9 +707,9 @@ func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
 typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
 inlIndex := 0
 if genDwarfInline > 1 {
-if n.InlFormal() || n.InlLocal() {
+if n.Name.InlFormal() || n.Name.InlLocal() {
 inlIndex = posInlIndex(n.Pos) + 1
-if n.InlFormal() {
+if n.Name.InlFormal() {
 abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
 }
 }
@@ -718,7 +718,7 @@ func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
 dvar := &dwarf.Var{
 Name: n.Sym.Name,
 IsReturnValue: n.Class() == PPARAMOUT,
-IsInlFormal: n.InlFormal(),
+IsInlFormal: n.Name.InlFormal(),
 Abbrev: abbrev,
 Type: Ctxt.Lookup(typename),
 // The stack offset is used as a sorting key, so for decomposed
......
@@ -908,7 +908,7 @@ func (lv *Liveness) epilogue() {
 if lv.fn.Func.HasDefer() {
 for i, n := range lv.vars {
 if n.Class() == PPARAMOUT {
-if n.IsOutputParamHeapAddr() {
+if n.Name.IsOutputParamHeapAddr() {
 // Just to be paranoid. Heap addresses are PAUTOs.
 Fatalf("variable %v both output param and heap output param", n)
 }
@@ -920,7 +920,7 @@ func (lv *Liveness) epilogue() {
 // Note: zeroing is handled by zeroResults in walk.go.
 livedefer.Set(int32(i))
 }
-if n.IsOutputParamHeapAddr() {
+if n.Name.IsOutputParamHeapAddr() {
 // This variable will be overwritten early in the function
 // prologue (from the result of a mallocgc) but we need to
 // zero it in case that malloc causes a stack scan.
......
@@ -1264,7 +1264,7 @@ func (s *state) stmt(n *Node) {
 case OVARLIVE:
 // Insert a varlive op to record that a variable is still live.
-if !n.Left.Addrtaken() {
+if !n.Left.Name.Addrtaken() {
 s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
 }
 switch n.Left.Class() {
@@ -4090,7 +4090,7 @@ func (s *state) canSSA(n *Node) bool {
 if n.Op != ONAME {
 return false
 }
-if n.Addrtaken() {
+if n.Name.Addrtaken() {
 return false
 }
 if n.isParamHeapCopy() {
@@ -5257,7 +5257,7 @@ func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
 func emitStackObjects(e *ssafn, pp *Progs) {
 var vars []*Node
 for _, n := range e.curfn.Func.Dcl {
-if livenessShouldTrack(n) && n.Addrtaken() {
+if livenessShouldTrack(n) && n.Name.Addrtaken() {
 vars = append(vars, n)
 }
 }
@@ -6015,7 +6015,7 @@ func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
 n := name.N.(*Node)
 ptrType := types.NewPtr(types.Types[TUINT8])
 lenType := types.Types[TINT]
-if n.Class() == PAUTO && !n.Addrtaken() {
+if n.Class() == PAUTO && !n.Name.Addrtaken() {
 // Split this string up into two separate variables.
 p := e.splitSlot(&name, ".ptr", 0, ptrType)
 l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
@@ -6029,7 +6029,7 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot
 n := name.N.(*Node)
 u := types.Types[TUINTPTR]
 t := types.NewPtr(types.Types[TUINT8])
-if n.Class() == PAUTO && !n.Addrtaken() {
+if n.Class() == PAUTO && !n.Name.Addrtaken() {
 // Split this interface up into two separate variables.
 f := ".itab"
 if n.Type.IsEmptyInterface() {
@@ -6047,7 +6047,7 @@ func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ss
 n := name.N.(*Node)
 ptrType := types.NewPtr(name.Type.Elem())
 lenType := types.Types[TINT]
-if n.Class() == PAUTO && !n.Addrtaken() {
+if n.Class() == PAUTO && !n.Name.Addrtaken() {
 // Split this slice up into three separate variables.
 p := e.splitSlot(&name, ".ptr", 0, ptrType)
 l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
@@ -6069,7 +6069,7 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot)
 } else {
 t = types.Types[TFLOAT32]
 }
-if n.Class() == PAUTO && !n.Addrtaken() {
+if n.Class() == PAUTO && !n.Name.Addrtaken() {
 // Split this complex up into two separate variables.
 r := e.splitSlot(&name, ".real", 0, t)
 i := e.splitSlot(&name, ".imag", t.Size(), t)
@@ -6087,7 +6087,7 @@ func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
 } else {
 t = types.Types[TUINT32]
 }
-if n.Class() == PAUTO && !n.Addrtaken() {
+if n.Class() == PAUTO && !n.Name.Addrtaken() {
 // Split this int64 up into two separate variables.
 if thearch.LinkArch.ByteOrder == binary.BigEndian {
 return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
@@ -6109,7 +6109,7 @@ func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
 for f := 0; f < i; f++ {
 offset += st.FieldType(f).Size()
 }
-if n.Class() == PAUTO && !n.Addrtaken() {
+if n.Class() == PAUTO && !n.Name.Addrtaken() {
 // Note: the _ field may appear several times. But
 // have no fear, identically-named but distinct Autos are
 // ok, albeit maybe confusing for a debugger.
@@ -6125,7 +6125,7 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
 e.Fatalf(n.Pos, "bad array size")
 }
 et := at.Elem()
-if n.Class() == PAUTO && !n.Addrtaken() {
+if n.Class() == PAUTO && !n.Name.Addrtaken() {
 return e.splitSlot(&name, "[0]", 0, et)
 }
 return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
......
@@ -828,20 +828,17 @@ func typecheck1(n *Node, top int) (res *Node) {
 default:
 checklvalue(n.Left, "take the address of")
 r := outervalue(n.Left)
-if r.Orig != r && r.Op == ONAME {
-Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
-}
-for l := n.Left; ; l = l.Left {
-l.SetAddrtaken(true)
-if l.IsClosureVar() && !capturevarscomplete {
+if r.Op == ONAME {
+if r.Orig != r {
+Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
+}
+r.Name.SetAddrtaken(true)
+if r.Name.IsClosureVar() && !capturevarscomplete {
 // Mark the original variable as Addrtaken so that capturevars
 // knows not to pass it by value.
 // But if the capturevars phase is complete, don't touch it,
 // in case l.Name's containing function has not yet been compiled.
-l.Name.Defn.SetAddrtaken(true)
-}
-if l == r {
-break
+r.Name.Defn.Name.SetAddrtaken(true)
 }
 }
 n.Left = defaultlit(n.Left, nil)
@@ -3061,18 +3058,12 @@ func checkassign(stmt *Node, n *Node) {
 // Variables declared in ORANGE are assigned on every iteration.
 if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ORANGE {
 r := outervalue(n)
-var l *Node
-for l = n; l != r; l = l.Left {
-l.SetAssigned(true)
-if l.IsClosureVar() {
-l.Name.Defn.SetAssigned(true)
+if r.Op == ONAME {
+r.Name.SetAssigned(true)
+if r.Name.IsClosureVar() {
+r.Name.Defn.Name.SetAssigned(true)
 }
 }
-l.SetAssigned(true)
-if l.IsClosureVar() {
-l.Name.Defn.SetAssigned(true)
-}
 }
 if islvalue(n) {
......
@@ -97,7 +97,7 @@ func paramoutheap(fn *Node) bool {
 for _, ln := range fn.Func.Dcl {
 switch ln.Class() {
 case PPARAMOUT:
-if ln.isParamStackCopy() || ln.Addrtaken() {
+if ln.isParamStackCopy() || ln.Name.Addrtaken() {
 return true
 }
@@ -2097,7 +2097,7 @@ func aliased(n *Node, all []*Node, i int) bool {
 continue
 case PAUTO, PPARAM, PPARAMOUT:
-if n.Addrtaken() {
+if n.Name.Addrtaken() {
 varwrite = true
 continue
 }
@@ -2145,7 +2145,7 @@ func varexpr(n *Node) bool {
 case ONAME:
 switch n.Class() {
 case PAUTO, PPARAM, PPARAMOUT:
-if !n.Addrtaken() {
+if !n.Name.Addrtaken() {
 return true
 }
 }
......