Commit 73f1a1a1 authored by Heschi Kreinick

cmd/compile/internal/ssa: use reverse postorder traversal

Instead of the hand-written control flow analysis in debug info
generation, use a reverse postorder traversal, which is basically the
same thing. It should be slightly faster.

More importantly, the previous version simply gave up in the case of
non-reducible functions, and produced output that caused a later stage
to crash. It turns out that there's a non-reducible function in
compress/flate, so that wasn't a theoretical issue.

With this change, all blocks will be visited, even for non-reducible
functions.

Change-Id: Id47536764ee93203c6b4105a1a3013fe3265aa12
Reviewed-on: https://go-review.googlesource.com/73110
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
parent 81ec7256
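
The reverse-postorder idea can be illustrated outside the compiler. The sketch below is a minimal, self-contained Go program, not the compiler's actual code: the block type, its fields, and the three-block CFG are hypothetical stand-ins for *ssa.Block and f.Postorder(), used only to show that iterating a postorder from the back visits every reachable block exactly once, even in an irreducible CFG built from mutual gotos.

	package main

	import "fmt"

	// block is a hypothetical stand-in for an SSA basic block: just an ID
	// and successor edges. (The real *ssa.Block carries much more.)
	type block struct {
		id    int
		succs []*block
	}

	// postorder returns the blocks reachable from entry in postorder: a
	// block is appended only after the DFS has finished all of its successors.
	func postorder(entry *block) []*block {
		var order []*block
		seen := map[*block]bool{}
		var walk func(*block)
		walk = func(b *block) {
			if seen[b] {
				return
			}
			seen[b] = true
			for _, s := range b.succs {
				walk(s)
			}
			order = append(order, b)
		}
		walk(entry)
		return order
	}

	func main() {
		// An irreducible CFG: b1 and b2 jump into each other (mutual gotos),
		// so neither dominates the other and there is no single loop header.
		entry := &block{id: 0}
		b1 := &block{id: 1}
		b2 := &block{id: 2}
		entry.succs = []*block{b1, b2}
		b1.succs = []*block{b2}
		b2.succs = []*block{b1}

		// Reverse postorder: iterate the postorder from the back, the same
		// loop shape the new BuildFuncDebug code uses over f.Postorder().
		po := postorder(entry)
		for i := len(po) - 1; i >= 0; i-- {
			fmt.Println("visiting block", po[i].id)
		}
		// Every reachable block is visited exactly once; a worklist that
		// waits for its predecessors to finish can stall on this graph.
	}

Because the walk is driven by reachability rather than by waiting for predecessors to be fully processed, no block is skipped when the CFG lacks a dominating loop header, which matches the failure described above for the non-reducible function in compress/flate.
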
@@ -606,7 +606,6 @@ var knownFormats = map[string]string{
 	"[16]byte %x": "",
 	"[]*cmd/compile/internal/gc.Node %v": "",
 	"[]*cmd/compile/internal/gc.Sig %#v": "",
-	"[]*cmd/compile/internal/ssa.Block %+v": "",
 	"[]*cmd/compile/internal/ssa.Value %v": "",
 	"[][]cmd/compile/internal/ssa.SlotID %v": "",
 	"[]byte %s": "",
...
@@ -274,30 +274,14 @@ func BuildFuncDebug(f *Func, loggingEnabled bool) *FuncDebug {
 	// Build up block states, starting with the first block, then
 	// processing blocks once their predecessors have been processed.
-	// TODO: use a reverse post-order traversal instead of the work queue.
 
 	// Location list entries for each block.
 	blockLocs := make([]*BlockDebug, f.NumBlocks())
-	// Work queue of blocks to visit. Some of them may already be processed.
-	work := []*Block{f.Entry}
-
-	for len(work) > 0 {
-		b := work[0]
-		work = work[1:]
-		if blockLocs[b.ID] != nil {
-			continue // already processed
-		}
-		if !state.predecessorsDone(b, blockLocs) {
-			continue // not ready yet
-		}
-		for _, edge := range b.Succs {
-			if blockLocs[edge.Block().ID] != nil {
-				continue
-			}
-			work = append(work, edge.Block())
-		}
+	// Reverse postorder: visit a block after as many as possible of its
+	// predecessors have been visited.
+	po := f.Postorder()
+	for i := len(po) - 1; i >= 0; i-- {
+		b := po[i]
 
 		// Build the starting state for the block from the final
 		// state of its predecessors.
@@ -351,7 +335,7 @@ func BuildFuncDebug(f *Func, loggingEnabled bool) *FuncDebug {
 			last.End = BlockEnd
 		}
 		if state.loggingEnabled {
-			f.Logf("Block done: locs %v, regs %v. work = %+v\n", state.BlockString(locs), state.registerContents, work)
+			f.Logf("Block done: locs %v, regs %v\n", state.BlockString(locs), state.registerContents)
 		}
 		blockLocs[b.ID] = locs
 	}
@@ -382,30 +366,6 @@ func isSynthetic(slot *LocalSlot) bool {
 	return c == '.' || c == '~'
 }
 
-// predecessorsDone reports whether block is ready to be processed.
-func (state *debugState) predecessorsDone(b *Block, blockLocs []*BlockDebug) bool {
-	f := b.Func
-	for _, edge := range b.Preds {
-		// Ignore back branches, e.g. the continuation of a for loop.
-		// This may not work for functions with mutual gotos, which are not
-		// reducible, in which case debug information will be missing for any
-		// code after that point in the control flow.
-		if f.sdom().isAncestorEq(b, edge.b) {
-			if state.loggingEnabled {
-				f.Logf("ignoring back branch from %v to %v\n", edge.b, b)
-			}
-			continue // back branch
-		}
-		if blockLocs[edge.b.ID] == nil {
-			if state.loggingEnabled {
-				f.Logf("%v is not ready because %v isn't done\n", b, edge.b)
-			}
-			return false
-		}
-	}
-	return true
-}
-
 // mergePredecessors takes the end state of each of b's predecessors and
 // intersects them to form the starting state for b.
 // The registers slice (the second return value) will be reused for each call to mergePredecessors.
...