Commit a5e3cac8 authored by Josh Bleecher Snyder

cmd/compile: rearrange fields between ssa.Func, ssa.Cache, and ssa.Config

This makes ssa.Func, ssa.Cache, and ssa.Config fulfill
the roles laid out for them in CL 38160.

The only non-trivial change in this CL is how cached
values and blocks get IDs. Prior to this CL, their IDs were
assigned as part of resetting the cache, and only modified
IDs were reset. This required knowing how many values and
blocks were modified, which required a tight coupling between
ssa.Func and ssa.Config. To eliminate that coupling,
we now zero values and blocks during reset,
and assign their IDs when they are used.
Since unused values and blocks have ID == 0,
we can efficiently find the last used value/block,
to avoid zeroing everything.
Bulk zeroing is efficient, but not efficient enough
to obviate the need to avoid zeroing everything every time.
As a happy side-effect, ssa.Func.Free is no longer necessary.
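
To make the scheme concrete, here is a minimal, self-contained toy model in Go. It is not the compiler's code (the real version lives in func.go and cache.go in the diff below, where the ID allocator belongs to the Func); it only illustrates assign-ID-on-use plus the sort.Search-based partial zeroing:

	// Toy model (not the compiler's actual types) of the scheme above:
	// cached slots stay zero until handed out, receive their ID at
	// allocation time, and Reset binary-searches the IDs to zero only
	// the prefix that was used.
	package main

	import (
		"fmt"
		"sort"
	)

	type Value struct {
		ID int
		Op string
	}

	type Cache struct {
		values [16]Value
		lastID int // stands in for the Func's ID allocator; IDs start at 1
	}

	// NewValue hands out the slot for the next ID, stamping the ID only now.
	// Unused slots therefore keep ID == 0.
	func (c *Cache) NewValue(op string) *Value {
		c.lastID++
		id := c.lastID
		var v *Value
		if id < len(c.values) {
			v = &c.values[id]
			v.ID = id
		} else {
			v = &Value{ID: id} // overflow: fall back to the heap
		}
		v.Op = op
		return v
	}

	// Reset zeroes only the used slots: since unused slots have ID == 0,
	// binary search finds where the used region ends.
	func (c *Cache) Reset() {
		n := sort.Search(len(c.values), func(i int) bool { return c.values[i].ID == 0 })
		used := c.values[:n]
		for i := range used {
			used[i] = Value{}
		}
		c.lastID = 0
	}

	func main() {
		c := new(Cache)
		c.NewValue("Add")
		c.NewValue("Mul")
		c.Reset()                             // zeroes values[:3], not all 16 slots
		fmt.Println(c.values[1], c.values[2]) // {0 } {0 }
	}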

DebugHashMatch and friends now belong in func.go.
They have been left in place for clarity and review.
I will move them in a subsequent CL.

Passes toolstash -cmp. No compiler performance impact.
No change in 'go test cmd/compile/internal/ssa' execution time.

Change-Id: I2eb7af58da067ef6a36e815a6f386cfe8634d098
Reviewed-on: https://go-review.googlesource.com/38167
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent ccaa8e3c
@@ -483,6 +483,11 @@ func Main() {
 		}
 	}
+	// Prepare for SSA compilation.
+	// This must be before peekitabs, because peekitabs
+	// can trigger function compilation.
+	initssaconfig()
 	// Just before compilation, compile itabs found on
 	// the right side of OCONVIFACE so that methods
 	// can be de-virtualized during compilation.
@@ -411,7 +411,6 @@ func compile(fn *Node) {
 	gclocals := makefuncdatasym("gclocals·", obj.FUNCDATA_LocalsPointerMaps)
 	genssa(ssafn, ptxt, gcargs, gclocals)
-	ssafn.Free()
 	obj.Flushplist(Ctxt, plist) // convert from Prog list to machine code
 	ptxt = nil // nil to prevent misuse; Prog may have been freed by Flushplist
@@ -20,16 +20,14 @@ import (
 var ssaConfig *ssa.Config
 var ssaExp ssaExport
+var ssaCache *ssa.Cache
-func initssa() *ssa.Config {
-	if ssaConfig == nil {
+func initssaconfig() {
 	ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
 	if Thearch.LinkArch.Name == "386" {
 		ssaConfig.Set387(Thearch.Use387)
 	}
-	}
-	ssaConfig.HTML = nil
-	return ssaConfig
+	ssaCache = new(ssa.Cache)
 }
 // buildssa builds an SSA function.
@@ -51,12 +49,15 @@ func buildssa(fn *Node) *ssa.Func {
 	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
 		s.cgoUnsafeArgs = true
 	}
-	// TODO(khr): build config just once at the start of the compiler binary
 	ssaExp.log = printssa
-	s.config = initssa()
-	s.f = s.config.NewFunc()
+	s.f = ssa.NewFunc()
+	s.config = ssaConfig
+	s.f.Config = ssaConfig
+	s.f.Cache = ssaCache
+	s.f.Cache.Reset()
+	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
 	s.f.Name = name
 	if fn.Func.Pragma&Nosplit != 0 {
 		s.f.NoSplit = true
@@ -71,12 +72,9 @@ func buildssa(fn *Node) *ssa.Func {
 	}()
 	s.exitCode = fn.Func.Exit
 	s.panics = map[funcLine]*ssa.Block{}
-	s.config.DebugTest = s.config.DebugHashMatch("GOSSAHASH", name)
 	if name == os.Getenv("GOSSAFUNC") {
-		// TODO: tempfile? it is handy to have the location
-		// of this file be stable, so you can just reload in the browser.
-		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
+		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", ssaConfig, name)
 		// TODO: generate and print a mapping from nodes to values and blocks
 	}
@@ -140,7 +138,6 @@ func buildssa(fn *Node) *ssa.Func {
 	}
 	if nerrors > 0 {
-		s.f.Free()
 		return nil
 	}
@@ -152,7 +149,6 @@ func buildssa(fn *Node) *ssa.Func {
 	// Main call to ssa package to compile function
 	ssa.Compile(s.f)
 	if nerrors > 0 {
-		s.f.Free()
 		return nil
 	}
@@ -4287,7 +4283,7 @@ func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
 			}
 			f.Logf("%s\t%s\n", s, p)
 		}
-		if f.Config.HTML != nil {
+		if f.HTMLWriter != nil {
 			// LineHist is defunct now - this code won't do
 			// anything.
 			// TODO: fix this (ideally without a global variable)
@@ -4311,7 +4307,7 @@ func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
 			}
 			buf.WriteString("</dl>")
 			buf.WriteString("</code>")
-			f.Config.HTML.WriteColumn("genssa", buf.String())
+			f.HTMLWriter.WriteColumn("genssa", buf.String())
 			// ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
 		}
 	}
@@ -4328,8 +4324,8 @@ func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
 	// Remove leftover instrumentation from the instruction stream.
 	removevardef(ptxt)
-	f.Config.HTML.Close()
-	f.Config.HTML = nil
+	f.HTMLWriter.Close()
+	f.HTMLWriter = nil
 }
 type FloatingEQNEJump struct {
@@ -4,7 +4,38 @@
 package ssa
+import "sort"
 // A Cache holds reusable compiler state.
 // It is intended to be re-used for multiple Func compilations.
 type Cache struct {
+	// Storage for low-numbered values and blocks.
+	values [2000]Value
+	blocks [200]Block
+	locs [2000]Location
+	// Reusable stackAllocState.
+	// See stackalloc.go's {new,put}StackAllocState.
+	stackAllocState *stackAllocState
+	domblockstore []ID // scratch space for computing dominators
+	scrSparse []*sparseSet // scratch sparse sets to be re-used.
+}
+func (c *Cache) Reset() {
+	nv := sort.Search(len(c.values), func(i int) bool { return c.values[i].ID == 0 })
+	xv := c.values[:nv]
+	for i := range xv {
+		xv[i] = Value{}
+	}
+	nb := sort.Search(len(c.blocks), func(i int) bool { return c.blocks[i].ID == 0 })
+	xb := c.blocks[:nb]
+	for i := range xb {
+		xb[i] = Block{}
+	}
+	nl := sort.Search(len(c.locs), func(i int) bool { return c.locs[i] == nil })
+	xl := c.locs[:nl]
+	for i := range xl {
+		xl[i] = nil
+	}
 }
@@ -43,7 +43,7 @@ func Compile(f *Func) {
 	// Run all the passes
 	printFunc(f)
-	f.Config.HTML.WriteFunc("start", f)
+	f.HTMLWriter.WriteFunc("start", f)
 	if BuildDump != "" && BuildDump == f.Name {
 		f.dumpFile("build")
 	}
@@ -71,7 +71,7 @@ func Compile(f *Func) {
 		tEnd := time.Now()
 		// Need something less crude than "Log the whole intermediate result".
-		if f.Log() || f.Config.HTML != nil {
+		if f.Log() || f.HTMLWriter != nil {
 			time := tEnd.Sub(tStart).Nanoseconds()
 			var stats string
 			if logMemStats {
@@ -86,7 +86,7 @@ func Compile(f *Func) {
 			f.Logf(" pass %s end %s\n", p.name, stats)
 			printFunc(f)
-			f.Config.HTML.WriteFunc(fmt.Sprintf("after %s <span class=\"stats\">%s</span>", phaseName, stats), f)
+			f.HTMLWriter.WriteFunc(fmt.Sprintf("after %s <span class=\"stats\">%s</span>", phaseName, stats), f)
 		}
 		if p.time || p.mem {
 			// Surround timing information w/ enough context to allow comparisons.
@@ -32,7 +32,6 @@ type Config struct {
 	LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
 	hasGReg bool // has hardware g register
 	fe Frontend // callbacks into compiler frontend
-	HTML *HTMLWriter // html writer, for debugging
 	ctxt *obj.Link // Generic arch information
 	optimize bool // Do optimization
 	noDuffDevice bool // Don't use Duff's device
@@ -41,27 +40,7 @@ type Config struct {
 	OldArch bool // True for older versions of architecture, e.g. true for PPC64BE, false for PPC64LE
 	NeedsFpScratch bool // No direct move between GP and FP register sets
 	BigEndian bool //
-	DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
 	sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
-	curFunc *Func
-	// TODO: more stuff. Compiler flags of interest, ...
-	// Given an environment variable used for debug hash match,
-	// what file (if any) receives the yes/no logging?
-	logfiles map[string]*os.File
-	// Storage for low-numbered values and blocks.
-	values [2000]Value
-	blocks [200]Block
-	locs [2000]Location
-	// Reusable stackAllocState.
-	// See stackalloc.go's {new,put}StackAllocState.
-	stackAllocState *stackAllocState
-	domblockstore []ID // scratch space for computing dominators
-	scrSparse []*sparseSet // scratch sparse sets to be re-used.
 }
 type TypeSource interface {
@@ -304,16 +283,6 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
 		opcodeTable[OpARMCALLudiv].reg.clobbers |= 1 << 12 // R12
 	}
-	// Assign IDs to preallocated values/blocks.
-	for i := range c.values {
-		c.values[i].ID = ID(i)
-	}
-	for i := range c.blocks {
-		c.blocks[i].ID = ID(i)
-	}
-	c.logfiles = make(map[string]*os.File)
 	// cutoff is compared with product of numblocks and numvalues,
 	// if product is smaller than cutoff, use old non-sparse method.
 	// cutoff == 0 implies all sparse.
@@ -342,18 +311,6 @@ func (c *Config) Frontend() Frontend { return c.fe }
 func (c *Config) SparsePhiCutoff() uint64 { return c.sparsePhiCutoff }
 func (c *Config) Ctxt() *obj.Link { return c.ctxt }
-// NewFunc returns a new, empty function object.
-// Caller must call f.Free() before calling NewFunc again.
-func (c *Config) NewFunc() *Func {
-	// TODO(khr): should this function take name, type, etc. as arguments?
-	if c.curFunc != nil {
-		c.Fatalf(src.NoXPos, "NewFunc called without previous Free")
-	}
-	f := &Func{Config: c, NamedValues: map[LocalSlot][]*Value{}}
-	c.curFunc = f
-	return f
-}
 func (c *Config) Logf(msg string, args ...interface{}) { c.fe.Logf(msg, args...) }
 func (c *Config) Log() bool { return c.fe.Log() }
 func (c *Config) Fatalf(pos src.XPos, msg string, args ...interface{}) { c.fe.Fatalf(pos, msg, args...) }
@@ -362,8 +319,11 @@ func (c *Config) Warnl(pos src.XPos, msg string, args ...interface{}) { c.fe.Wa
 func (c *Config) Debug_checknil() bool { return c.fe.Debug_checknil() }
 func (c *Config) Debug_wb() bool { return c.fe.Debug_wb() }
-func (c *Config) logDebugHashMatch(evname, name string) {
-	file := c.logfiles[evname]
+func (f *Func) logDebugHashMatch(evname, name string) {
+	if f.logfiles == nil {
+		f.logfiles = make(map[string]*os.File)
+	}
+	file := f.logfiles[evname]
 	if file == nil {
 		file = os.Stdout
 		tmpfile := os.Getenv("GSHS_LOGFILE")
@@ -371,10 +331,10 @@ func (c *Config) logDebugHashMatch(evname, name string) {
 			var ok error
 			file, ok = os.Create(tmpfile)
 			if ok != nil {
-				c.Fatalf(src.NoXPos, "Could not open hash-testing logfile %s", tmpfile)
+				f.Fatalf("could not open hash-testing logfile %s", tmpfile)
 			}
 		}
-		c.logfiles[evname] = file
+		f.logfiles[evname] = file
 	}
 	s := fmt.Sprintf("%s triggered %s\n", evname, name)
 	file.WriteString(s)
@@ -395,14 +355,13 @@ func (c *Config) logDebugHashMatch(evname, name string) {
 // GSHS_LOGFILE
 // or standard out if that is empty or there is an error
 // opening the file.
-func (c *Config) DebugHashMatch(evname, name string) bool {
+func (f *Func) DebugHashMatch(evname, name string) bool {
 	evhash := os.Getenv(evname)
 	if evhash == "" {
 		return true // default behavior with no EV is "on"
 	}
 	if evhash == "y" || evhash == "Y" {
-		c.logDebugHashMatch(evname, name)
+		f.logDebugHashMatch(evname, name)
 		return true
 	}
 	if evhash == "n" || evhash == "N" {
@@ -417,7 +376,7 @@ func (c *Config) DebugHashMatch(evname, name string) bool {
 	}
 	if strings.HasSuffix(hstr, evhash) {
-		c.logDebugHashMatch(evname, name)
+		f.logDebugHashMatch(evname, name)
 		return true
 	}
@@ -430,13 +389,13 @@ func (c *Config) DebugHashMatch(evname, name string) bool {
 			break
 		}
 		if strings.HasSuffix(hstr, evv) {
-			c.logDebugHashMatch(ev, name)
+			f.logDebugHashMatch(ev, name)
 			return true
 		}
 	}
 	return false
 }
-func (c *Config) DebugNameMatch(evname, name string) bool {
+func DebugNameMatch(evname, name string) bool {
 	return os.Getenv(evname) == name
 }
@@ -36,6 +36,5 @@ func benchmarkCopyElim(b *testing.B, n int) {
 	for i := 0; i < b.N; i++ {
 		fun := Fun(c, "entry", Bloc("entry", values...))
 		Copyelim(fun.f)
-		fun.f.Free()
 	}
 }
@@ -154,7 +154,6 @@ func BenchmarkDeadCode(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				fun := Fun(c, "entry", blocks...)
 				Deadcode(fun.f)
-				fun.f.Free()
 			}
 		})
 	}
@@ -70,9 +70,9 @@ const nscratchslices = 7
 // in make.bash.
 const minscratchblocks = 512
-func (cfg *Config) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g []ID) {
+func (cache *Cache) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g []ID) {
 	tot := maxBlockID * nscratchslices
-	scratch := cfg.domblockstore
+	scratch := cache.domblockstore
 	if len(scratch) < tot {
 		// req = min(1.5*tot, nscratchslices*minscratchblocks)
 		// 50% padding allows for graph growth in later phases.
@@ -81,7 +81,7 @@ func (cfg *Config) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g []ID
 			req = nscratchslices * minscratchblocks
 		}
 		scratch = make([]ID, req)
-		cfg.domblockstore = scratch
+		cache.domblockstore = scratch
 	} else {
 		// Clear as much of scratch as we will (re)use
 		scratch = scratch[0:tot]
@@ -117,7 +117,7 @@ func (f *Func) dominatorsLTOrig(entry *Block, predFn linkedBlocks, succFn linked
 	// Adapted directly from the original TOPLAS article's "simple" algorithm
 	maxBlockID := entry.Func.NumBlocks()
-	semi, vertex, label, parent, ancestor, bucketHead, bucketLink := f.Config.scratchBlocksForDom(maxBlockID)
+	semi, vertex, label, parent, ancestor, bucketHead, bucketLink := f.Cache.scratchBlocksForDom(maxBlockID)
 	// This version uses integers for most of the computation,
 	// to make the work arrays smaller and pointer-free.
@@ -8,6 +8,7 @@ import (
 	"cmd/internal/src"
 	"fmt"
 	"math"
+	"os"
 	"strings"
 )
@@ -16,6 +17,7 @@ import (
 // Funcs are single-use; a new Func must be created for every compiled function.
 type Func struct {
 	Config *Config // architecture information
+	Cache *Cache // re-usable cache
 	pass *pass // current pass information (name, options, etc.)
 	Name string // e.g. bytes·Compare
 	Type Type // type signature of the function.
@@ -24,6 +26,12 @@ type Func struct {
 	bid idAlloc // block ID allocator
 	vid idAlloc // value ID allocator
+	// Given an environment variable used for debug hash match,
+	// what file (if any) receives the yes/no logging?
+	logfiles map[string]*os.File
+	HTMLWriter *HTMLWriter // html writer, for debugging
+	DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
 	scheduled bool // Values in Blocks are in final order
 	NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
@@ -52,6 +60,12 @@ type Func struct {
 	constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type
 }
+// NewFunc returns a new, empty function object.
+// Caller must set f.Config and f.Cache before using f.
+func NewFunc() *Func {
+	return &Func{NamedValues: make(map[LocalSlot][]*Value)}
+}
 // NumBlocks returns an integer larger than the id of any Block in the Func.
 func (f *Func) NumBlocks() int {
 	return f.bid.num()
@@ -64,9 +78,9 @@ func (f *Func) NumValues() int {
 // newSparseSet returns a sparse set that can store at least up to n integers.
 func (f *Func) newSparseSet(n int) *sparseSet {
-	for i, scr := range f.Config.scrSparse {
+	for i, scr := range f.Cache.scrSparse {
 		if scr != nil && scr.cap() >= n {
-			f.Config.scrSparse[i] = nil
+			f.Cache.scrSparse[i] = nil
 			scr.clear()
 			return scr
 		}
@@ -76,13 +90,13 @@ func (f *Func) newSparseSet(n int) *sparseSet {
 // retSparseSet returns a sparse set to the config's cache of sparse sets to be reused by f.newSparseSet.
 func (f *Func) retSparseSet(ss *sparseSet) {
-	for i, scr := range f.Config.scrSparse {
+	for i, scr := range f.Cache.scrSparse {
 		if scr == nil {
-			f.Config.scrSparse[i] = ss
+			f.Cache.scrSparse[i] = ss
 			return
 		}
 	}
-	f.Config.scrSparse = append(f.Config.scrSparse, ss)
+	f.Cache.scrSparse = append(f.Cache.scrSparse, ss)
 }
 // newValue allocates a new Value with the given fields and places it at the end of b.Values.
@@ -94,8 +108,9 @@ func (f *Func) newValue(op Op, t Type, b *Block, pos src.XPos) *Value {
 		v.argstorage[0] = nil
 	} else {
 		ID := f.vid.get()
-		if int(ID) < len(f.Config.values) {
-			v = &f.Config.values[ID]
+		if int(ID) < len(f.Cache.values) {
+			v = &f.Cache.values[ID]
+			v.ID = ID
 		} else {
 			v = &Value{ID: ID}
 		}
@@ -120,8 +135,9 @@ func (f *Func) newValueNoBlock(op Op, t Type, pos src.XPos) *Value {
 		v.argstorage[0] = nil
 	} else {
 		ID := f.vid.get()
-		if int(ID) < len(f.Config.values) {
-			v = &f.Config.values[ID]
+		if int(ID) < len(f.Cache.values) {
+			v = &f.Cache.values[ID]
+			v.ID = ID
 		} else {
 			v = &Value{ID: ID}
 		}
@@ -190,8 +206,9 @@ func (f *Func) NewBlock(kind BlockKind) *Block {
 		b.succstorage[0].b = nil
 	} else {
 		ID := f.bid.get()
-		if int(ID) < len(f.Config.blocks) {
-			b = &f.Config.blocks[ID]
+		if int(ID) < len(f.Cache.blocks) {
+			b = &f.Cache.blocks[ID]
+			b.ID = ID
 		} else {
 			b = &Block{ID: ID}
 		}
@@ -468,48 +485,6 @@ func (f *Func) Logf(msg string, args ...interface{}) { f.Config.Logf(msg, args
 func (f *Func) Log() bool { return f.Config.Log() }
 func (f *Func) Fatalf(msg string, args ...interface{}) { f.Config.Fatalf(f.Entry.Pos, msg, args...) }
-func (f *Func) Free() {
-	// Clear cached CFG info.
-	f.invalidateCFG()
-	// Clear values.
-	n := f.vid.num()
-	if n > len(f.Config.values) {
-		n = len(f.Config.values)
-	}
-	for i := 1; i < n; i++ {
-		f.Config.values[i] = Value{}
-		f.Config.values[i].ID = ID(i)
-	}
-	// Clear blocks.
-	n = f.bid.num()
-	if n > len(f.Config.blocks) {
-		n = len(f.Config.blocks)
-	}
-	for i := 1; i < n; i++ {
-		f.Config.blocks[i] = Block{}
-		f.Config.blocks[i].ID = ID(i)
-	}
-	// Clear locs.
-	n = len(f.RegAlloc)
-	if n > len(f.Config.locs) {
-		n = len(f.Config.locs)
-	}
-	head := f.Config.locs[:n]
-	for i := range head {
-		head[i] = nil
-	}
-	// Unregister from config.
-	if f.Config.curFunc != f {
-		f.Fatalf("free of function which isn't the last one allocated")
-	}
-	f.Config.curFunc = nil
-	*f = Func{} // just in case
-}
 // postorder returns the reachable blocks in f in a postorder traversal.
 func (f *Func) postorder() []*Block {
 	if f.cachedPostorder == nil {
@@ -144,7 +144,12 @@ var emptyPass pass = pass{
 // supplied to one of the Bloc functions. Each of the bloc names and
 // valu names should be unique across the Fun.
 func Fun(c *Config, entry string, blocs ...bloc) fun {
-	f := c.NewFunc()
+	f := NewFunc()
+	f.Config = c
+	// TODO: Either mark some SSA tests as t.Parallel,
+	// or set up a shared Cache and Reset it between tests.
+	// But not both.
+	f.Cache = new(Cache)
 	f.pass = &emptyPass
 	blocks := make(map[string]*Block)
@@ -162,7 +162,6 @@ func BenchmarkFuse(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				fun := Fun(c, "entry", blocks...)
 				fuse(fun.f)
-				fun.f.Free()
 			}
 		})
 	}
@@ -82,6 +82,4 @@ func TestLoopConditionS390X(t *testing.T) {
 		OpS390XCMP: 1,
 		OpS390XCMPWconst: 0,
 	})
-	fun.f.Free()
 }
@@ -495,7 +495,7 @@ func isLeaf(f *Func) bool {
 func (s *regAllocState) init(f *Func) {
 	s.f = f
-	s.f.RegAlloc = s.f.Config.locs[:0]
+	s.f.RegAlloc = s.f.Cache.locs[:0]
 	s.registers = f.Config.registers
 	if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) {
 		s.f.Fatalf("bad number of registers: %d", nr)
@@ -12,22 +12,21 @@ func TestShiftConstAMD64(t *testing.T) {
 	c := testConfig(t)
 	fun := makeConstShiftFunc(c, 18, OpLsh64x64, TypeUInt64)
 	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
-	fun.f.Free()
 	fun = makeConstShiftFunc(c, 66, OpLsh64x64, TypeUInt64)
 	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
-	fun.f.Free()
 	fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, TypeUInt64)
 	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
-	fun.f.Free()
 	fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, TypeUInt64)
 	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
-	fun.f.Free()
 	fun = makeConstShiftFunc(c, 18, OpRsh64x64, TypeInt64)
 	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
-	fun.f.Free()
 	fun = makeConstShiftFunc(c, 66, OpRsh64x64, TypeInt64)
 	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
-	fun.f.Free()
 }
 func makeConstShiftFunc(c *Config, amount int64, op Op, typ Type) fun {
@@ -80,7 +79,6 @@ func TestShiftToExtensionAMD64(t *testing.T) {
 	for _, tc := range tests {
 		fun := makeShiftExtensionFunc(c, tc.amount, tc.left, tc.right, tc.typ)
 		checkOpcodeCounts(t, fun.f, ops)
-		fun.f.Free()
 	}
 }
@@ -35,7 +35,7 @@ type stackAllocState struct {
 }
 func newStackAllocState(f *Func) *stackAllocState {
-	s := f.Config.stackAllocState
+	s := f.Cache.stackAllocState
 	if s == nil {
 		return new(stackAllocState)
 	}
@@ -61,7 +61,7 @@ func putStackAllocState(s *stackAllocState) {
 	for i := range s.used {
 		s.used[i] = false
 	}
-	s.f.Config.stackAllocState = s
+	s.f.Cache.stackAllocState = s
 	s.f = nil
 	s.live = nil
 	s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0