Commit 3c8545c5 authored by Giovanni Bajo

cmd/compile: reduce allocations in prove by reusing posets

In prove, reuse posets between different functions by storing them
in the per-worker cache.

Allocation count regression caused by prove improvements is down
from 5% to 3% after this CL.

Updates #25179

Change-Id: I6d14003109833d9b3ef5165fdea00aa9c9e952e8
Reviewed-on: https://go-review.googlesource.com/110455
Run-TryBot: Giovanni Bajo <rasky@develer.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
parent 67656ba7
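
The CL reuses posets the same way the SSA backend already reuses sparse sets and sparse maps: a slice in the per-worker Cache serves as a LIFO free list, so an object allocated while compiling one function can be handed to the next function compiled on the same worker instead of being garbage collected. A minimal standalone sketch of that pattern (the `object`, `workerCache`, `get`, and `put` names are illustrative, not from the commit):

```go
package main

import "fmt"

// object stands in for an expensive-to-allocate structure such as a poset.
type object struct{ data []int }

// workerCache mimics the per-worker ssa.Cache: freeList is the scratch slice
// that survives between passes run on the same worker.
type workerCache struct{ freeList []*object }

// get pops a cached object if one is available, otherwise allocates a new one.
func (c *workerCache) get() *object {
	if n := len(c.freeList); n > 0 {
		o := c.freeList[n-1]
		c.freeList = c.freeList[:n-1]
		return o
	}
	return &object{data: make([]int, 0, 16)}
}

// put returns an object to the free list so the next pass can reuse it.
func (c *workerCache) put(o *object) {
	o.data = o.data[:0] // reset before reuse
	c.freeList = append(c.freeList, o)
}

func main() {
	var c workerCache
	a := c.get()
	c.put(a)
	b := c.get()
	fmt.Println(a == b) // true: the second request reused the first allocation
}
```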
@@ -24,6 +24,7 @@ type Cache struct {
 	domblockstore []ID         // scratch space for computing dominators
 	scrSparseSet  []*sparseSet // scratch sparse sets to be re-used.
 	scrSparseMap  []*sparseMap // scratch sparse maps to be re-used.
+	scrPoset      []*poset     // scratch poset to be reused

 	ValueToProgAfter []*obj.Prog
 	debugState       debugState
...
@@ -130,6 +130,21 @@ func (f *Func) retSparseMap(ss *sparseMap) {
 	f.Cache.scrSparseMap = append(f.Cache.scrSparseMap, ss)
 }

+// newPoset returns a new poset from the internal cache
+func (f *Func) newPoset() *poset {
+	if len(f.Cache.scrPoset) > 0 {
+		po := f.Cache.scrPoset[len(f.Cache.scrPoset)-1]
+		f.Cache.scrPoset = f.Cache.scrPoset[:len(f.Cache.scrPoset)-1]
+		return po
+	}
+	return newPoset()
+}
+
+// retPoset returns a poset to the internal cache
+func (f *Func) retPoset(po *poset) {
+	f.Cache.scrPoset = append(f.Cache.scrPoset, po)
+}
+
 // newValue allocates a new Value with the given fields and places it at the end of b.Values.
 func (f *Func) newValue(op Op, t *types.Type, b *Block, pos src.XPos) *Value {
 	var v *Value
...
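
newPoset pops a cached poset if one is available and falls back to allocating a fresh one, while retPoset pushes it back for the next function. A caller inside package ssa is expected to pair the two helpers and to hand the poset back in an empty state, as prove does further down. A hedged in-package sketch of such a caller (examplePass and its body are hypothetical; only newPoset and retPoset come from this CL):

```go
// examplePass is a hypothetical pass illustrating the intended pairing of
// f.newPoset and f.retPoset; prove is the only real caller in this CL.
func examplePass(f *Func) {
	po := f.newPoset()
	po.SetUnsigned(false) // configure the recycled poset for the signed order

	// ... record facts on po and undo them all before handing it back ...

	f.retPoset(po) // push the (now empty) poset back onto Cache.scrPoset
}
```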
@@ -152,13 +152,8 @@ type poset struct {
 	undo []posetUndo // undo chain
 }

-func newPoset(unsigned bool) *poset {
-	var flags uint8
-	if unsigned {
-		flags |= posetFlagUnsigned
-	}
+func newPoset() *poset {
 	return &poset{
-		flags:     flags,
 		values:    make(map[ID]uint32),
 		constants: make([]*Value, 0, 8),
 		nodes:     make([]posetNode, 1, 16),
@@ -168,6 +163,14 @@ func newPoset(unsigned bool) *poset {
 	}
 }

+func (po *poset) SetUnsigned(uns bool) {
+	if uns {
+		po.flags |= posetFlagUnsigned
+	} else {
+		po.flags &^= posetFlagUnsigned
+	}
+}
+
 // Handle children
 func (po *poset) setchl(i uint32, l posetEdge) { po.nodes[i].l = l }
 func (po *poset) setchr(i uint32, r posetEdge) { po.nodes[i].r = r }
...
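
Because a recycled poset may previously have tracked the other ordering, the unsigned flag moves out of the constructor into SetUnsigned, which sets or clears the bit on the reused object; `&^=` is Go's AND NOT (bit clear) assignment. A standalone illustration of the set/clear idiom (flagUnsigned and setUnsigned are illustrative names, not from the commit):

```go
package main

import "fmt"

const flagUnsigned uint8 = 1 << 0 // illustrative stand-in for posetFlagUnsigned

// setUnsigned sets the flag bit when uns is true and clears it otherwise.
func setUnsigned(flags uint8, uns bool) uint8 {
	if uns {
		flags |= flagUnsigned // set the bit
	} else {
		flags &^= flagUnsigned // clear the bit (AND NOT)
	}
	return flags
}

func main() {
	var flags uint8
	flags = setUnsigned(flags, true)
	fmt.Printf("%08b\n", flags) // 00000001
	flags = setUnsigned(flags, false)
	fmt.Printf("%08b\n", flags) // 00000000
}
```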
@@ -64,7 +64,8 @@ func testPosetOps(t *testing.T, unsigned bool, ops []posetTestOp) {
 		}
 	}

-	po := newPoset(unsigned)
+	po := newPoset()
+	po.SetUnsigned(unsigned)
 	for idx, op := range ops {
 		t.Logf("op%d%v", idx, op)
 		switch op.typ {
...
@@ -181,10 +181,12 @@ type factsTable struct {
 var checkpointFact = fact{}
 var checkpointBound = limitFact{}

-func newFactsTable() *factsTable {
+func newFactsTable(f *Func) *factsTable {
 	ft := &factsTable{}
-	ft.order[0] = newPoset(false) // signed
-	ft.order[1] = newPoset(true)  // unsigned
+	ft.order[0] = f.newPoset() // signed
+	ft.order[1] = f.newPoset() // unsigned
+	ft.order[0].SetUnsigned(false)
+	ft.order[1].SetUnsigned(true)
 	ft.facts = make(map[pair]relation)
 	ft.stack = make([]fact, 4)
 	ft.limits = make(map[ID]limit)
@@ -666,7 +668,8 @@ var (
 // its negation. If either leads to a contradiction, it can trim that
 // successor.
 func prove(f *Func) {
-	ft := newFactsTable()
+	ft := newFactsTable(f)
+	ft.checkpoint()

 	// Find length and capacity ops.
 	var zero *Value
@@ -794,6 +797,20 @@ func prove(f *Func) {
 			ft.restore()
 		}
 	}
+
+	ft.restore()
+
+	// Return the posets to the free list
+	for _, po := range ft.order {
+		// Make sure it's empty as it should be. A non-empty poset
+		// might cause errors and miscompilations if reused.
+		if checkEnabled {
+			if err := po.CheckEmpty(); err != nil {
+				f.Fatalf("prove poset not empty after function %s: %v", f.Name, err)
+			}
+		}
+		f.retPoset(po)
+	}
 }

 // getBranch returns the range restrictions added by p
...
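
prove now brackets the whole pass with ft.checkpoint() at entry and ft.restore() at exit, so every fact recorded into the posets is undone before they are pushed back onto the free list; when checkEnabled is set, CheckEmpty asserts that the undo really brought them back to the empty state. A toy standalone sketch of that checkpoint/undo discipline (undoLog and its methods are illustrative, not the facts table's actual implementation):

```go
package main

import "fmt"

// undoLog illustrates the checkpoint/restore discipline: every mutation
// records how to undo itself, and restore rolls back to the last checkpoint,
// leaving the structure empty again if the checkpoint was taken while empty.
type undoLog struct {
	values map[string]int
	undo   []func()
	marks  []int
}

func newUndoLog() *undoLog { return &undoLog{values: map[string]int{}} }

// checkpoint remembers the current length of the undo chain.
func (l *undoLog) checkpoint() { l.marks = append(l.marks, len(l.undo)) }

// set records a value and pushes an undo step that restores the old state.
func (l *undoLog) set(k string, v int) {
	old, existed := l.values[k]
	l.undo = append(l.undo, func() {
		if existed {
			l.values[k] = old
		} else {
			delete(l.values, k)
		}
	})
	l.values[k] = v
}

// restore replays the undo chain back to the most recent checkpoint.
func (l *undoLog) restore() {
	mark := l.marks[len(l.marks)-1]
	l.marks = l.marks[:len(l.marks)-1]
	for i := len(l.undo) - 1; i >= mark; i-- {
		l.undo[i]()
	}
	l.undo = l.undo[:mark]
}

func main() {
	l := newUndoLog()
	l.checkpoint() // like ft.checkpoint() at the top of prove
	l.set("x", 1)
	l.set("y", 2)
	l.restore()                // like ft.restore() at the end of prove
	fmt.Println(len(l.values)) // 0: back to the empty state, safe to reuse
}
```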