Commit be64a19d authored by Dan Scales

cmd/compile, cmd/link, runtime: make defers low-cost through inline code and extra funcdata

Generate inline code at defer time to save the args of defer calls to unique
(autotmp) stack slots, and generate inline code at exit time to check which defer
calls were made and make the associated function/method/interface calls. We
remember that a particular defer statement was reached by storing in the deferBits
variable (always stored on the stack). At exit time, we check the bits of the
deferBits variable to determine which defer function calls to make (in reverse
order). These low-cost defers are only used for functions where no defers
appear in loops. In addition, we don't do these low-cost defers if there are too
many defer statements or too many exits in a function (to limit code increase).
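As a rough illustration of the lowering described above, here is a hand-written Go
sketch, not actual compiler output (the real transformation happens on SSA), showing
a function with one unconditional and one conditional defer; the deferBits and arg
slot names follow the description above:

package main

import "fmt"

func f(x int) { fmt.Println("f", x) }
func g(x int) { fmt.Println("g", x) }

// example shows, in source form, what the inline lowering conceptually does:
// defer arguments are saved to dedicated stack slots at the defer site, a bit
// in deferBits records that the defer was reached, and the exit sequence
// checks the bits and runs the recorded defers in reverse order.
func example(a, b int) {
	var deferBits uint8 // always kept on the stack
	var arg0, arg1 int  // autotmp-style slots for the deferred calls' args

	// defer f(a)
	arg0 = a
	deferBits |= 1 << 0

	if b > 0 {
		// defer g(b) -- reached only on this path
		arg1 = b
		deferBits |= 1 << 1
	}

	// Inline defer exit: run the recorded defers in reverse order.
	if deferBits&(1<<1) != 0 {
		g(arg1)
	}
	if deferBits&(1<<0) != 0 {
		f(arg0)
	}
}

func main() { example(1, 2) }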

When a function uses open-coded defers, we produce extra
FUNCDATA_OpenCodedDeferInfo information that specifies the number of defers, and
for each defer, the stack slots where the closure and associated args have been
stored. The funcdata also includes the location of the deferBits variable.
Therefore, for panics, we can use this funcdata to determine exactly which defers
are active, and call the appropriate functions/methods/closures with the correct
arguments for each active defer.
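Schematically, and with purely illustrative field names (the actual funcdata is a
compact byte encoding emitted by the compiler, not a Go struct), the per-function
information amounts to:

// openDeferInfo is a hypothetical, schematic view of what
// FUNCDATA_OpenCodedDeferInfo records; the real encoding is compiler-internal.
type openDeferInfo struct {
	deferBitsOffset int32 // stack offset of the deferBits variable
	defers          []struct { // one entry per defer site in the function
		closureOffset int32   // stack slot holding the deferred func value
		argOffsets    []int32 // stack slots holding the saved arguments
	}
}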

In order to unwind the stack correctly after a recover(), we need to add an extra
code segment to functions with open-coded defers that simply calls deferreturn()
and returns. This segment is not reachable by the normal function, but is returned
to by the runtime during recovery. We set the liveness information of this
deferreturn() to be the same as the liveness at the first function call during the
last defer exit code (so all return values and all stack slots needed by the defer
calls will be live).

I needed to increase the stackguard constant from 880 to 896, because of a small
amount of new code in deferreturn().

The -N flag disables open-coded defers. '-d defer' prints out the kind of defer
being used at each defer statement (heap-allocated, stack-allocated, or
open-coded).

Cost of defer statement  [ go test -run NONE -bench BenchmarkDefer$ runtime ]
  With normal (stack-allocated) defers only:         35.4  ns/op
  With open-coded defers:                             5.6  ns/op
  Cost of function call alone (remove defer keyword): 4.4  ns/op

Text size increase (including funcdata) for go binary without/with open-coded defers:  0.09%

The average size increase (including funcdata) for only the functions that use
open-coded defers is 1.1%.

A panic followed by a recover got noticeably slower, since panic processing now
requires a scan of the stack for open-coded defer frames. This scan is required
even if no frame uses open-coded defers:

Cost of panic and recover [ go test -run NONE -bench BenchmarkPanicRecover runtime ]
  Without open-coded defers:        62.0 ns/op
  With open-coded defers:           255  ns/op
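For reference, a standalone sketch of benchmarks in the same spirit as the two
above (the real BenchmarkDefer and BenchmarkPanicRecover live in the runtime
package; the names and file layout here are hypothetical). Put it in a file ending
in _test.go and run it with 'go test -bench .':

package defers

import "testing"

//go:noinline
func withDefer(x *int) {
	defer func() { *x++ }()
	*x++
}

// BenchmarkDeferSketch measures the per-call cost of a function containing a
// single defer, analogous to the defer-cost numbers above.
func BenchmarkDeferSketch(b *testing.B) {
	var n int
	for i := 0; i < b.N; i++ {
		withDefer(&n)
	}
}

// BenchmarkPanicRecoverSketch measures a panic immediately recovered by a
// deferred func, analogous to the panic/recover numbers above.
func BenchmarkPanicRecoverSketch(b *testing.B) {
	for i := 0; i < b.N; i++ {
		func() {
			defer func() {
				_ = recover()
			}()
			panic("boom")
		}()
	}
}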

A CGO Go-to-C-to-Go benchmark got noticeably faster because of open-coded defers:

CGO Go-to-C-to-Go benchmark [cd misc/cgo/test; go test -run NONE -bench BenchmarkCGoCallback ]
  Without open-coded defers:        443 ns/op
  With open-coded defers:           347 ns/op

Updates #14939 (defer performance)
Updates #34481 (design doc)

Change-Id: I63b1a60d1ebf28126f55ee9fd7ecffe9cb23d1ff
Reviewed-on: https://go-review.googlesource.com/c/go/+/202340
Reviewed-by: Austin Clements <austin@google.com>
parent dc77dc2b
......@@ -880,7 +880,9 @@ func (e *Escape) augmentParamHole(k EscHole, where *Node) EscHole {
// non-transient location to avoid arguments from being
// transiently allocated.
if where.Op == ODEFER && e.loopDepth == 1 {
where.Esc = EscNever // force stack allocation of defer record (see ssa.go)
// force stack allocation of defer record, unless open-coded
// defers are used (see ssa.go)
where.Esc = EscNever
return e.later(k)
}
......
......@@ -53,6 +53,7 @@ var (
Debug_typecheckinl int
Debug_gendwarfinl int
Debug_softfloat int
Debug_defer int
)
// Debug arguments.
......@@ -83,6 +84,7 @@ var debugtab = []struct {
{"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl},
{"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl},
{"softfloat", "force compiler to emit soft-float code", &Debug_softfloat},
{"defer", "print information about defer compilation", &Debug_defer},
}
const debugHelpHeader = `usage: -d arg[,arg]* and arg is <key>[=<value>]
......
......@@ -294,6 +294,9 @@ func addGCLocals() {
}
ggloblsym(x, int32(len(x.P)), attr)
}
if x := s.Func.OpenCodedDeferInfo; x != nil {
ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
}
}
}
......
......@@ -863,7 +863,16 @@ func (lv *Liveness) solve() {
newliveout.vars.Set(pos)
}
case ssa.BlockExit:
// panic exit - nothing to do
if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() {
// All stack slots storing args for open-coded
// defers are live at panic exit (since they
// will be used in running defers)
for i, n := range lv.vars {
if n.Name.OpenDeferSlot() {
newliveout.vars.Set(int32(i))
}
}
}
default:
// A variable is live on output from this block
// if it is live on input to some successor.
......
......@@ -317,6 +317,7 @@ func deferstruct(stksize int64) *types.Type {
makefield("siz", types.Types[TUINT32]),
makefield("started", types.Types[TBOOL]),
makefield("heap", types.Types[TBOOL]),
makefield("openDefer", types.Types[TBOOL]),
makefield("sp", types.Types[TUINTPTR]),
makefield("pc", types.Types[TUINTPTR]),
// Note: the types here don't really matter. Defer structures
......@@ -325,6 +326,9 @@ func deferstruct(stksize int64) *types.Type {
makefield("fn", types.Types[TUINTPTR]),
makefield("_panic", types.Types[TUINTPTR]),
makefield("link", types.Types[TUINTPTR]),
makefield("framepc", types.Types[TUINTPTR]),
makefield("varp", types.Types[TUINTPTR]),
makefield("fd", types.Types[TUINTPTR]),
makefield("args", argtype),
}
......
......@@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
{Func{}, 116, 208},
{Func{}, 124, 224},
{Name{}, 32, 56},
{Param{}, 24, 48},
{Node{}, 76, 128},
......
......@@ -295,6 +295,7 @@ const (
nameAddrtaken // address taken, even if not moved to heap
nameInlFormal // OPAUTO created by inliner, derived from callee formal
nameInlLocal // OPAUTO created by inliner, derived from callee local
nameOpenDeferSlot // if temporary var storing info for open-coded defers
)
func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 }
......@@ -310,6 +311,7 @@ func (n *Name) Assigned() bool { return n.flags&nameAssigned != 0 }
func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 }
func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 }
func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 }
func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 }
func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) }
func (n *Name) SetReadonly(b bool) { n.flags.set(nameReadonly, b) }
......@@ -324,6 +326,7 @@ func (n *Name) SetAssigned(b bool) { n.flags.set(nameAssigned, b) }
func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }
func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) }
func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) }
type Param struct {
Ntype *Node
......@@ -491,7 +494,9 @@ type Func struct {
Pragma syntax.Pragma // go:xxx function annotations
flags bitset16
flags bitset16
numDefers int // number of defer calls in the function
numReturns int // number of explicit returns in the function
// nwbrCalls records the LSyms of functions called by this
// function for go:nowritebarrierrec analysis. Only filled in
......@@ -527,34 +532,37 @@ const (
funcNeedctxt // function uses context register (has closure variables)
funcReflectMethod // function calls reflect.Type.Method or MethodByName
funcIsHiddenClosure
funcHasDefer // contains a defer statement
funcNilCheckDisabled // disable nil checks when compiling this function
funcInlinabilityChecked // inliner has already determined whether the function is inlinable
funcExportInline // include inline body in export data
funcInstrumentBody // add race/msan instrumentation during SSA construction
funcHasDefer // contains a defer statement
funcNilCheckDisabled // disable nil checks when compiling this function
funcInlinabilityChecked // inliner has already determined whether the function is inlinable
funcExportInline // include inline body in export data
funcInstrumentBody // add race/msan instrumentation during SSA construction
funcOpenCodedDeferDisallowed // can't do open-coded defers
)
func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 }
func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 }
func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 }
func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 }
func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 }
func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 }
func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 }
func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 }
func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 }
func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 }
func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) }
func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) }
func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) }
func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) }
func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) }
func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) }
func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) }
func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 }
func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 }
func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 }
func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 }
func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 }
func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 }
func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 }
func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 }
func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 }
func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 }
func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) }
func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) }
func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) }
func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) }
func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) }
func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) }
func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) }
func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
func (f *Func) setWBPos(pos src.XPos) {
if Debug_wb != 0 {
......
......@@ -214,6 +214,18 @@ func walkstmt(n *Node) *Node {
case ODEFER:
Curfn.Func.SetHasDefer(true)
Curfn.Func.numDefers++
if Curfn.Func.numDefers > maxOpenDefers {
// Don't allow open-coded defers if there are more than
// 8 defers in the function, since we use a single
// byte to record active defers.
Curfn.Func.SetOpenCodedDeferDisallowed(true)
}
if n.Esc != EscNever {
// If n.Esc is not EscNever, then this defer occurs in a loop,
// so open-coded defers cannot be used in this function.
Curfn.Func.SetOpenCodedDeferDisallowed(true)
}
fallthrough
case OGO:
switch n.Left.Op {
......@@ -255,6 +267,7 @@ func walkstmt(n *Node) *Node {
walkstmtlist(n.Rlist.Slice())
case ORETURN:
Curfn.Func.numReturns++
if n.List.Len() == 0 {
break
}
......
......@@ -170,6 +170,11 @@ func elimDeadAutosGeneric(f *Func) {
return
case OpVarLive:
// Don't delete the auto if it needs to be kept alive.
// We depend on this check to keep the autotmp stack slots
// for open-coded defers from being removed (since they
// may not be used by the inline code, but will be used by
// panic processing).
n, ok := v.Aux.(GCNode)
if !ok || n.StorageClass() != ClassAuto {
return
......
......@@ -32,8 +32,16 @@ type Func struct {
Type *types.Type // type signature of the function.
Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
Entry *Block // the entry basic block
bid idAlloc // block ID allocator
vid idAlloc // value ID allocator
// If we are using open-coded defers, this is the first call to a deferred
// function in the final defer exit sequence that we generated. This call
// should be after all defer statements, and will have all args, etc. of
// all defer calls as live. The liveness info of this call will be used
// for the deferreturn/ret segment generated for functions with open-coded
// defers.
LastDeferExit *Value
bid idAlloc // block ID allocator
vid idAlloc // value ID allocator
// Given an environment variable used for debug hash match,
// what file (if any) receives the yes/no logging?
......
......@@ -405,10 +405,11 @@ type FuncInfo struct {
dwarfAbsFnSym *LSym
dwarfDebugLinesSym *LSym
GCArgs *LSym
GCLocals *LSym
GCRegs *LSym
StackObjects *LSym
GCArgs *LSym
GCLocals *LSym
GCRegs *LSym
StackObjects *LSym
OpenCodedDeferInfo *LSym
}
type InlMark struct {
......
......@@ -419,6 +419,9 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
// to a PLT, so make sure the GOT pointer is loaded into BX.
// RegTo2 is set on the replacement call insn to stop it being
// processed when it is in turn passed to progedit.
//
// We disable open-coded defers in buildssa() on 386 ONLY with shared
// libraries because of this extra code added before deferreturn calls.
if ctxt.Arch.Family == sys.AMD64 || (p.To.Sym != nil && p.To.Sym.Local()) || p.RegTo2 != 0 {
return
}
......
......@@ -15,11 +15,12 @@ const (
PCDATA_StackMapIndex = 1
PCDATA_InlTreeIndex = 2
FUNCDATA_ArgsPointerMaps = 0
FUNCDATA_LocalsPointerMaps = 1
FUNCDATA_RegPointerMaps = 2
FUNCDATA_StackObjects = 3
FUNCDATA_InlTree = 4
FUNCDATA_ArgsPointerMaps = 0
FUNCDATA_LocalsPointerMaps = 1
FUNCDATA_RegPointerMaps = 2
FUNCDATA_StackObjects = 3
FUNCDATA_InlTree = 4
FUNCDATA_OpenCodedDeferInfo = 5
// ArgsSizeUnknown is set in Func.argsize to mark all functions
// whose argument size is unknown (C vararg functions, and
......
......@@ -85,6 +85,12 @@ func GetFuncID(name, file string) FuncID {
return FuncID_panicwrap
case "runtime.handleAsyncEvent":
return FuncID_handleAsyncEvent
case "runtime.deferreturn":
// Don't show in the call stack (used when invoking defer functions)
return FuncID_wrapper
case "runtime.runOpenDeferFrame":
// Don't show in the call stack (used when invoking defer functions)
return FuncID_wrapper
}
if file == "<autogenerated>" {
return FuncID_wrapper
......
......@@ -18,7 +18,7 @@ const (
)
// Initialize StackGuard and StackLimit according to target system.
var StackGuard = 880*stackGuardMultiplier() + StackSystem
var StackGuard = 896*stackGuardMultiplier() + StackSystem
var StackLimit = StackGuard - StackSystem - StackSmall
// stackGuardMultiplier returns a multiplier to apply to the default
......
......@@ -11,6 +11,7 @@ import (
"cmd/internal/sys"
"cmd/link/internal/sym"
"encoding/binary"
"fmt"
"log"
"os"
"path/filepath"
......@@ -255,13 +256,23 @@ func (ctxt *Link) pclntab() {
}
if r.Type.IsDirectJump() && r.Sym != nil && r.Sym.Name == "runtime.deferreturn" {
if ctxt.Arch.Family == sys.Wasm {
deferreturn = lastWasmAddr
deferreturn = lastWasmAddr - 1
} else {
// Note: the relocation target is in the call instruction, but
// is not necessarily the whole instruction (for instance, on
// x86 the relocation applies to bytes [1:5] of the 5 byte call
// instruction).
deferreturn = uint32(r.Off)
switch ctxt.Arch.Family {
case sys.AMD64, sys.I386:
deferreturn--
case sys.PPC64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64:
// no change
case sys.S390X:
deferreturn -= 2
default:
panic(fmt.Sprint("Unhandled architecture:", ctxt.Arch.Family))
}
}
break // only need one
}
......
......@@ -498,7 +498,8 @@ func (ctxt *Link) symtab() {
case strings.HasPrefix(s.Name, "gcargs."),
strings.HasPrefix(s.Name, "gclocals."),
strings.HasPrefix(s.Name, "gclocals·"),
strings.HasPrefix(s.Name, "inltree."):
strings.HasPrefix(s.Name, "inltree."),
strings.HasSuffix(s.Name, ".opendefer"):
s.Type = sym.SGOFUNC
s.Attr |= sym.AttrNotInSymbolTable
s.Outer = symgofunc
......
......@@ -188,3 +188,32 @@ func TestCallersDivZeroPanic(t *testing.T) {
t.Fatal("did not see divide-by-sizer panic")
}
}
func TestCallersDeferNilFuncPanic(t *testing.T) {
// Make sure we don't have any extra frames on the stack. We cut off the check
// at runtime.sigpanic, because non-open-coded defers (which may be used in
// non-opt or race checker mode) include an extra 'jmpdefer' frame (which is
// where the nil pointer deref happens). We could consider hiding jmpdefer in
// tracebacks.
state := 1
want := []string{"runtime.Callers", "runtime_test.TestCallersDeferNilFuncPanic.func1",
"runtime.gopanic", "runtime.panicmem", "runtime.sigpanic"}
defer func() {
if r := recover(); r == nil {
t.Fatal("did not panic")
}
pcs := make([]uintptr, 20)
pcs = pcs[:runtime.Callers(0, pcs)]
testCallersEqual(t, pcs, want)
if state == 1 {
t.Fatal("nil defer func panicked at defer time rather than function exit time")
}
}()
var f func()
defer f()
// Use the value of 'state' to make sure nil defer func f causes panic at
// function exit, rather than at the defer statement.
state = 2
}
......@@ -15,11 +15,11 @@ import (
// unconditional panic (hence no return from the function)
func TestUnconditionalPanic(t *testing.T) {
defer func() {
if recover() == nil {
if recover() != "testUnconditional" {
t.Fatal("expected unconditional panic")
}
}()
panic("panic should be recovered")
panic("testUnconditional")
}
var glob int = 3
......@@ -30,7 +30,7 @@ func TestOpenAndNonOpenDefers(t *testing.T) {
for {
// Non-open defer because in a loop
defer func(n int) {
if recover() == nil {
if recover() != "testNonOpenDefer" {
t.Fatal("expected testNonOpen panic")
}
}(3)
......@@ -45,7 +45,7 @@ func TestOpenAndNonOpenDefers(t *testing.T) {
//go:noinline
func testOpen(t *testing.T, arg int) {
defer func(n int) {
if recover() == nil {
if recover() != "testOpenDefer" {
t.Fatal("expected testOpen panic")
}
}(4)
......@@ -61,7 +61,7 @@ func TestNonOpenAndOpenDefers(t *testing.T) {
for {
// Non-open defer because in a loop
defer func(n int) {
if recover() == nil {
if recover() != "testNonOpenDefer" {
t.Fatal("expected testNonOpen panic")
}
}(3)
......@@ -80,7 +80,7 @@ func TestConditionalDefers(t *testing.T) {
list = make([]int, 0, 10)
defer func() {
if recover() == nil {
if recover() != "testConditional" {
t.Fatal("expected panic")
}
want := []int{4, 2, 1}
......@@ -106,7 +106,7 @@ func testConditionalDefers(n int) {
defer doappend(4)
}
}
panic("test")
panic("testConditional")
}
// Test that there is no compile-time or run-time error if an open-coded defer
......@@ -174,3 +174,52 @@ func TestRecoverMatching(t *testing.T) {
}()
panic("panic1")
}
type nonSSAable [128]byte
type bigStruct struct {
x, y, z, w, p, q int64
}
func mknonSSAable() nonSSAable {
globint1++
return nonSSAable{0, 0, 0, 0, 5}
}
var globint1, globint2 int
//go:noinline
func sideeffect(n int64) int64 {
globint2++
return n
}
// Test that nonSSAable arguments to defer are handled correctly and only evaluated once.
func TestNonSSAableArgs(t *testing.T) {
globint1 = 0
globint2 = 0
var save1 byte
var save2 int64
defer func() {
if globint1 != 1 {
t.Fatal(fmt.Sprintf("globint1: wanted: 1, got %v", globint1))
}
if save1 != 5 {
t.Fatal(fmt.Sprintf("save1: wanted: 5, got %v", save1))
}
if globint2 != 1 {
t.Fatal(fmt.Sprintf("globint2: wanted: 1, got %v", globint2))
}
if save2 != 2 {
t.Fatal(fmt.Sprintf("save2: wanted: 2, got %v", save2))
}
}()
defer func(n nonSSAable) {
save1 = n[4]
}(mknonSSAable())
defer func(b bigStruct) {
save2 = b.y
}(bigStruct{1, 2, 3, 4, 5, sideeffect(6)})
}
......@@ -17,6 +17,7 @@
#define FUNCDATA_RegPointerMaps 2
#define FUNCDATA_StackObjects 3
#define FUNCDATA_InlTree 4
#define FUNCDATA_OpenCodedDeferInfo 5 /* info for func with open-coded defers */
// Pseudo-assembly statements.
......
......@@ -720,7 +720,7 @@ type _func struct {
nameoff int32 // function name
args int32 // in/out args size
deferreturn uint32 // offset of a deferreturn block from entry, if any.
deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
pcsp int32
pcfile int32
......@@ -793,7 +793,7 @@ func extendRandom(r []byte, n int) {
}
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer.
// If you add a field here, add code to clear it in freedefer and deferProcStack
// This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct
// and cmd/compile/internal/gc/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
......@@ -804,11 +804,27 @@ type _defer struct {
siz int32 // includes both arguments and results
started bool
heap bool
sp uintptr // sp at time of defer
pc uintptr
fn *funcval
_panic *_panic // panic that is running defer
link *_defer
// openDefer indicates that this _defer is for a frame with open-coded
// defers. We have only one defer record for the entire frame (which may
// currently have 0, 1, or more defers active).
openDefer bool
sp uintptr // sp at time of defer
pc uintptr // pc at time of defer
fn *funcval
_panic *_panic // panic that is running defer
link *_defer
// If openDefer is true, the fields below record values about the stack
// frame and associated function that has the open-coded defer(s). sp
// above will be the sp for the frame, and pc will be address of the
// deferreturn call in the function.
fd unsafe.Pointer // funcdata for the function associated with the frame
varp uintptr // value of varp for the stack frame
// framepc is the current pc associated with the stack frame. Together,
// with sp above (which is the sp associated with the stack frame),
// framepc/sp can be used as pc/sp pair to continue a stack trace via
// gentraceback().
framepc uintptr
}
// A _panic holds information about an active panic.
......
......@@ -91,7 +91,7 @@ const (
// The stack guard is a pointer this many bytes above the
// bottom of the stack.
_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem
_StackGuard = 896*sys.StackGuardMultiplier + _StackSystem
// After a stack split check the SP is allowed to be this
// many bytes below the stack guard. This saves an instruction
......@@ -736,6 +736,8 @@ func adjustdefers(gp *g, adjinfo *adjustinfo) {
adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
adjustpointer(adjinfo, unsafe.Pointer(&d.link))
adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
}
// Adjust defer argument blocks the same way we adjust active stack frames.
......
......@@ -216,11 +216,12 @@ const (
_PCDATA_StackMapIndex = 1
_PCDATA_InlTreeIndex = 2
_FUNCDATA_ArgsPointerMaps = 0
_FUNCDATA_LocalsPointerMaps = 1
_FUNCDATA_RegPointerMaps = 2
_FUNCDATA_StackObjects = 3
_FUNCDATA_InlTree = 4
_FUNCDATA_ArgsPointerMaps = 0
_FUNCDATA_LocalsPointerMaps = 1
_FUNCDATA_RegPointerMaps = 2
_FUNCDATA_StackObjects = 3
_FUNCDATA_InlTree = 4
_FUNCDATA_OpenCodedDeferInfo = 5
_ArgsSizeUnknown = -0x80000000
)
......
// errorcheck -0 -l -d=defer
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// check that open-coded defers are used in expected situations
package main
import "fmt"
var glob = 3
func f1() {
for i := 0; i < 10; i++ {
fmt.Println("loop")
}
defer func() { // ERROR "open-coded defer"
fmt.Println("defer")
}()
}
func f2() {
for {
defer func() { // ERROR "heap-allocated defer"
fmt.Println("defer1")
}()
if glob > 2 {
break
}
}
defer func() { // ERROR "stack-allocated defer"
fmt.Println("defer2")
}()
}
func f3() {
defer func() { // ERROR "stack-allocated defer"
fmt.Println("defer2")
}()
for {
defer func() { // ERROR "heap-allocated defer"
fmt.Println("defer1")
}()
if glob > 2 {
break
}
}
}
func f4() {
defer func() { // ERROR "open-coded defer"
fmt.Println("defer")
}()
label:
fmt.Println("goto loop")
if glob > 2 {
goto label
}
}
func f5() {
label:
fmt.Println("goto loop")
defer func() { // ERROR "heap-allocated defer"
fmt.Println("defer")
}()
if glob > 2 {
goto label
}
}
func f6() {
label:
fmt.Println("goto loop")
if glob > 2 {
goto label
}
// The current analysis doesn't end a backward goto loop, so this defer is
// considered to be inside a loop
defer func() { // ERROR "heap-allocated defer"
fmt.Println("defer")
}()
}
......@@ -367,16 +367,19 @@ func f24() {
m2[[2]string{"x", "y"}] = nil
}
// defer should not cause spurious ambiguously live variables
// Non-open-coded defers should not cause autotmps. (Open-coded defers do create extra autotmps).
func f25(b bool) {
defer g25()
for i := 0; i < 2; i++ {
// Put in loop to make sure defer is not open-coded
defer g25()
}
if b {
return
}
var x string
x = g14()
printstring(x)
return
}
func g25()
......@@ -417,7 +420,8 @@ func f27defer(b bool) {
defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
}
defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
printnl()
printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ .autotmp_[0-9]+"
return // ERROR "live at call to call27: .autotmp_[0-9]+"
}
// and newproc (go) escapes to the heap
......@@ -687,12 +691,12 @@ type R struct{ *T } // ERRORAUTO "live at entry to \(\*R\)\.Foo: \.this ptr" "li
// In particular, at printint r must be live.
func f41(p, q *int) (r *int) { // ERROR "live at entry to f41: p q$"
r = p
defer func() { // ERROR "live at call to deferprocStack: q r$" "live at call to deferreturn: r$"
defer func() {
recover()
}()
printint(0) // ERROR "live at call to printint: q r$"
printint(0) // ERROR "live at call to printint: q r .autotmp_[0-9]+$"
r = q
return // ERROR "live at call to deferreturn: r$"
return // ERROR "live at call to f41.func1: r .autotmp_[0-9]+$"
}
func f42() {
......
......@@ -309,17 +309,17 @@ TestCases:
name := m[1]
size, _ := strconv.Atoi(m[2])
// The limit was originally 128 but is now 752 (880-128).
// The limit was originally 128 but is now 768 (896-128).
// Instead of rewriting the test cases above, adjust
// the first stack frame to use up the extra bytes.
if i == 0 {
size += (880 - 128) - 128
size += (896 - 128) - 128
// Noopt builds have a larger stackguard.
// See ../src/cmd/dist/buildruntime.go:stackGuardMultiplier
// This increase is included in objabi.StackGuard
for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
if s == "-N" {
size += 880
size += 896
}
}
}
......