Commit 6adaf17e authored by David Chase

cmd/compile: preserve statements in late nilcheckelim optimization

When a subsequent load/store of a ptr makes the nil check of that pointer
unnecessary, if their lines differ, change the line of the load/store
to that of the nilcheck, and attempt to rehome the load/store position
instead.

This fix makes profiling less accurate in order to make panics more
informative.

Fixes #33724

Change-Id: Ib9afaac12fe0d0320aea1bf493617facc34034b3
Reviewed-on: https://go-review.googlesource.com/c/go/+/200197
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent c2c2ba28
...@@ -199,8 +199,8 @@ var faultOnLoad = objabi.GOOS != "aix" ...@@ -199,8 +199,8 @@ var faultOnLoad = objabi.GOOS != "aix"
// nilcheckelim2 eliminates unnecessary nil checks. // nilcheckelim2 eliminates unnecessary nil checks.
// Runs after lowering and scheduling. // Runs after lowering and scheduling.
func nilcheckelim2(f *Func) { func nilcheckelim2(f *Func) {
unnecessary := f.newSparseSet(f.NumValues()) unnecessary := f.newSparseMap(f.NumValues()) // map from pointer that will be dereferenced to index of dereferencing value in b.Values[]
defer f.retSparseSet(unnecessary) defer f.retSparseMap(unnecessary)
pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
...@@ -218,9 +218,21 @@ func nilcheckelim2(f *Func) { ...@@ -218,9 +218,21 @@ func nilcheckelim2(f *Func) {
if f.fe.Debug_checknil() && v.Pos.Line() > 1 { if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
f.Warnl(v.Pos, "removed nil check") f.Warnl(v.Pos, "removed nil check")
} }
if v.Pos.IsStmt() == src.PosIsStmt { // For bug 33724, policy is that we might choose to bump an existing position
// off the faulting load/store in favor of the one from the nil check.
// Iteration order means that first nilcheck in the chain wins, others
// are bumped into the ordinary statement preservation algorithm.
u := b.Values[unnecessary.get(v.Args[0].ID)]
if !u.Pos.SameFileAndLine(v.Pos) {
if u.Pos.IsStmt() == src.PosIsStmt {
pendingLines.add(u.Pos)
}
u.Pos = v.Pos
} else if v.Pos.IsStmt() == src.PosIsStmt {
pendingLines.add(v.Pos) pendingLines.add(v.Pos)
} }
v.reset(OpUnknown) v.reset(OpUnknown)
firstToRemove = i firstToRemove = i
continue continue
...@@ -294,7 +306,7 @@ func nilcheckelim2(f *Func) { ...@@ -294,7 +306,7 @@ func nilcheckelim2(f *Func) {
} }
// This instruction is guaranteed to fault if ptr is nil. // This instruction is guaranteed to fault if ptr is nil.
// Any previous nil check op is unnecessary. // Any previous nil check op is unnecessary.
unnecessary.add(ptr.ID) unnecessary.set(ptr.ID, int32(i), src.NoXPos)
} }
} }
// Remove values we've clobbered with OpUnknown. // Remove values we've clobbered with OpUnknown.
...@@ -302,7 +314,7 @@ func nilcheckelim2(f *Func) { ...@@ -302,7 +314,7 @@ func nilcheckelim2(f *Func) {
for j := i; j < len(b.Values); j++ { for j := i; j < len(b.Values); j++ {
v := b.Values[j] v := b.Values[j]
if v.Op != OpUnknown { if v.Op != OpUnknown {
if v.Pos.IsStmt() != src.PosNotStmt && pendingLines.contains(v.Pos) { if !notStmtBoundary(v.Op) && pendingLines.contains(v.Pos) { // Late in compilation, so any remaining NotStmt values are probably okay now.
v.Pos = v.Pos.WithIsStmt() v.Pos = v.Pos.WithIsStmt()
pendingLines.remove(v.Pos) pendingLines.remove(v.Pos)
} }
......
...@@ -74,7 +74,7 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int { ...@@ -74,7 +74,7 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int {
// rewrite. // rewrite.
func notStmtBoundary(op Op) bool { func notStmtBoundary(op Op) bool {
switch op { switch op {
case OpCopy, OpPhi, OpVarKill, OpVarDef, OpUnknown, OpFwdRef, OpArg: case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg:
return true return true
} }
return false return false
......
...@@ -321,8 +321,8 @@ func fcall_uint32(a, b uint32) (uint32, uint32) { ...@@ -321,8 +321,8 @@ func fcall_uint32(a, b uint32) (uint32, uint32) {
// We want to merge load+op in the first function, but not in the // We want to merge load+op in the first function, but not in the
// second. See Issue 19595. // second. See Issue 19595.
func load_op_merge(p, q *int) { func load_op_merge(p, q *int) {
x := *p x := *p // amd64:`ADDQ\t\(`
*q += x // amd64:`ADDQ\t\(` *q += x // The combined nilcheck and load would normally have this line number, but we want that combined operation to have the line number of the nil check instead (see #33724).
} }
func load_op_no_merge(p, q *int) { func load_op_no_merge(p, q *int) {
x := *p x := *p
......
// run
package main
import (
"fmt"
"runtime/debug"
"strings"
)
// Inner carries an error code; its pointer method below is used to
// exercise nil-check elimination in the compiler.
type Inner struct {
	Err int
}

// NotExpectedInStackTrace returns a sentinel for a nil receiver and
// otherwise an offset of the stored code. Because the nil receiver is
// handled explicitly, this method must never fault — and so must never
// appear in the panic stack trace checked by main.
func (i *Inner) NotExpectedInStackTrace() int {
	if i != nil {
		return 17 + i.Err
	}
	return 86
}
// Outer embeds Inner so that calling the promoted method on a nil
// *Outer must dereference the outer pointer to reach the embedded
// field — that dereference is the faulting instruction under test.
type Outer struct {
	Inner
}

// ExpectedInStackTrace panics here: o is a nil *Outer, and selecting the
// promoted NotExpectedInStackTrace method dereferences o before the
// method body runs. The panic's position must point at this call (the
// nil check's line), so this function — and not the method — must show
// up in the stack trace. NOTE(review): the exact shape of this statement
// is what triggers the combined nilcheck+load in nilcheckelim2 (#33724);
// do not restructure it.
func ExpectedInStackTrace() {
	var o *Outer
	println(o.NotExpectedInStackTrace())
}
// main runs ExpectedInStackTrace, which must panic, and then inspects
// the recovered panic's stack trace: the faulting frame should be
// attributed to ExpectedInStackTrace and the never-entered method
// NotExpectedInStackTrace should be absent. Any deviation prints a
// FAIL line for the test harness.
func main() {
	defer func() {
		r := recover()
		if r == nil {
			fmt.Println("FAIL, should have panicked but did not")
			return
		}
		trace := string(debug.Stack())
		if strings.Contains(trace, "NotExpectedInStackTrace") {
			fmt.Println("FAIL, stacktrace contains NotExpectedInStackTrace")
		}
		if !strings.Contains(trace, "ExpectedInStackTrace") {
			fmt.Println("FAIL, stacktrace does not contain ExpectedInStackTrace")
		}
	}()
	ExpectedInStackTrace()
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment