Commit fa2af441 authored by Russ Cox's avatar Russ Cox

runtime: convert traceback*.c to Go

The two converted files were nearly identical.
Instead of continuing that duplication, I merged them
into a single traceback.go.

Tested on arm, amd64, amd64p32, and 386.

LGTM=r
R=golang-codereviews, remyoudompheng, dave, r
CC=dvyukov, golang-codereviews, iant, khr
https://golang.org/cl/134200044
parent 8e89f871
......@@ -379,13 +379,30 @@ func (w *Walker) parseFile(dir, file string) (*ast.File, error) {
if w.context != nil && file == fmt.Sprintf("zruntime_defs_%s_%s.go", w.context.GOOS, w.context.GOARCH) {
// Just enough to keep the api checker happy.
src := "package runtime; type (" +
" maptype struct{}; _type struct{}; alg struct{};" +
" mspan struct{}; m struct{}; mutex struct{}; slicetype struct{};" +
" iface struct{}; eface struct{}; interfacetype struct{}; itab struct{};" +
" mcache struct{}; sudog struct{}; g struct{};" +
" hchan struct{}; chantype struct{}; waitq struct{};" +
" note struct{}; wincallbackcontext struct{};" +
" gobuf struct{}; funcval struct{}; _func struct{};" +
" _func struct{};" +
" _type struct{};" +
" alg struct{};" +
" chantype struct{};" +
" context struct{};" + // windows
" eface struct{};" +
" funcval struct{};" +
" g struct{};" +
" gobuf struct{};" +
" hchan struct{};" +
" iface struct{};" +
" interfacetype struct{};" +
" itab struct{};" +
" m struct{};" +
" maptype struct{};" +
" mcache struct{};" +
" mspan struct{};" +
" mutex struct{};" +
" note struct{};" +
" slicetype struct{};" +
" stkframe struct{};" +
" sudog struct{};" +
" waitq struct{};" +
" wincallbackcontext struct{};" +
"); " +
"const ( cb_max = 2000 )"
f, err = parser.ParseFile(fset, filename, src, 0)
......
......@@ -86,11 +86,9 @@ import "unsafe"
// If all other goroutines exit, the program crashes.
func Goexit()
// We assume that all architectures turn faults and the like
// into apparent calls to runtime.sigpanic. If we see a "call"
// to runtime.sigpanic, we do not back up the PC to find the
// line number of the CALL instruction, because there is no CALL.
var sigpanic byte
// sigpanic is the C function sigpanic.
// That is, unsafe.Pointer(&sigpanic) is the C function pointer for sigpanic.
var sigpanic struct{}
// Caller reports file and line number information about function invocations on
// the calling goroutine's stack. The argument skip is the number of stack frames
......@@ -103,7 +101,7 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
// and what it called, so that we can see if it
// "called" sigpanic.
var rpc [2]uintptr
if callers(int32(1+skip-1), &rpc[0], 2) < 2 {
if callers(1+skip-1, &rpc[0], 2) < 2 {
return
}
f := findfunc(rpc[1])
......@@ -117,6 +115,9 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
pc = rpc[1]
xpc := pc
g := findfunc(rpc[0])
// All architectures turn faults into apparent calls to sigpanic.
// If we see a call to sigpanic, we do not back up the PC to find
// the line number of the call instruction, because there is no call.
if xpc > f.entry && (g == nil || g.entry != uintptr(unsafe.Pointer(&sigpanic))) {
xpc--
}
......@@ -142,18 +143,9 @@ func Callers(skip int, pc []uintptr) int {
if len(pc) == 0 {
return 0
}
return int(callers(int32(skip), &pc[0], int32(len(pc))))
return callers(skip, &pc[0], len(pc))
}
//go:noescape
func callers(int32, *uintptr, int32) int32
//go:noescape
func gcallers(*g, int32, *uintptr, int32) int32
//go:noescape
func gentraceback(uintptr, uintptr, uintptr, *g, int32, *uintptr, int32, unsafe.Pointer, unsafe.Pointer, bool) int32
func getgoroot() string
// GOROOT returns the root of the Go tree.
......
......@@ -380,6 +380,7 @@ dumpgoroutine(G *gp)
ChildInfo child;
Defer *d;
Panic *p;
bool (*fn)(Stkframe*, void*);
if(gp->syscallstack != (uintptr)nil) {
sp = gp->syscallsp;
......@@ -413,7 +414,8 @@ dumpgoroutine(G *gp)
child.depth = 0;
if(!ScanStackByFrames)
runtime·throw("need frame info to dump stacks");
runtime·gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);
fn = dumpframe;
runtime·gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, &fn, &child, false);
// dump defer & panic records
for(d = gp->defer; d != nil; d = d->link) {
......
......@@ -685,6 +685,7 @@ scanstack(G *gp)
int32 n;
Stktop *stk;
uintptr sp, guard;
bool (*fn)(Stkframe*, void*);
switch(runtime·readgstatus(gp)) {
default:
......@@ -726,7 +727,8 @@ scanstack(G *gp)
USED(sp);
USED(stk);
USED(guard);
runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, scanframe, nil, false);
fn = scanframe;
runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &fn, nil, false);
} else {
n = 0;
while(stk) {
......@@ -1779,6 +1781,7 @@ runtime·getgcmask(byte *p, Type *t, byte **mask, uintptr *len)
Stkframe frame;
uintptr i, n, off;
byte *base, bits, shift, *b;
bool (*cb)(Stkframe*, void*);
*mask = nil;
*len = 0;
......@@ -1823,7 +1826,8 @@ runtime·getgcmask(byte *p, Type *t, byte **mask, uintptr *len)
// stack
frame.fn = nil;
frame.sp = (uintptr)p;
runtime·gentraceback((uintptr)runtime·getcallerpc(&p), (uintptr)runtime·getcallersp(&p), 0, g, 0, nil, 1000, getgcmaskcb, &frame, false);
cb = getgcmaskcb;
runtime·gentraceback((uintptr)runtime·getcallerpc(&p), (uintptr)runtime·getcallersp(&p), 0, g, 0, nil, 1000, &cb, &frame, false);
if(frame.fn != nil) {
Func *f;
StackMap *stackmap;
......
......@@ -234,7 +234,7 @@ func mProf_GC() {
// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
var stk [maxStack]uintptr
nstk := callers(1, &stk[0], int32(len(stk)))
nstk := callers(1, &stk[0], len(stk))
lock(&proflock)
b := stkbucket(memProfile, size, stk[:nstk], true)
mp := b.mp()
......@@ -304,9 +304,9 @@ func blockevent(cycles int64, skip int) {
var nstk int
var stk [maxStack]uintptr
if gp.m.curg == nil || gp.m.curg == gp {
nstk = int(callers(int32(skip), &stk[0], int32(len(stk))))
nstk = callers(skip, &stk[0], len(stk))
} else {
nstk = int(gcallers(gp.m.curg, int32(skip), &stk[0], int32(len(stk))))
nstk = gcallers(gp.m.curg, skip, &stk[0], len(stk))
}
lock(&proflock)
b := stkbucket(blockProfile, 0, stk[:nstk], true)
......@@ -557,8 +557,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
}
func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], int32(len(r.Stack0)), nil, nil, false)
if int(n) < len(r.Stack0) {
n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, false)
if n < len(r.Stack0) {
r.Stack0[n] = 0
}
}
......
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// contextPC extracts the program counter saved in the Windows
// exception context record. On 386 that is the EIP register.
func contextPC(r *context) uintptr {
	pc := r.eip
	return uintptr(pc)
}
// contextSP extracts the stack pointer saved in the Windows
// exception context record. On 386 that is the ESP register.
func contextSP(r *context) uintptr {
	sp := r.esp
	return uintptr(sp)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// contextPC extracts the program counter saved in the Windows
// exception context record. On amd64 that is the RIP register.
func contextPC(r *context) uintptr {
	pc := r.rip
	return uintptr(pc)
}
// contextSP extracts the stack pointer saved in the Windows
// exception context record. On amd64 that is the RSP register.
func contextSP(r *context) uintptr {
	sp := r.rsp
	return uintptr(sp)
}
......@@ -666,7 +666,7 @@ struct Stkframe
uintptr arglen; // number of bytes at argp
};
int32 runtime·gentraceback(uintptr, uintptr, uintptr, G*, int32, uintptr*, int32, bool(*)(Stkframe*, void*), void*, bool);
intgo runtime·gentraceback(uintptr, uintptr, uintptr, G*, intgo, uintptr*, intgo, bool(**)(Stkframe*, void*), void*, bool);
void runtime·traceback(uintptr pc, uintptr sp, uintptr lr, G* gp);
void runtime·tracebackothers(G*);
bool runtime·haszeroargs(uintptr pc);
......@@ -854,8 +854,8 @@ void runtime·exitsyscall(void);
void runtime·entersyscallblock_m(void);
G* runtime·newproc1(FuncVal*, byte*, int32, int32, void*);
bool runtime·sigsend(int32 sig);
int32 runtime·callers(int32, uintptr*, int32);
int32 runtime·gcallers(G*, int32, uintptr*, int32);
intgo runtime·callers(intgo, uintptr*, intgo);
intgo runtime·gcallers(G*, intgo, uintptr*, intgo);
int64 runtime·nanotime(void); // monotonic time
int64 runtime·unixnanotime(void); // real time, can skip
void runtime·dopanic(int32);
......@@ -868,7 +868,7 @@ void runtime·setcpuprofilerate(int32);
void runtime·usleep(uint32);
int64 runtime·cputicks(void);
int64 runtime·tickspersecond(void);
void runtime·blockevent(int64, int32);
void runtime·blockevent(int64, intgo);
G* runtime·netpoll(bool);
void runtime·netpollinit(void);
int32 runtime·netpollopen(uintptr, PollDesc*);
......
......@@ -477,6 +477,7 @@ copyabletopsegment(G *gp)
Func *f;
FuncVal *fn;
StackMap *stackmap;
bool (*cb)(Stkframe*, void*);
if(gp->stackbase == 0)
runtime·throw("stackbase == 0");
......@@ -486,7 +487,8 @@ copyabletopsegment(G *gp)
// Check that each frame is copyable. As a side effect,
// count the frames.
runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, checkframecopy, &cinfo, false);
cb = checkframecopy;
runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &cb, &cinfo, false);
if(StackDebug >= 1 && cinfo.frames != -1)
runtime·printf("copystack: %d copyable frames\n", cinfo.frames);
......@@ -680,8 +682,10 @@ adjustframe(Stkframe *frame, void *arg)
// adjust inargs and outargs
if(frame->arglen != 0) {
stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
if(stackmap == nil)
if(stackmap == nil) {
runtime·printf("size %d\n", (int32)frame->arglen);
runtime·throw("no arg info");
}
bv = runtime·stackmapdata(stackmap, pcdata);
if(StackDebug >= 3)
runtime·printf(" args\n");
......@@ -773,6 +777,7 @@ copystack(G *gp, uintptr nframes, uintptr newsize)
AdjustInfo adjinfo;
Stktop *oldtop, *newtop;
uint32 oldstatus;
bool (*cb)(Stkframe*, void*);
if(gp->syscallstack != 0)
runtime·throw("can't handle stack copy in syscall yet");
......@@ -797,7 +802,8 @@ copystack(G *gp, uintptr nframes, uintptr newsize)
adjinfo.oldstk = oldstk;
adjinfo.oldbase = oldbase;
adjinfo.delta = newbase - oldbase;
runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, nframes, adjustframe, &adjinfo, false);
cb = adjustframe;
runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, nframes, &cb, &adjinfo, false);
// adjust other miscellaneous things that have pointers into stacks.
adjustctxt(gp, &adjinfo);
......
......@@ -12,6 +12,7 @@ import "unsafe"
// each function.
const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
const regSize = 4 << (^uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) but an ideal const
//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
......@@ -141,7 +142,6 @@ func entersyscallblock()
func exitsyscall()
func goroutineheader(gp *g)
func traceback(pc, sp, lr uintptr, gp *g)
func tracebackothers(gp *g)
func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
......@@ -246,3 +246,21 @@ func asmcgocall(fn, arg unsafe.Pointer)
//go:noescape
func open(name *byte, mode, perm int32) int32
//go:noescape
func gotraceback(*bool) int32
func funcname(*_func) *byte
func gofuncname(f *_func) string {
return gostringnocopy(funcname(f))
}
const _NoArgs = ^uintptr(0)
var newproc, deferproc, lessstack struct{} // C/assembly functions
func funcspdelta(*_func, uintptr) int32 // symtab.c
func funcarglen(*_func, uintptr) int32 // symtab.c
const _ArgsSizeUnknown = -0x80000000 // funcdata.h
func topofstack(*_func) bool // proc.c
This diff is collapsed.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
// sigtrampPC is the PC at the beginning of the sigtramp assembly function.
// The traceback needs to recognize it on link register architectures.
var sigtrampPC uintptr
var sigtramp struct{} // assembly function
// init records the entry PC of the sigtramp assembly function in
// sigtrampPC and installs the Windows-specific traceback hook.
func init() {
	f := sigtramp
	// Read the function's entry PC through its funcval: &f is treated as a
	// pointer to a pointer to the funcval, whose first word is the entry PC.
	// NOTE(review): this assumes f is a func value (funcval layout) — the
	// sigtramp declaration above reads struct{}; confirm the declaration
	// the compiler actually sees.
	sigtrampPC = **(**uintptr)(unsafe.Pointer(&f))
	systraceback = traceback_windows
}
// traceback_windows is the Windows-specific hook called from the main
// traceback loop (installed as systraceback in init). If frame is inside
// the sigtramp assembly function, it rewrites frame to continue unwinding
// at the PC/SP recorded in the exception context, so the traceback can
// step past an exception handler running on the goroutine stack.
//
// f is the function the main traceback resolved for frame; gp is the
// goroutine being unwound; printing selects human-readable output;
// callback/v are the per-frame visitor used by the stack copier.
// It reports changed=true when frame was rewritten, and aborted=true
// when the callback asked to stop the walk.
func traceback_windows(f *_func, frame *stkframe, gp *g, printing bool, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) (changed, aborted bool) {
	// The main traceback thinks it has found a function. Check this.

	// Windows exception handlers run on the actual g stack (there is room
	// dedicated to this below the usual "bottom of stack"), not on a separate
	// stack. As a result, we have to be able to unwind past the exception
	// handler when called to unwind during stack growth inside the handler.
	// Recognize the frame at the call to sighandler in sigtramp and unwind
	// using the context argument passed to the call. This is awful.
	if f != nil && f.entry == sigtrampPC && frame.pc > f.entry {
		var r *context
		// Invoke callback so that stack copier sees an uncopyable frame.
		if callback != nil {
			// Zero argp/arglen: this frame's arguments must not be adjusted.
			frame.continpc = frame.pc
			frame.argp = 0
			frame.arglen = 0
			if !callback(frame, v) {
				aborted = true
				return
			}
		}
		// The context record pointer was passed as the argument to the
		// sighandler call, one word above sp.
		// NOTE(review): assumes the context argument sits at sp+ptrSize —
		// confirm against the sigtramp assembly's call to sighandler.
		r = (*context)(unsafe.Pointer(frame.sp + ptrSize))
		// Resume unwinding at the interrupted PC/SP; clear the link
		// register, frame pointer, and function until re-resolved below.
		frame.pc = contextPC(r)
		frame.sp = contextSP(r)
		frame.lr = 0
		frame.fp = 0
		frame.fn = nil
		if printing && showframe(nil, gp) {
			print("----- exception handler -----\n")
		}
		f = findfunc(frame.pc)
		if f == nil {
			print("runtime: unknown pc ", hex(frame.pc), " after exception handler\n")
			// Only throw when a callback (e.g. the stack copier) depends on
			// a complete walk; a printing traceback is best-effort.
			if callback != nil {
				gothrow("unknown pc")
			}
		}
		frame.fn = f
		changed = true
		return
	}
	return
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment