Commit 48920809 authored by Kirill Smelkov's avatar Kirill Smelkov

tracing/runtime: Add support for Go1.23 (preliminary, incomplete)

Generate the g type definitions for today's state of Go 1.23 (go1.23rc1-0-g7dff7439dc).
Compared to Go1.22, many new fields and several new types were
introduced, as shown by the diff below.

The regenerated files for Go1.22 and earlier releases remain unchanged.

Support for Go1.23 remains incomplete because there is currently no way to
access runtime.stopTheWorld via go:linkname:

https://github.com/golang/go/issues/68167#issuecomment-2192263282
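
For reference, the go:linkname pattern that go1.23 now rejects is sketched
below. This is only an illustration: the signature is a simplified
placeholder, not this package's actual declaration, and a body-less
declaration like this also needs an empty .s file in the package.

    package xruntime_sketch // illustrative name, not the real package

    import _ "unsafe" // required for go:linkname

    // Pre-go1.23 way to pull an unexported runtime symbol into this package.
    // NOTE: the signature is a simplified placeholder; the real
    // runtime.stopTheWorld signature differs between Go releases.
    //
    //go:linkname runtime_stopTheWorld runtime.stopTheWorld
    func runtime_stopTheWorld(reason uint8)

Under go1.23 the linker refuses such a "pull-only" linkname of a runtime
symbol that the runtime itself does not mark as linknamed, so this fails
at link time.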

---- 8< ----
diff --git a/zruntime_g_go1.22.go b/zruntime_g_go1.23.go
index 910382b..4aa6799 100644
--- a/zruntime_g_go1.22.go
+++ b/zruntime_g_go1.23.go
@@ -1,7 +1,7 @@
 // Code generated by g_typedef; DO NOT EDIT.

-//go:build go1.22 && !go1.23
-// +build go1.22,!go1.23
+//go:build go1.23 && !go1.24
+// +build go1.23,!go1.24

 package xruntime

@@ -26,6 +26,7 @@ type g struct {
 	sched     gobuf
 	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
 	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
+	syscallbp uintptr // if status==Gsyscall, syscallbp = sched.bp to use in fpTraceback
 	stktopsp  uintptr // expected sp at top of stack, to check in traceback
 	// param is a generic pointer parameter field used to pass
 	// values in particular contexts where other storage for the
@@ -95,14 +96,15 @@ type g struct {
 	cgoCtxt       []uintptr      // cgo traceback context
 	labels        unsafe.Pointer // profiler labels
 	timer         *timer         // cached timer for time.Sleep
+	sleepWhen     int64          // when to sleep until
 	selectDone    atomic.Uint32  // are we participating in a select and did someone win the race?

-	coroarg *coro // argument during coroutine transfers
-
 	// goroutineProfiled indicates the status of this goroutine's stack for the
 	// current in-progress goroutine profile
 	goroutineProfiled goroutineProfileStateHolder

+	coroarg *coro // argument during coroutine transfers
+
 	// Per-G tracer state.
 	trace gTraceState

@@ -182,27 +184,51 @@ type funcval struct {
 	fn uintptr
 }
 type timer struct {
-	// If this timer is on a heap, which P's heap it is on.
-	// puintptr rather than *p to match uintptr in the versions
-	// of this struct defined in other packages.
-	pp puintptr
+	// mu protects reads and writes to all fields, with exceptions noted below.
+	mu mutex
+
+	astate  uint8  // atomic copy of state bits at last unlock
+	state   uint8  // state bits
+	isChan  bool   // timer has a channel; immutable; can be read without lock
+	blocked uint32 // number of goroutines blocked on timer's channel

 	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
-	// each time calling f(arg, now) in the timer goroutine, so f must be
+	// each time calling f(arg, seq, delay) in the timer goroutine, so f must be
 	// a well-behaved function and not block.
 	//
-	// when must be positive on an active timer.
+	// The arg and seq are client-specified opaque arguments passed back to f.
+	// When used from netpoll, arg and seq have meanings defined by netpoll
+	// and are completely opaque to this code; in that context, seq is a sequence
+	// number to recognize and squech stale function invocations.
+	// When used from package time, arg is a channel (for After, NewTicker)
+	// or the function to call (for AfterFunc) and seq is unused (0).
+	//
+	// Package time does not know about seq, but if this is a channel timer (t.isChan == true),
+	// this file uses t.seq as a sequence number to recognize and squelch
+	// sends that correspond to an earlier (stale) timer configuration,
+	// similar to its use in netpoll. In this usage (that is, when t.isChan == true),
+	// writes to seq are protected by both t.mu and t.sendLock,
+	// so reads are allowed when holding either of the two mutexes.
+	//
+	// The delay argument is nanotime() - t.when, meaning the delay in ns between
+	// when the timer should have gone off and now. Normally that amount is
+	// small enough not to matter, but for channel timers that are fed lazily,
+	// the delay can be arbitrarily long; package time subtracts it out to make
+	// it look like the send happened earlier than it actually did.
+	// (No one looked at the channel since then, or the send would have
+	// not happened so late, so no one can tell the difference.)
 	when   int64
 	period int64
-	f      func(interface{}, uintptr)
+	f      func(arg interface{}, seq uintptr, delay int64)
 	arg    interface{}
 	seq    uintptr

-	// What to set the when field to in timerModifiedXX status.
-	nextwhen int64
+	// If non-nil, the timers containing t.
+	ts *timers

-	// The status field holds one of the values below.
-	status atomic.Uint32
+	// sendLock protects sends on the timer's channel.
+	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
+	sendLock mutex
 }
 type guintptr uintptr
 type puintptr uintptr
@@ -221,6 +247,11 @@ type traceTime uint64
 type coro struct {
 	gp guintptr
 	f  func(*coro)
+
+	// State for validating thread-lock interactions.
+	mp        *m
+	lockedExt uint32 // mp's external LockOSThread counter at coro creation time.
+	lockedInt uint32 // mp's internal lockOSThread counter at coro creation time.
 }
 type traceSchedResourceState struct {
 	// statusTraced indicates whether a status event was traced for this resource
@@ -240,7 +271,18 @@ type traceSchedResourceState struct {
 	// GoStatus and GoCreate events to omit a sequence number (implicitly 0).
 	seq [2]uint64
 }
+type mutex struct {
+	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
+	lockRankStruct
+	// Futex-based impl treats it as uint32 key,
+	// while sema-based impl as M* waitm.
+	// Used to be a union, but unions break precise GC.
+	key uintptr
+}
+type lockRankStruct struct {
+}
 type uintreg uint          // FIXME wrong on amd64p32
 type m struct{}            // FIXME stub
 type sudog struct{}        // FIXME stub
 type timersBucket struct{} // FIXME stub
+type timers struct{}       // FIXME stub
parent 8299741f
@@ -87,12 +87,19 @@ typedef_g() {
 		typedef runtime.coro
 		typedef runtime.traceSchedResourceState
 	fi
+	if (( $govern >= 123 )); then
+		typedef runtime.mutex
+		typedef runtime.lockRankStruct
+	fi
 }
 
 # typedef_g_fixed - print adjusted <g> & friends definitions
 typedef_g_fixed() {
 	typedef_g $1 | \
-		sed -e 's/\<sys.Uintreg\>/uintreg/'
+		sed -e 's/\<sys.Uintreg\>/uintreg/;
+			# there is no sync/atomic.Uint8
+			s/\<atomic.Uint8\>/uint8/'
 
 	echo "type uintreg uint // FIXME wrong on amd64p32"
 	echo "type m struct {} // FIXME stub"
@@ -101,6 +108,10 @@ typedef_g_fixed() {
 	if (( $govern >= 110 )); then
 		echo "type timersBucket struct {} // FIXME stub"
 	fi
+	if (( $govern >= 123 )); then
+		echo "type timers struct {} // FIXME stub"
+	fi
 }
@@ -131,7 +142,7 @@ gen_zruntime() {
 # main driver
-gov="go18 go19 go1.10 go1.11 go1.12 go1.13 go1.14 go1.15 go1.16 go1.17 go1.18 go1.19 go1.20 go1.21 go1.22"
+gov="go18 go19 go1.10 go1.11 go1.12 go1.13 go1.14 go1.15 go1.16 go1.17 go1.18 go1.19 go1.20 go1.21 go1.22 go1.23"
 for g in $gov; do
 	goset $g
zruntime_g_go1.23.go (new file):

// Code generated by g_typedef; DO NOT EDIT.

//go:build go1.23 && !go1.24
// +build go1.23,!go1.24

package xruntime

import "unsafe"
import "sync/atomic"

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic    *_panic // innermost panic - offset known to liblink
	_defer    *_defer // innermost defer
	m         *m      // current m; offset known to arm liblink
	sched     gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	syscallbp uintptr // if status==Gsyscall, syscallbp = sched.bp to use in fpTraceback
	stktopsp  uintptr // expected sp at top of stack, to check in traceback
	// param is a generic pointer parameter field used to pass
	// values in particular contexts where other storage for the
	// parameter would be difficult to find. It is currently used
	// in four ways:
	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
	//    point to the sudog of the completed blocking operation.
	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	//    the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	//    stack may have moved in the meantime.
	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	//    closure in the runtime is forbidden.
	// 4. When a panic is recovered and control returns to the respective frame,
	//    param may point to a savedOpenDeferState.
	param        unsafe.Pointer
	atomicstatus atomic.Uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         uint64
	schedlink    guintptr
	waitsince    int64      // approx time when the g become blocked
	waitreason   waitReason // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool
	// parkingOnChan indicates that the goroutine is about to
	// park on a chansend or chanrecv. Used to signal an unsafe point
	// for stack shrinking.
	parkingOnChan atomic.Bool
	// inMarkAssist indicates whether the goroutine is in mark assist.
	// Used by the execution tracer.
	inMarkAssist bool
	coroexit     bool // argument to coroswitch_m

	raceignore    int8  // ignore race detection events
	nocgocallback bool  // whether disable callback from C
	tracking      bool  // whether we're tracking this G for sched latency statistics
	trackingSeq   uint8 // used to decide whether to track this G
	trackingStamp int64 // timestamp of when the G last started being tracked
	runnableTime  int64 // the amount of time spent runnable, cleared when running, only used when tracking
	lockedm       muintptr
	sig           uint32
	writebuf      []byte
	sigcode0      uintptr
	sigcode1      uintptr
	sigpc         uintptr
	parentGoid    uint64          // goid of goroutine that created this goroutine
	gopc          uintptr         // pc of go statement that created this goroutine
	ancestors     *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc       uintptr         // pc of goroutine function
	racectx       uintptr
	waiting       *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt       []uintptr      // cgo traceback context
	labels        unsafe.Pointer // profiler labels
	timer         *timer         // cached timer for time.Sleep
	sleepWhen     int64          // when to sleep until
	selectDone    atomic.Uint32  // are we participating in a select and did someone win the race?

	// goroutineProfiled indicates the status of this goroutine's stack for the
	// current in-progress goroutine profile
	goroutineProfiled goroutineProfileStateHolder

	coroarg *coro // argument during coroutine transfers

	// Per-G tracer state.
	trace gTraceState

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
}
type stack struct {
	lo uintptr
	hi uintptr
}
type _panic struct {
	argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg  interface{}    // argument to panic
	link *_panic        // link to earlier panic

	// startPC and startSP track where _panic.start was called.
	startPC uintptr
	startSP unsafe.Pointer

	// The current stack frame that we're running deferred calls for.
	sp unsafe.Pointer
	lr uintptr
	fp unsafe.Pointer

	// retpc stores the PC where the panic should jump back to, if the
	// function last returned by _panic.next() recovers the panic.
	retpc uintptr

	// Extra state for handling open-coded defers.
	deferBitsPtr *uint8
	slotsPtr     unsafe.Pointer

	recovered   bool // whether this panic has been recovered
	goexit      bool
	deferreturn bool
}
type _defer struct {
	heap      bool
	rangefunc bool    // true for rangefunc list
	sp        uintptr // sp at time of defer
	pc        uintptr // pc at time of defer
	fn        func()  // can be nil for open-coded defers
	link      *_defer // next defer on G; can point to either heap or stack!

	// If rangefunc is true, *head is the head of the atomic linked list
	// during a range-over-func execution.
	head *atomic.Pointer[_defer]
}
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	ret  uintptr
	lr   uintptr
	bp   uintptr // for framepointer-enabled architectures
}
type funcval struct {
	fn uintptr
}
type timer struct {
	// mu protects reads and writes to all fields, with exceptions noted below.
	mu mutex

	astate  uint8  // atomic copy of state bits at last unlock
	state   uint8  // state bits
	isChan  bool   // timer has a channel; immutable; can be read without lock
	blocked uint32 // number of goroutines blocked on timer's channel

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(arg, seq, delay) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	//
	// The arg and seq are client-specified opaque arguments passed back to f.
	// When used from netpoll, arg and seq have meanings defined by netpoll
	// and are completely opaque to this code; in that context, seq is a sequence
	// number to recognize and squech stale function invocations.
	// When used from package time, arg is a channel (for After, NewTicker)
	// or the function to call (for AfterFunc) and seq is unused (0).
	//
	// Package time does not know about seq, but if this is a channel timer (t.isChan == true),
	// this file uses t.seq as a sequence number to recognize and squelch
	// sends that correspond to an earlier (stale) timer configuration,
	// similar to its use in netpoll. In this usage (that is, when t.isChan == true),
	// writes to seq are protected by both t.mu and t.sendLock,
	// so reads are allowed when holding either of the two mutexes.
	//
	// The delay argument is nanotime() - t.when, meaning the delay in ns between
	// when the timer should have gone off and now. Normally that amount is
	// small enough not to matter, but for channel timers that are fed lazily,
	// the delay can be arbitrarily long; package time subtracts it out to make
	// it look like the send happened earlier than it actually did.
	// (No one looked at the channel since then, or the send would have
	// not happened so late, so no one can tell the difference.)
	when   int64
	period int64
	f      func(arg interface{}, seq uintptr, delay int64)
	arg    interface{}
	seq    uintptr

	// If non-nil, the timers containing t.
	ts *timers

	// sendLock protects sends on the timer's channel.
	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
	sendLock mutex
}
type guintptr uintptr
type puintptr uintptr
type muintptr uintptr
type waitReason uint8
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid uint64    // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}
type goroutineProfileStateHolder atomic.Uint32
type gTraceState struct {
	traceSchedResourceState
}
type traceTime uint64
type coro struct {
	gp guintptr
	f  func(*coro)

	// State for validating thread-lock interactions.
	mp        *m
	lockedExt uint32 // mp's external LockOSThread counter at coro creation time.
	lockedInt uint32 // mp's internal lockOSThread counter at coro creation time.
}
type traceSchedResourceState struct {
	// statusTraced indicates whether a status event was traced for this resource
	// a particular generation.
	//
	// There are 3 of these because when transitioning across generations, traceAdvance
	// needs to be able to reliably observe whether a status was traced for the previous
	// generation, while we need to clear the value for the next generation.
	statusTraced [3]atomic.Uint32

	// seq is the sequence counter for this scheduling resource's events.
	// The purpose of the sequence counter is to establish a partial order between
	// events that don't obviously happen serially (same M) in the stream of events.
	//
	// There are two of these so that we can reset the counter on each generation.
	// This saves space in the resulting trace by keeping the counter small and allows
	// GoStatus and GoCreate events to omit a sequence number (implicitly 0).
	seq [2]uint64
}
type mutex struct {
	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
	lockRankStruct
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
type lockRankStruct struct {
}
type uintreg uint          // FIXME wrong on amd64p32
type m struct{}            // FIXME stub
type sudog struct{}        // FIXME stub
type timersBucket struct{} // FIXME stub
type timers struct{}       // FIXME stub