Commit 0d7a2241 authored by Keith Randall

runtime: update a few comments

noescape is now 0 instructions with the SSA backend.
fast atomics are no longer a TODO (at least for amd64).

Change-Id: Ib6e06f7471bef282a47ba236d8ce95404bb60a42
Reviewed-on: https://go-review.googlesource.com/28087
Run-TryBot: Keith Randall <khr@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent f7ac5da4
src/runtime/proc.go
@@ -543,7 +543,7 @@ func ready(gp *g, traceskip int, next bool) {
 	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
 	casgstatus(gp, _Gwaiting, _Grunnable)
 	runqput(_g_.m.p.ptr(), gp, next)
-	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic
+	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
 		wakep()
 	}
 	_g_.m.locks--
@@ -1901,7 +1901,7 @@ top:
 	// If number of spinning M's >= number of busy P's, block.
 	// This is necessary to prevent excessive CPU consumption
 	// when GOMAXPROCS>>1 but the program parallelism is low.
-	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { // TODO: fast atomic
+	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
 		goto stop
 	}
 	if !_g_.m.spinning {
@@ -2341,7 +2341,7 @@ func reentersyscall(pc, sp uintptr) {
 		save(pc, sp)
 	}

-	if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic
+	if atomic.Load(&sched.sysmonwait) != 0 {
 		systemstack(entersyscall_sysmon)
 		save(pc, sp)
 	}
@@ -2806,7 +2806,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
 	}
 	runqput(_p_, newg, true)

-	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
+	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) {
 		wakep()
 	}
 	_g_.m.locks--
@@ -3604,7 +3604,7 @@ func sysmon() {
 			delay = 10 * 1000
 		}
 		usleep(delay)
-		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
+		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
 			lock(&sched.lock)
 			if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
 				atomic.Store(&sched.sysmonwait, 1)
...
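
Every proc.go hunk above deletes the same stale "// TODO: fast atomic" note: with the SSA backend the runtime's atomic.Load is intrinsified, and on amd64 it compiles to a plain MOV, so these hot-path checks are already as cheap as they can be. The sysmon hunk also shows the usual double-checked shape: a lock-free atomic load gates the slow path, and the condition is re-checked under sched.lock before acting on it. A minimal user-level sketch of that pattern using sync/atomic (idleWorkers and wakeOne are hypothetical names, not runtime API):

package schedsketch

import (
	"sync"
	"sync/atomic"
)

var (
	mu          sync.Mutex
	idleWorkers uint32 // updated elsewhere with atomic ops
)

// maybeWake mirrors the shape of the checks in the hunks above:
// a cheap atomic load on the hot path, then a re-check under the
// lock, because the value can change between the two reads.
func maybeWake() {
	if atomic.LoadUint32(&idleWorkers) == 0 {
		return // fast path: nothing to wake
	}
	mu.Lock()
	if atomic.LoadUint32(&idleWorkers) != 0 {
		wakeOne()
	}
	mu.Unlock()
}

func wakeOne() {} // hypothetical wakeup hook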
src/runtime/stubs.go
@@ -90,7 +90,7 @@ func memequal(a, b unsafe.Pointer, size uintptr) bool
 // noescape hides a pointer from escape analysis. noescape is
 // the identity function but escape analysis doesn't think the
 // output depends on the input. noescape is inlined and currently
-// compiles down to a single xor instruction.
+// compiles down to zero instructions.
 // USE CAREFULLY!
 //go:nosplit
 func noescape(p unsafe.Pointer) unsafe.Pointer {
...
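
The body of noescape is collapsed in the hunk above, but the two versions of the comment pin the technique down: it is the identity function written as a uintptr round-trip with an xor by zero, so escape analysis cannot see that the output depends on the input. The old comment counted that xor as one instruction; the SSA backend folds x ^ 0 away, hence "zero instructions". A sketch reconstructed from those comments:

package main

import (
	"fmt"
	"unsafe"
)

// noescape returns p unchanged. The uintptr round-trip plus the
// xor with 0 break the data-flow edge that escape analysis tracks,
// so the argument is not marked as escaping. x ^ 0 is a no-op the
// SSA backend eliminates entirely.
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

func main() {
	v := 42
	p := (*int)(noescape(unsafe.Pointer(&v)))
	fmt.Println(*p) // prints 42: same pointer, hidden from the analysis
}

As the "USE CAREFULLY!" line warns, this defeats a safety analysis on purpose: if the only live reference to an object passes through noescape, the garbage collector and the stack copier can no longer account for it.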