Commit 961eb13b authored by Martin Möhrmann

runtime: replace sys.CacheLineSize by corresponding internal/cpu const and vars

sys here is runtime/internal/sys.

Replace uses of sys.CacheLineSize for padding by
cpu.CacheLinePad or cpu.CacheLinePadSize.
Replace other uses of sys.CacheLineSize by cpu.CacheLineSize.
Remove now unused sys.CacheLineSize.

Updates #25203

Change-Id: I1daf410fe8f6c0493471c2ceccb9ca0a5a75ed8f
Reviewed-on: https://go-review.googlesource.com/126601
Run-TryBot: Martin Möhrmann <moehrmann@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
parent 2200b182
......@@ -38,7 +38,7 @@ var pkgDeps = map[string][]string{
"io": {"errors", "sync", "sync/atomic"},
"runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys", "internal/cpu", "internal/bytealg"},
"runtime/internal/sys": {},
"runtime/internal/atomic": {"unsafe", "runtime/internal/sys"},
"runtime/internal/atomic": {"unsafe", "internal/cpu"},
"internal/race": {"runtime", "unsafe"},
"sync": {"internal/race", "runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
......
......@@ -17,7 +17,7 @@ type CacheLinePad struct{ _ [CacheLinePadSize]byte }
// CacheLineSize is the CPU's assumed cache line size.
// There is currently no runtime detection of the real cache line size
// so we use the constant per GOARCH CacheLinePadSize as an approximation.
var CacheLineSize = CacheLinePadSize
var CacheLineSize uintptr = CacheLinePadSize
var X86 x86
......
......@@ -7,7 +7,7 @@
package atomic
import (
"runtime/internal/sys"
"internal/cpu"
"unsafe"
)
......@@ -31,7 +31,7 @@ func (l *spinlock) unlock() {
var locktab [57]struct {
l spinlock
pad [sys.CacheLineSize - unsafe.Sizeof(spinlock{})]byte
pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}
func addrLock(addr *uint64) *spinlock {
......
......@@ -7,14 +7,14 @@
package atomic
import (
"runtime/internal/sys"
"internal/cpu"
"unsafe"
)
// TODO implement lock striping
var lock struct {
state uint32
pad [sys.CacheLineSize - 4]byte
pad [cpu.CacheLinePadSize - 4]byte
}
//go:noescape
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = I386
BigEndian = false
CacheLineSize = 64
DefaultPhysPageSize = GoosNacl*65536 + (1-GoosNacl)*4096 // 4k normally; 64k on NaCl
PCQuantum = 1
Int64Align = 4
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = AMD64
BigEndian = false
CacheLineSize = 64
DefaultPhysPageSize = 4096
PCQuantum = 1
Int64Align = 8
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = AMD64
BigEndian = false
CacheLineSize = 64
DefaultPhysPageSize = 65536*GoosNacl + 4096*(1-GoosNacl)
PCQuantum = 1
Int64Align = 8
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = ARM
BigEndian = false
CacheLineSize = 32
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 4
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = ARM64
BigEndian = false
CacheLineSize = 64
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 8
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = MIPS
BigEndian = true
CacheLineSize = 32
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 4
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = MIPS64
BigEndian = true
CacheLineSize = 32
DefaultPhysPageSize = 16384
PCQuantum = 4
Int64Align = 8
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = MIPS64
BigEndian = false
CacheLineSize = 32
DefaultPhysPageSize = 16384
PCQuantum = 4
Int64Align = 8
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = MIPS
BigEndian = false
CacheLineSize = 32
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 4
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = PPC64
BigEndian = true
CacheLineSize = 128
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 8
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = PPC64
BigEndian = false
CacheLineSize = 128
DefaultPhysPageSize = 65536
PCQuantum = 4
Int64Align = 8
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = S390X
BigEndian = true
CacheLineSize = 256
DefaultPhysPageSize = 4096
PCQuantum = 2
Int64Align = 8
......
......@@ -7,7 +7,6 @@ package sys
const (
ArchFamily = WASM
BigEndian = false
CacheLineSize = 64
DefaultPhysPageSize = 65536
PCQuantum = 1
Int64Align = 8
......
......@@ -137,8 +137,8 @@
package runtime
import (
"internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
......@@ -414,7 +414,7 @@ type gcControllerState struct {
// If this is zero, no fractional workers are needed.
fractionalUtilizationGoal float64
_ [sys.CacheLineSize]byte
_ cpu.CacheLinePad
}
// startCycle resets the GC controller's state and computes estimates
......@@ -919,9 +919,9 @@ const gcAssistTimeSlack = 5000
const gcOverAssistWork = 64 << 10
var work struct {
full lfstack // lock-free list of full blocks workbuf
empty lfstack // lock-free list of empty blocks workbuf
pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
full lfstack // lock-free list of full blocks workbuf
empty lfstack // lock-free list of empty blocks workbuf
pad0 cpu.CacheLinePad // prevents false-sharing between full/empty and nproc/nwait
wbufSpans struct {
lock mutex
......
......@@ -5,6 +5,7 @@
package runtime
import (
"internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
......@@ -83,7 +84,7 @@ retry:
if newCap == 0 {
newCap = gcSweepBufInitSpineCap
}
newSpine := persistentalloc(newCap*sys.PtrSize, sys.CacheLineSize, &memstats.gc_sys)
newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys)
if b.spineCap != 0 {
// Blocks are allocated off-heap, so
// no write barriers.
......@@ -102,7 +103,7 @@ retry:
}
// Allocate a new block and add it to the spine.
block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), sys.CacheLineSize, &memstats.gc_sys))
block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), cpu.CacheLineSize, &memstats.gc_sys))
blockp := add(b.spine, sys.PtrSize*top)
// Blocks are allocated off-heap, so no write barrier.
atomic.StorepNoWB(blockp, unsafe.Pointer(block))
......
......@@ -9,6 +9,7 @@
package runtime
import (
"internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
......@@ -137,12 +138,12 @@ type mheap struct {
// central free lists for small size classes.
// the padding makes sure that the MCentrals are
// spaced CacheLineSize bytes apart, so that each MCentral.lock
// spaced CacheLinePadSize bytes apart, so that each MCentral.lock
// gets its own cache line.
// central is indexed by spanClass.
central [numSpanClasses]struct {
mcentral mcentral
pad [sys.CacheLineSize - unsafe.Sizeof(mcentral{})%sys.CacheLineSize]byte
pad [cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize]byte
}
spanalloc fixalloc // allocator for span*
......
......@@ -5,6 +5,7 @@
package runtime
import (
"internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
......@@ -548,7 +549,7 @@ type p struct {
runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point
pad [sys.CacheLineSize]byte
pad cpu.CacheLinePad
}
type schedt struct {
......
......@@ -20,8 +20,8 @@
package runtime
import (
"internal/cpu"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
......@@ -48,7 +48,7 @@ const semTabSize = 251
var semtable [semTabSize]struct {
root semaRoot
pad [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
}
//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
......
......@@ -7,7 +7,7 @@
package runtime
import (
"runtime/internal/sys"
"internal/cpu"
"unsafe"
)
......@@ -50,7 +50,7 @@ var timers [timersLen]struct {
// The padding should eliminate false sharing
// between timersBucket values.
pad [sys.CacheLineSize - unsafe.Sizeof(timersBucket{})%sys.CacheLineSize]byte
pad [cpu.CacheLinePadSize - unsafe.Sizeof(timersBucket{})%cpu.CacheLinePadSize]byte
}
func (t *timer) assignBucket() *timersBucket {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment