Commit 041526c6 authored by Clément Chigot, committed by Lynn Boger

runtime: handle 64-bit addresses for AIX

This commit allows the runtime to handle 64-bit addresses returned by
the mmap syscall on AIX.

On AIX, the mmap syscall returns addresses that use up to 59 bits, but
the arena implementation only allows addresses with fewer than 48 bits.
This commit increases the arena size up to 1<<60 for aix/ppc64.
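
For a concrete sense of the numbers above, here is a standalone sketch (not part of this
change; the constant is the segment base mentioned below in malloc.go) checking how many
bits an AIX mmap address needs:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// 64-bit mmap allocations on AIX start at this segment base address.
	const aixMmapBase = 0x0a00000000000000
	// Prints 60: such addresses already exceed the old 48-bit arena limit.
	fmt.Println("bits needed:", bits.Len64(aixMmapBase))
}
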

Update: #25893

Change-Id: Iea72e8a944d10d4f00be915785e33ae82dd6329e
Reviewed-on: https://go-review.googlesource.com/c/138736
Reviewed-by: Austin Clements <austin@google.com>
parent bb3b24bf
@@ -16,7 +16,7 @@ func Init(arch *gc.Arch) {
 		arch.LinkArch = &ppc64.Linkppc64le
 	}
 	arch.REGSP = ppc64.REGSP
-	arch.MAXWIDTH = 1 << 50
+	arch.MAXWIDTH = 1 << 60
 	arch.ZeroRange = zerorange
 	arch.ZeroAuto = zeroAuto
...
@@ -28,9 +28,20 @@ const (
 	// bottom, because node must be pointer-aligned, giving a total of 19 bits
 	// of count.
 	cntBits = 64 - addrBits + 3
+
+	// On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit
+	// offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF (LSA)
+	// are available for mmap.
+	// We assume all lfnode addresses are from memory allocated with mmap.
+	// We use one bit to distinguish between the two ranges.
+	aixAddrBits = 57
+	aixCntBits  = 64 - aixAddrBits + 3
 )
 
 func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+	if GOARCH == "ppc64" && GOOS == "aix" {
+		return uint64(uintptr(unsafe.Pointer(node)))<<(64-aixAddrBits) | uint64(cnt&(1<<aixCntBits-1))
+	}
 	return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
 }
@@ -40,5 +51,8 @@ func lfstackUnpack(val uint64) *lfnode {
 		// val before unpacking.
 		return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
 	}
+	if GOARCH == "ppc64" && GOOS == "aix" {
+		return (*lfnode)(unsafe.Pointer(uintptr((val >> aixCntBits << 3) | 0xa<<56)))
+	}
 	return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
 }
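
To illustrate the packing scheme in the hunk above, here is a small self-contained sketch.
It uses plain uint64 values instead of real *lfnode pointers, copies aixAddrBits and
aixCntBits from the diff, and picks an arbitrary pointer-aligned address and counter; it
round-trips the address through the same shifts used by lfstackPack and lfstackUnpack.

package main

import "fmt"

const (
	aixAddrBits = 57
	aixCntBits  = 64 - aixAddrBits + 3 // 10 bits of ABA counter
)

// pack mirrors the aix branch of lfstackPack for a plain address value.
func pack(addr, cnt uint64) uint64 {
	return addr<<(64-aixAddrBits) | cnt&(1<<aixCntBits-1)
}

// unpack mirrors the aix branch of lfstackUnpack: the 0x0a segment prefix
// dropped by the shift in pack is reinstated with "| 0xa<<56".
func unpack(val uint64) uint64 {
	return val>>aixCntBits<<3 | 0xa<<56
}

func main() {
	addr := uint64(0x0a00000012345678) // pointer-aligned address in the AIX mmap range
	val := pack(addr, 42)
	fmt.Printf("packed:   %#x\n", val)
	fmt.Printf("unpacked: %#x round-trips: %v\n", unpack(val), unpack(val) == addr)
}
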
@@ -160,7 +160,7 @@ const (
 	// amd64, addresses are sign-extended beyond heapAddrBits. On
 	// other arches, they are zero-extended.
 	//
-	// On 64-bit platforms, we limit this to 48 bits based on a
+	// On most 64-bit platforms, we limit this to 48 bits based on a
 	// combination of hardware and OS limitations.
 	//
 	// amd64 hardware limits addresses to 48 bits, sign-extended
@@ -178,10 +178,9 @@ const (
 	// bits, in the range [0, 1<<48).
 	//
 	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
-	// in hardware. However, since Go only supports Linux on
-	// these, we lean on OS limits. Based on Linux's processor.h,
-	// the user address space is limited as follows on 64-bit
-	// architectures:
+	// in hardware. On Linux, Go leans on stricter OS limits. Based
+	// on Linux's processor.h, the user address space is limited as
+	// follows on 64-bit architectures:
 	//
 	// Architecture  Name        Maximum Value (exclusive)
 	// ---------------------------------------------------------------------
@@ -198,13 +197,17 @@ const (
 	// exceed Go's 48 bit limit, it's extremely unlikely in
 	// practice.
 	//
+	// On aix/ppc64, the limit is increased to 1<<60 to accept addresses
+	// returned by the mmap syscall. These are in the range:
+	//  0x0a00000000000000 - 0x0affffffffffffff
+	//
 	// On 32-bit platforms, we accept the full 32-bit address
 	// space because doing so is cheap.
 	// mips32 only has access to the low 2GB of virtual memory, so
 	// we further limit it to 31 bits.
 	//
 	// WebAssembly currently has a limit of 4GB linear memory.
-	heapAddrBits = (_64bit*(1-sys.GoarchWasm))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle))
+	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosAix))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 60*sys.GoosAix
 
 	// maxAlloc is the maximum size of an allocation. On 64-bit,
 	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
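
To see what the new heapAddrBits expression evaluates to, here is a sketch that re-evaluates
the formula with the sys build flags written out as plain ints; the parameter names are local
stand-ins, not the runtime's identifiers.

package main

import "fmt"

// heapAddrBits mirrors the constant expression above, term for term.
func heapAddrBits(is64bit, wasm, mips, mipsle, aix int) int {
	return (is64bit*(1-wasm)*(1-aix))*48 +
		(1-is64bit+wasm)*(32-(mips+mipsle)) +
		60*aix
}

func main() {
	fmt.Println("linux/amd64:", heapAddrBits(1, 0, 0, 0, 0)) // 48
	fmt.Println("aix/ppc64:  ", heapAddrBits(1, 0, 0, 0, 1)) // 60
	fmt.Println("linux/mips: ", heapAddrBits(0, 0, 1, 0, 0)) // 31
	fmt.Println("js/wasm:    ", heapAddrBits(1, 1, 0, 0, 0)) // 32
}
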
@@ -223,6 +226,7 @@ const (
 	// Platform        Addr bits  Arena size  L1 entries  L2 entries
 	// --------------  ---------  ----------  ----------  -----------
 	//       */64-bit         48       64MB            1   4M (32MB)
+	//     aix/64-bit         60      256MB         4096   4M (32MB)
 	// windows/64-bit         48        4MB           64    1M (8MB)
 	//       */32-bit         32        4MB            1  1024 (4KB)
 	//     */mips(le)         31        4MB            1   512 (2KB)
@@ -244,7 +248,7 @@ const (
 	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
 	// prefer using heapArenaBytes where possible (we need the
 	// constant to compute some other constants).
-	logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit)
+	logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoosAix)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (8+20)*sys.GoosAix
 
 	// heapArenaBitmapBytes is the size of each heap arena's bitmap.
 	heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
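
A companion sketch for logHeapArenaBytes: each term of the constant selects one platform,
and the new aix term (8+20) yields 256MB arenas, matching the table above. The exponents are
copied from the constant; the platform labels are just strings for printing.

package main

import "fmt"

func main() {
	for _, c := range []struct {
		name string
		log  uint
	}{
		{"*/64-bit", 6 + 20},
		{"windows/64-bit", 2 + 20},
		{"aix/ppc64", 8 + 20},
	} {
		// Prints 26/64MB, 22/4MB, 28/256MB respectively.
		fmt.Printf("%-15s logHeapArenaBytes=%d arena=%dMB\n", c.name, c.log, (1<<c.log)>>20)
	}
}
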
@@ -264,7 +268,10 @@ const (
 	// We use the L1 map on 64-bit Windows because the arena size
 	// is small, but the address space is still 48 bits, and
 	// there's a high cost to having a large L2.
-	arenaL1Bits = 6 * (_64bit * sys.GoosWindows)
+	//
+	// We use the L1 map on aix/ppc64 to keep the same L2 value
+	// as on Linux.
+	arenaL1Bits = 6*(_64bit*sys.GoosWindows) + 12*sys.GoosAix
 
 	// arenaL2Bits is the number of bits of the arena number
 	// covered by the second level arena index.
@@ -418,6 +425,8 @@ func mallocinit() {
 		// allocation at 0x40 << 32 because when using 4k pages with 3-level
 		// translation buffers, the user address space is limited to 39 bits
 		// On darwin/arm64, the address space is even smaller.
+		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
+		// processes.
 		for i := 0x7f; i >= 0; i-- {
 			var p uintptr
 			switch {
@@ -425,6 +434,13 @@ func mallocinit() {
 				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
 			case GOARCH == "arm64":
 				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
+			case GOOS == "aix":
+				if i == 0 {
+					// We don't use addresses directly after 0x0A00000000000000
+					// to avoid collisions with other mmaps done by non-Go programs.
+					continue
+				}
+				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
 			case raceenabled:
 				// The TSAN runtime requires the heap
 				// to be in the range [0x00c000000000,
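
The aix case above places its hints inside the 0x0a00000000000000 segment. The following
sketch inlines a few loop indices and drops uintptrMask (all ones on 64-bit) to show the
hint addresses the loop would produce:

package main

import "fmt"

func main() {
	// Mirrors p = uintptr(i)<<40 | uintptrMask&(0xa0<<52) for a 64-bit mask;
	// i == 0 is skipped in the real loop to stay clear of the segment start.
	for _, i := range []uint64{0x7f, 0x7e, 0x01} {
		p := i<<40 | 0xa0<<52
		fmt.Printf("i=%#x hint=%#x\n", i, p)
	}
}
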
@@ -458,7 +474,7 @@ func mallocinit() {
 		// 3. We try to stake out a reasonably large initial
 		// heap reservation.
 
-		const arenaMetaSize = unsafe.Sizeof([1 << arenaBits]heapArena{})
+		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
 		meta := uintptr(sysReserve(nil, arenaMetaSize))
 		if meta != 0 {
 			mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
...
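
The rewritten arenaMetaSize computes the same number of bytes as before without spelling out
a [1 << arenaBits]heapArena array type, which presumably gets unwieldy once arenaBits grows
with the larger aix address space. A minimal sketch with a toy element type and a small
exponent illustrates the equivalence:

package main

import (
	"fmt"
	"unsafe"
)

type elem struct{ a, b uint64 } // toy stand-in for heapArena

const bitsN = 10 // toy stand-in for arenaBits

func main() {
	asArrayType := unsafe.Sizeof([1 << bitsN]elem{})
	asProduct := (1 << bitsN) * unsafe.Sizeof(elem{})
	fmt.Println(asArrayType == asProduct, asProduct) // true 16384
}
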
@@ -42,8 +42,10 @@ func main() {
 	shouldPanic("makechan: size out of range", func() { _ = make(T, n) })
 	shouldPanic("makechan: size out of range", func() { _ = make(T, int64(n)) })
 	if ptrSize == 8 {
-		var n2 int64 = 1 << 50
+		// Test mem > maxAlloc
+		var n2 int64 = 1 << 59
 		shouldPanic("makechan: size out of range", func() { _ = make(T, int(n2)) })
+		// Test elem.size*cap overflow
 		n2 = 1<<63 - 1
 		shouldPanic("makechan: size out of range", func() { _ = make(T, int(n2)) })
 	} else {
...
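
The bump from 1<<50 to 1<<59 in the test above follows from the new limit: assuming the
test's channel element is 8 bytes, the requested memory must exceed maxAlloc (roughly
1<<heapAddrBits, now 1<<60 on aix) before makechan panics. A quick check of the thresholds:

package main

import "fmt"

func main() {
	const elemSize = 8 // assumed element size of the test's channel type
	for _, n := range []uint64{1 << 50, 1 << 59} {
		mem := n * elemSize
		// The old constant clears 1<<48 but not 1<<60; the new one clears both.
		fmt.Printf("n=%#x mem=%#x >1<<48: %v >1<<60: %v\n",
			n, mem, mem > 1<<48, mem > 1<<60)
	}
}
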
@@ -14,7 +14,7 @@ var bug = false
 var minus1 = -1
 var five = 5
-var big int64 = 10 | 1<<40
+var big int64 = 10 | 1<<46
 
 type block [1 << 19]byte
...
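
Similar reasoning drives the bug273 change: with 1<<19-byte blocks, a length near 1<<40 only
asks for about 1<<59 bytes, which no longer exceeds the aix limit, while a length near 1<<46
overflows 64-bit size arithmetic and so keeps triggering the failure on every platform. A
small check, assuming those sizes:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const blockSize = 1 << 19 // matches "type block [1 << 19]byte"
	for _, n := range []uint64{1 << 40, 1 << 46} {
		hi, lo := bits.Mul64(n, blockSize)
		fmt.Printf("len=%#x bytes=%#x overflows: %v too big for aix: %v\n",
			n, lo, hi != 0, hi != 0 || lo > 1<<60)
	}
}
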
@@ -21,9 +21,11 @@ func main() {
 	shouldPanic("cap out of range", func() { _ = make(T, 0, int64(n)) })
 	var t *byte
 	if unsafe.Sizeof(t) == 8 {
-		var n2 int64 = 1 << 50
+		// Test mem > maxAlloc
+		var n2 int64 = 1 << 59
 		shouldPanic("len out of range", func() { _ = make(T, int(n2)) })
 		shouldPanic("cap out of range", func() { _ = make(T, 0, int(n2)) })
+		// Test elem.size*cap overflow
 		n2 = 1<<63 - 1
 		shouldPanic("len out of range", func() { _ = make(T, int(n2)) })
 		shouldPanic("cap out of range", func() { _ = make(T, 0, int(n2)) })
...