Commit 03ef105d authored by Brad Fitzpatrick

all: remove nacl (part 3, more amd64p32)

Part 1: CL 199499 (GOOS nacl)
Part 2: CL 200077 (amd64p32 files, toolchain)
Part 3: stuff that arguably should've been part of Part 2, but I forgot
        one of my grep patterns when splitting the original CL up into
        two parts.

This one might also have interesting stuff to resurrect for any future
x32 ABI support.

Updates #30439

Change-Id: I2b4143374a253a003666f3c69e776b7e456bdb9c
Reviewed-on: https://go-review.googlesource.com/c/go/+/200318
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
parent 6dc740f0
...@@ -56,8 +56,6 @@ func Set(GOARCH string) *Arch { ...@@ -56,8 +56,6 @@ func Set(GOARCH string) *Arch {
return archX86(&x86.Link386) return archX86(&x86.Link386)
case "amd64": case "amd64":
return archX86(&x86.Linkamd64) return archX86(&x86.Linkamd64)
case "amd64p32":
return archX86(&x86.Linkamd64p32)
case "arm": case "arm":
return archArm() return archArm()
case "arm64": case "arm64":
......
...@@ -7,17 +7,12 @@ package amd64 ...@@ -7,17 +7,12 @@ package amd64
import ( import (
"cmd/compile/internal/gc" "cmd/compile/internal/gc"
"cmd/internal/obj/x86" "cmd/internal/obj/x86"
"cmd/internal/objabi"
) )
var leaptr = x86.ALEAQ var leaptr = x86.ALEAQ
func Init(arch *gc.Arch) { func Init(arch *gc.Arch) {
arch.LinkArch = &x86.Linkamd64 arch.LinkArch = &x86.Linkamd64
if objabi.GOARCH == "amd64p32" {
arch.LinkArch = &x86.Linkamd64p32
leaptr = x86.ALEAL
}
arch.REGSP = x86.REGSP arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50 arch.MAXWIDTH = 1 << 50
......
...@@ -210,19 +210,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config ...@@ -210,19 +210,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.FPReg = framepointerRegAMD64 c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64 c.LinkReg = linkRegAMD64
c.hasGReg = false c.hasGReg = false
case "amd64p32":
c.PtrSize = 4
c.RegSize = 8
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64
c.splitLoad = rewriteValueAMD64splitload
c.registers = registersAMD64[:]
c.gpRegMask = gpRegMaskAMD64
c.fpRegMask = fpRegMaskAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = false
c.noDuffDevice = true
case "386": case "386":
c.PtrSize = 4 c.PtrSize = 4
c.RegSize = 4 c.RegSize = 4
......
...@@ -1061,7 +1061,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool { ...@@ -1061,7 +1061,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
// lowers them, so we only perform this optimization on platforms that we know to // lowers them, so we only perform this optimization on platforms that we know to
// have fast Move ops. // have fast Move ops.
switch c.arch { switch c.arch {
case "amd64", "amd64p32": case "amd64":
return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz)) return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz))
case "386", "ppc64", "ppc64le", "arm64": case "386", "ppc64", "ppc64le", "arm64":
return sz <= 8 return sz <= 8
...@@ -1077,7 +1077,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool { ...@@ -1077,7 +1077,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
// for sizes < 32-bit. This is used to decide whether to promote some rotations. // for sizes < 32-bit. This is used to decide whether to promote some rotations.
func hasSmallRotate(c *Config) bool { func hasSmallRotate(c *Config) bool {
switch c.arch { switch c.arch {
case "amd64", "amd64p32", "386": case "amd64", "386":
return true return true
default: default:
return false return false
......
...@@ -24,7 +24,6 @@ import ( ...@@ -24,7 +24,6 @@ import (
var archInits = map[string]func(*gc.Arch){ var archInits = map[string]func(*gc.Arch){
"386": x86.Init, "386": x86.Init,
"amd64": amd64.Init, "amd64": amd64.Init,
"amd64p32": amd64.Init,
"arm": arm.Init, "arm": arm.Init,
"arm64": arm64.Init, "arm64": arm64.Init,
"mips": mips.Init, "mips": mips.Init,
......
...@@ -61,7 +61,6 @@ var ( ...@@ -61,7 +61,6 @@ var (
var okgoarch = []string{ var okgoarch = []string{
"386", "386",
"amd64", "amd64",
"amd64p32",
"arm", "arm",
"arm64", "arm64",
"mips", "mips",
...@@ -86,6 +85,7 @@ var okgoos = []string{ ...@@ -86,6 +85,7 @@ var okgoos = []string{
"android", "android",
"solaris", "solaris",
"freebsd", "freebsd",
"nacl", // keep;
"netbsd", "netbsd",
"openbsd", "openbsd",
"plan9", "plan9",
......
...@@ -210,7 +210,7 @@ var KnownOS = map[string]bool{ ...@@ -210,7 +210,7 @@ var KnownOS = map[string]bool{
"illumos": true, "illumos": true,
"js": true, "js": true,
"linux": true, "linux": true,
"nacl": true, "nacl": true, // legacy; don't remove
"netbsd": true, "netbsd": true,
"openbsd": true, "openbsd": true,
"plan9": true, "plan9": true,
...@@ -222,7 +222,7 @@ var KnownOS = map[string]bool{ ...@@ -222,7 +222,7 @@ var KnownOS = map[string]bool{
var KnownArch = map[string]bool{ var KnownArch = map[string]bool{
"386": true, "386": true,
"amd64": true, "amd64": true,
"amd64p32": true, "amd64p32": true, // legacy; don't remove
"arm": true, "arm": true,
"armbe": true, "armbe": true,
"arm64": true, "arm64": true,
......
...@@ -2390,7 +2390,7 @@ func (b *Builder) gccArchArgs() []string { ...@@ -2390,7 +2390,7 @@ func (b *Builder) gccArchArgs() []string {
switch cfg.Goarch { switch cfg.Goarch {
case "386": case "386":
return []string{"-m32"} return []string{"-m32"}
case "amd64", "amd64p32": case "amd64":
return []string{"-m64"} return []string{"-m64"}
case "arm": case "arm":
return []string{"-marm"} // not thumb return []string{"-marm"} // not thumb
......
...@@ -1226,16 +1226,6 @@ var Linkamd64 = obj.LinkArch{ ...@@ -1226,16 +1226,6 @@ var Linkamd64 = obj.LinkArch{
DWARFRegisters: AMD64DWARFRegisters, DWARFRegisters: AMD64DWARFRegisters,
} }
var Linkamd64p32 = obj.LinkArch{
Arch: sys.ArchAMD64P32,
Init: instinit,
Preprocess: preprocess,
Assemble: span6,
Progedit: progedit,
UnaryDst: unaryDst,
DWARFRegisters: AMD64DWARFRegisters,
}
var Link386 = obj.LinkArch{ var Link386 = obj.LinkArch{
Arch: sys.Arch386, Arch: sys.Arch386,
Init: instinit, Init: instinit,
......
...@@ -241,7 +241,7 @@ func (d *Disasm) Print(w io.Writer, filter *regexp.Regexp, start, end uint64, pr ...@@ -241,7 +241,7 @@ func (d *Disasm) Print(w io.Writer, filter *regexp.Regexp, start, end uint64, pr
fmt.Fprintf(tw, " %s:%d\t%#x\t", base(file), line, pc) fmt.Fprintf(tw, " %s:%d\t%#x\t", base(file), line, pc)
} }
if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" || d.goarch == "amd64p32" { if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" {
// Print instruction as bytes. // Print instruction as bytes.
fmt.Fprintf(tw, "%x", code[i:i+size]) fmt.Fprintf(tw, "%x", code[i:i+size])
} else { } else {
...@@ -369,7 +369,6 @@ func disasm_ppc64(code []byte, pc uint64, lookup lookupFunc, byteOrder binary.By ...@@ -369,7 +369,6 @@ func disasm_ppc64(code []byte, pc uint64, lookup lookupFunc, byteOrder binary.By
var disasms = map[string]disasmFunc{ var disasms = map[string]disasmFunc{
"386": disasm_386, "386": disasm_386,
"amd64": disasm_amd64, "amd64": disasm_amd64,
"amd64p32": disasm_amd64,
"arm": disasm_arm, "arm": disasm_arm,
"arm64": disasm_arm64, "arm64": disasm_arm64,
"ppc64": disasm_ppc64, "ppc64": disasm_ppc64,
...@@ -379,7 +378,6 @@ var disasms = map[string]disasmFunc{ ...@@ -379,7 +378,6 @@ var disasms = map[string]disasmFunc{
var byteOrders = map[string]binary.ByteOrder{ var byteOrders = map[string]binary.ByteOrder{
"386": binary.LittleEndian, "386": binary.LittleEndian,
"amd64": binary.LittleEndian, "amd64": binary.LittleEndian,
"amd64p32": binary.LittleEndian,
"arm": binary.LittleEndian, "arm": binary.LittleEndian,
"arm64": binary.LittleEndian, "arm64": binary.LittleEndian,
"ppc64": binary.BigEndian, "ppc64": binary.BigEndian,
......
...@@ -7,8 +7,7 @@ package sys ...@@ -7,8 +7,7 @@ package sys
import "encoding/binary" import "encoding/binary"
// ArchFamily represents a family of one or more related architectures. // ArchFamily represents a family of one or more related architectures.
// For example, amd64 and amd64p32 are both members of the AMD64 family, // For example, ppc64 and ppc64le are both members of the PPC64 family.
// and ppc64 and ppc64le are both members of the PPC64 family.
type ArchFamily byte type ArchFamily byte
const ( const (
...@@ -72,15 +71,6 @@ var ArchAMD64 = &Arch{ ...@@ -72,15 +71,6 @@ var ArchAMD64 = &Arch{
MinLC: 1, MinLC: 1,
} }
var ArchAMD64P32 = &Arch{
Name: "amd64p32",
Family: AMD64,
ByteOrder: binary.LittleEndian,
PtrSize: 4,
RegSize: 8,
MinLC: 1,
}
var ArchARM = &Arch{ var ArchARM = &Arch{
Name: "arm", Name: "arm",
Family: ARM, Family: ARM,
...@@ -183,7 +173,6 @@ var ArchWasm = &Arch{ ...@@ -183,7 +173,6 @@ var ArchWasm = &Arch{
var Archs = [...]*Arch{ var Archs = [...]*Arch{
Arch386, Arch386,
ArchAMD64, ArchAMD64,
ArchAMD64P32,
ArchARM, ArchARM,
ArchARM64, ArchARM64,
ArchMIPS, ArchMIPS,
......
...@@ -38,9 +38,6 @@ import ( ...@@ -38,9 +38,6 @@ import (
func Init() (*sys.Arch, ld.Arch) { func Init() (*sys.Arch, ld.Arch) {
arch := sys.ArchAMD64 arch := sys.ArchAMD64
if objabi.GOARCH == "amd64p32" {
arch = sys.ArchAMD64P32
}
theArch := ld.Arch{ theArch := ld.Arch{
Funcalign: funcAlign, Funcalign: funcAlign,
......
...@@ -45,7 +45,7 @@ func main() { ...@@ -45,7 +45,7 @@ func main() {
os.Exit(2) os.Exit(2)
case "386": case "386":
arch, theArch = x86.Init() arch, theArch = x86.Init()
case "amd64", "amd64p32": case "amd64":
arch, theArch = amd64.Init() arch, theArch = amd64.Init()
case "arm": case "arm":
arch, theArch = arm.Init() arch, theArch = arm.Init()
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build amd64 amd64p32 386 arm ppc64le ppc64 s390x arm64 // +build amd64 386 arm ppc64le ppc64 s390x arm64
package md5 package md5
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build !amd64,!amd64p32,!386,!arm,!ppc64le,!ppc64,!s390x,!arm64 // +build !amd64,!386,!arm,!ppc64le,!ppc64,!s390x,!arm64
package md5 package md5
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build amd64p32 arm 386 s390x // +build arm 386 s390x
package sha1 package sha1
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build !amd64,!amd64p32,!386,!arm,!s390x,!arm64 // +build !amd64,!386,!arm,!s390x,!arm64
package sha1 package sha1
......
...@@ -4,5 +4,8 @@ ...@@ -4,5 +4,8 @@
package build package build
// List of past, present, and future known GOOS and GOARCH values.
// Do not remove from this list, as these are used for go/build filename matching.
const goosList = "aix android darwin dragonfly freebsd hurd illumos js linux nacl netbsd openbsd plan9 solaris windows zos " const goosList = "aix android darwin dragonfly freebsd hurd illumos js linux nacl netbsd openbsd plan9 solaris windows zos "
const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc riscv riscv64 s390 s390x sparc sparc64 wasm " const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc riscv riscv64 s390 s390x sparc sparc64 wasm "
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build !amd64,!amd64p32,!s390x,!ppc64le,!arm64 // +build !amd64,!s390x,!ppc64le,!arm64
package crc32 package crc32
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build !386,!amd64,!amd64p32,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le // +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le
package bytealg package bytealg
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le // +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le
package bytealg package bytealg
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build !386,!amd64,!amd64p32,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!wasm // +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!wasm
package bytealg package bytealg
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le wasm // +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le wasm
package bytealg package bytealg
......
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
// +build !386 // +build !386
// +build !amd64 // +build !amd64
// +build !amd64p32
// +build !arm // +build !arm
// +build !arm64 // +build !arm64
// +build !ppc64 // +build !ppc64
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32 // +build 386 amd64
package cpu package cpu
...@@ -55,8 +55,8 @@ func doinit() { ...@@ -55,8 +55,8 @@ func doinit() {
{Name: "sse42", Feature: &X86.HasSSE42}, {Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3}, {Name: "ssse3", Feature: &X86.HasSSSE3},
// These capabilities should always be enabled on amd64(p32): // These capabilities should always be enabled on amd64:
{Name: "sse2", Feature: &X86.HasSSE2, Required: GOARCH == "amd64" || GOARCH == "amd64p32"}, {Name: "sse2", Feature: &X86.HasSSE2, Required: GOARCH == "amd64"},
} }
maxID, _, _, _ := cpuid(0, 0) maxID, _, _, _ := cpuid(0, 0)
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32 // +build 386 amd64
#include "textflag.h" #include "textflag.h"
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build 386 amd64 amd64p32 // +build 386 amd64
package cpu_test package cpu_test
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build amd64 amd64p32 // +build amd64
package math package math
......
...@@ -6104,9 +6104,6 @@ var funcLayoutTests []funcLayoutTest ...@@ -6104,9 +6104,6 @@ var funcLayoutTests []funcLayoutTest
func init() { func init() {
var argAlign uintptr = PtrSize var argAlign uintptr = PtrSize
if runtime.GOARCH == "amd64p32" {
argAlign = 2 * PtrSize
}
roundup := func(x uintptr, a uintptr) uintptr { roundup := func(x uintptr, a uintptr) uintptr {
return (x + a - 1) / a * a return (x + a - 1) / a * a
} }
......
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
package reflect package reflect
import ( import (
"runtime"
"strconv" "strconv"
"sync" "sync"
"unicode" "unicode"
...@@ -3015,9 +3014,6 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset ...@@ -3015,9 +3014,6 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset
offset += arg.size offset += arg.size
} }
argSize = offset argSize = offset
if runtime.GOARCH == "amd64p32" {
offset += -offset & (8 - 1)
}
offset += -offset & (ptrSize - 1) offset += -offset & (ptrSize - 1)
retOffset = offset retOffset = offset
for _, res := range t.out() { for _, res := range t.out() {
...@@ -3033,9 +3029,6 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset ...@@ -3033,9 +3029,6 @@ func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset
size: offset, size: offset,
ptrdata: uintptr(ptrmap.n) * ptrSize, ptrdata: uintptr(ptrmap.n) * ptrSize,
} }
if runtime.GOARCH == "amd64p32" {
x.align = 8
}
if ptrmap.n > 0 { if ptrmap.n > 0 {
x.gcdata = &ptrmap.data[0] x.gcdata = &ptrmap.data[0]
} }
......
...@@ -555,9 +555,6 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool) { ...@@ -555,9 +555,6 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool) {
// Copy results back into argument frame. // Copy results back into argument frame.
if numOut > 0 { if numOut > 0 {
off += -off & (ptrSize - 1) off += -off & (ptrSize - 1)
if runtime.GOARCH == "amd64p32" {
off = align(off, 8)
}
for i, typ := range ftyp.out() { for i, typ := range ftyp.out() {
v := out[i] v := out[i]
if v.typ == nil { if v.typ == nil {
...@@ -697,8 +694,7 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) { ...@@ -697,8 +694,7 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) {
// Copy in receiver and rest of args. // Copy in receiver and rest of args.
storeRcvr(rcvr, scratch) storeRcvr(rcvr, scratch)
// Align the first arg. Only on amd64p32 the alignment can be // Align the first arg. The alignment can't be larger than ptrSize.
// larger than ptrSize.
argOffset := uintptr(ptrSize) argOffset := uintptr(ptrSize)
if len(t.in()) > 0 { if len(t.in()) > 0 {
argOffset = align(argOffset, uintptr(t.in()[0].align)) argOffset = align(argOffset, uintptr(t.in()[0].align))
...@@ -713,17 +709,11 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) { ...@@ -713,17 +709,11 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) {
// and then copies the results back into scratch. // and then copies the results back into scratch.
call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset)) call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset))
// Copy return values. On amd64p32, the beginning of return values // Copy return values.
// is 64-bit aligned, so the caller's frame layout (which doesn't have
// a receiver) is different from the layout of the fn call, which has
// a receiver.
// Ignore any changes to args and just copy return values. // Ignore any changes to args and just copy return values.
// Avoid constructing out-of-bounds pointers if there are no return values. // Avoid constructing out-of-bounds pointers if there are no return values.
if frametype.size-retOffset > 0 { if frametype.size-retOffset > 0 {
callerRetOffset := retOffset - argOffset callerRetOffset := retOffset - argOffset
if runtime.GOARCH == "amd64p32" {
callerRetOffset = align(argSize-argOffset, 8)
}
// This copies to the stack. Write barriers are not needed. // This copies to the stack. Write barriers are not needed.
memmove(add(frame, callerRetOffset, "frametype.size > retOffset"), memmove(add(frame, callerRetOffset, "frametype.size > retOffset"),
add(scratch, retOffset, "frametype.size > retOffset"), add(scratch, retOffset, "frametype.size > retOffset"),
......
...@@ -19,9 +19,6 @@ GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8 ...@@ -19,9 +19,6 @@ GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
#ifdef GOARCH_386 #ifdef GOARCH_386
#define SKIP4 BYTE $0x90; BYTE $0x90; BYTE $0x90; BYTE $0x90 #define SKIP4 BYTE $0x90; BYTE $0x90; BYTE $0x90; BYTE $0x90
#endif #endif
#ifdef GOARCH_amd64p32
#define SKIP4 BYTE $0x90; BYTE $0x90; BYTE $0x90; BYTE $0x90
#endif
#ifdef GOARCH_wasm #ifdef GOARCH_wasm
#define SKIP4 UNDEF; UNDEF; UNDEF; UNDEF #define SKIP4 UNDEF; UNDEF; UNDEF; UNDEF
#endif #endif
......
...@@ -187,14 +187,6 @@ func infoBigStruct() []byte { ...@@ -187,14 +187,6 @@ func infoBigStruct() []byte {
typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64 typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string typePointer, typeScalar, // i string
} }
case "amd64p32":
return []byte{
typePointer, // q *int
typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
typePointer, typeScalar, typeScalar, // r []byte
typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
default: default:
panic("unknown arch") panic("unknown arch")
} }
......
...@@ -11,11 +11,6 @@ ...@@ -11,11 +11,6 @@
#define g(r) 0(r)(TLS*1) #define g(r) 0(r)(TLS*1)
#endif #endif
#ifdef GOARCH_amd64p32
#define get_tls(r) MOVL TLS, r
#define g(r) 0(r)(TLS*1)
#endif
#ifdef GOARCH_386 #ifdef GOARCH_386
#define get_tls(r) MOVL TLS, r #define get_tls(r) MOVL TLS, r
#define g(r) 0(r)(TLS*1) #define g(r) 0(r)(TLS*1)
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
// xxhash: https://code.google.com/p/xxhash/ // xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/ // cityhash: https://code.google.com/p/cityhash/
// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x wasm // +build amd64 arm64 mips64 mips64le ppc64 ppc64le s390x wasm
package runtime package runtime
......
...@@ -2,8 +2,6 @@ ...@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build amd64 amd64p32
package atomic package atomic
import "unsafe" import "unsafe"
......
...@@ -86,15 +86,9 @@ func TestUnaligned64(t *testing.T) { ...@@ -86,15 +86,9 @@ func TestUnaligned64(t *testing.T) {
// a continual source of pain. Test that on 32-bit systems they crash // a continual source of pain. Test that on 32-bit systems they crash
// instead of failing silently. // instead of failing silently.
switch runtime.GOARCH {
default:
if unsafe.Sizeof(int(0)) != 4 { if unsafe.Sizeof(int(0)) != 4 {
t.Skip("test only runs on 32-bit systems") t.Skip("test only runs on 32-bit systems")
} }
case "amd64p32":
// amd64p32 can handle unaligned atomics.
t.Skipf("test not needed on %v", runtime.GOARCH)
}
x := make([]uint32, 4) x := make([]uint32, 4)
u := unsafe.Pointer(uintptr(unsafe.Pointer(&x[0])) | 4) // force alignment to 4 u := unsafe.Pointer(uintptr(unsafe.Pointer(&x[0])) | 4) // force alignment to 4
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build 386 amd64p32 arm mips mipsle // +build 386 arm mips mipsle
package runtime package runtime
......
...@@ -922,7 +922,7 @@ var ( ...@@ -922,7 +922,7 @@ var (
// Information about what cpu features are available. // Information about what cpu features are available.
// Packages outside the runtime should not use these // Packages outside the runtime should not use these
// as they are not an external api. // as they are not an external api.
// Set on startup in asm_{386,amd64,amd64p32}.s // Set on startup in asm_{386,amd64}.s
processorVersionInfo uint32 processorVersionInfo uint32
isIntel bool isIntel bool
lfenceBeforeRdtsc bool lfenceBeforeRdtsc bool
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build amd64 amd64p32 // +build amd64
// +build darwin dragonfly freebsd linux netbsd openbsd solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris
package runtime package runtime
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build 386 arm amd64p32 mips mipsle // +build 386 arm mips mipsle
package runtime package runtime
......
...@@ -2,8 +2,6 @@ ...@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build amd64 amd64p32
package runtime package runtime
// stackcheck checks that SP is in range [g->stack.lo, g->stack.hi). // stackcheck checks that SP is in range [g->stack.lo, g->stack.hi).
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build amd64 amd64p32 386 // +build amd64 386
package runtime package runtime
......
...@@ -84,7 +84,7 @@ const ( ...@@ -84,7 +84,7 @@ const (
// and ppc64le. // and ppc64le.
// Tracing won't work reliably for architectures where cputicks is emulated // Tracing won't work reliably for architectures where cputicks is emulated
// by nanotime, so the value doesn't matter for those architectures. // by nanotime, so the value doesn't matter for those architectures.
traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32) traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
// Maximum number of PCs in a single stack trace. // Maximum number of PCs in a single stack trace.
// Since events contain only stack id rather than whole stack trace, // Since events contain only stack id rather than whole stack trace,
// we can allow quite large values here. // we can allow quite large values here.
......
...@@ -26,8 +26,8 @@ import ( ...@@ -26,8 +26,8 @@ import (
// takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes. // takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes.
// Typically this is ptrSize. // Typically this is ptrSize.
// //
// As an exception, amd64p32 has ptrSize == 4 but the CALL instruction still // As an exception, amd64p32 had ptrSize == 4 but the CALL instruction still
// stores an 8-byte return PC onto the stack. To accommodate this, we use regSize // stored an 8-byte return PC onto the stack. To accommodate this, we used regSize
// as the size of the architecture-pushed return PC. // as the size of the architecture-pushed return PC.
// //
// usesLR is defined below in terms of minFrameSize, which is defined in // usesLR is defined below in terms of minFrameSize, which is defined in
......
...@@ -1391,16 +1391,9 @@ func TestUnaligned64(t *testing.T) { ...@@ -1391,16 +1391,9 @@ func TestUnaligned64(t *testing.T) {
// Unaligned 64-bit atomics on 32-bit systems are // Unaligned 64-bit atomics on 32-bit systems are
// a continual source of pain. Test that on 32-bit systems they crash // a continual source of pain. Test that on 32-bit systems they crash
// instead of failing silently. // instead of failing silently.
switch runtime.GOARCH {
default:
if !arch32 { if !arch32 {
t.Skip("test only runs on 32-bit systems") t.Skip("test only runs on 32-bit systems")
} }
case "amd64p32":
// amd64p32 can handle unaligned atomics.
t.Skipf("test not needed on %v", runtime.GOARCH)
}
x := make([]uint32, 4) x := make([]uint32, 4)
p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// //
// +build 386 amd64 amd64p32 arm arm64 ppc64le mips64le mipsle wasm // +build 386 amd64 arm arm64 ppc64le mips64le mipsle wasm
package syscall package syscall
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment