Commit 6d6e1600 authored by Austin Clements

[dev.garbage] Merge branch 'master' into dev.garbage

Change-Id: I47ac4112befc07d3674d7a88827227199edd93b4
parents 22972d22 5e1b7bde
@@ -16,6 +16,7 @@ API additions and behavior changes:
 runtime: add CallerFrames and Frames (CL 19869)
 testing/quick: now generates nil values (CL 16470)
+net/http/httptest: ResponseRecorder supports trailer (CL 20047) (compat impact: issue 14928)
 net/url: support query string without values (CL 19931)
 net/textproto: permit all valid token chars in CanonicalMIMEHeaderKey input (CL 18725)
 go/doc: add Unordered boolean to Example struct (CL 19280)
@@ -118,13 +118,12 @@ func goEnv(key string) string {
 }
 func compilemain(t *testing.T, libgo string) {
-	ccArgs := append(cc, "-o", "testp"+exeSuffix)
+	ccArgs := append(cc, "-o", "testp"+exeSuffix, "main.c")
 	if GOOS == "windows" {
-		ccArgs = append(ccArgs, "main_windows.c")
+		ccArgs = append(ccArgs, "main_windows.c", libgo, "-lntdll", "-lws2_32")
 	} else {
-		ccArgs = append(ccArgs, "main_unix.c")
+		ccArgs = append(ccArgs, "main_unix.c", libgo)
 	}
-	ccArgs = append(ccArgs, "main.c", libgo)
 	t.Log(ccArgs)
 	if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
...
@@ -18,7 +18,7 @@ goos=$(go env GOOS)
 goarch=$(go env GOARCH)
 goroot=$(go env GOROOT)
 if [ ! -d "$goroot" ]; then
-	echo 'misc/cgo/testcshared/test.bash cannnot find GOROOT' 1>&2
+	echo 'misc/cgo/testcshared/test.bash cannot find GOROOT' 1>&2
 	echo '$GOROOT:' "$GOROOT" 1>&2
 	echo 'go env GOROOT:' "$goroot" 1>&2
 	exit 1
...
@@ -732,7 +732,7 @@ func TestABIChecking(t *testing.T) {
 	// If we make an ABI-breaking change to dep and rebuild libp.so but not exe,
 	// exe will abort with a complaint on startup.
 	// This assumes adding an exported function breaks ABI, which is not true in
-	// some senses but suffices for the narrow definition of ABI compatiblity the
+	// some senses but suffices for the narrow definition of ABI compatibility the
 	// toolchain uses today.
 	resetFileStamps()
 	appendFile("src/dep/dep.go", "func ABIBreak() {}\n")
...
@@ -10,6 +10,7 @@ import (
 	"cmd/internal/obj/arm64"
 	"cmd/internal/obj/mips"
 	"cmd/internal/obj/ppc64"
+	"cmd/internal/obj/s390x"
 	"cmd/internal/obj/x86"
 	"fmt"
 	"strings"
@@ -74,6 +75,10 @@ func Set(GOARCH string) *Arch {
 		a := archPPC64()
 		a.LinkArch = &ppc64.Linkppc64le
 		return a
+	case "s390x":
+		a := archS390x()
+		a.LinkArch = &s390x.Links390x
+		return a
 	}
 	return nil
 }
@@ -416,3 +421,56 @@ func archMips64() *Arch {
 		IsJump: jumpMIPS64,
 	}
 }
+
+func archS390x() *Arch {
+	register := make(map[string]int16)
+	// Create maps for easy lookup of instruction names etc.
+	// Note that there is no list of names as there is for x86.
+	for i := s390x.REG_R0; i <= s390x.REG_R15; i++ {
+		register[obj.Rconv(i)] = int16(i)
+	}
+	for i := s390x.REG_F0; i <= s390x.REG_F15; i++ {
+		register[obj.Rconv(i)] = int16(i)
+	}
+	for i := s390x.REG_V0; i <= s390x.REG_V31; i++ {
+		register[obj.Rconv(i)] = int16(i)
+	}
+	for i := s390x.REG_AR0; i <= s390x.REG_AR15; i++ {
+		register[obj.Rconv(i)] = int16(i)
+	}
+	register["LR"] = s390x.REG_LR
+
+	// Pseudo-registers.
+	register["SB"] = RSB
+	register["FP"] = RFP
+	register["PC"] = RPC
+
+	// Avoid unintentionally clobbering g using R13.
+	delete(register, "R13")
+	register["g"] = s390x.REG_R13
+
+	registerPrefix := map[string]bool{
+		"AR": true,
+		"F":  true,
+		"R":  true,
+	}
+
+	instructions := make(map[string]obj.As)
+	for i, s := range obj.Anames {
+		instructions[s] = obj.As(i)
+	}
+	for i, s := range s390x.Anames {
+		if obj.As(i) >= obj.A_ARCHSPECIFIC {
+			instructions[s] = obj.As(i) + obj.ABaseS390X
+		}
+	}
+	// Annoying aliases.
+	instructions["BR"] = s390x.ABR
+	instructions["BL"] = s390x.ABL
+
+	return &Arch{
+		LinkArch:       &s390x.Links390x,
+		Instructions:   instructions,
+		Register:       register,
+		RegisterPrefix: registerPrefix,
+		RegisterNumber: s390xRegisterNumber,
+		IsJump:         jumpS390x,
+	}
+}
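Editor's note: the tables built by archS390x drive the table-driven assembler front end — mnemonics resolve through Instructions, named registers through Register, and numbered forms like V(17) through RegisterNumber. A minimal sketch of the lookups, assuming it runs inside this arch package (the variable names are illustrative only):

	a := Set("s390x")
	op := a.Instructions["ADD"]          // opcode constant; s390x-specific ops carry the ABaseS390X offset
	g := a.Register["g"]                 // R13 is reachable only under its runtime name g
	v17, ok := a.RegisterNumber("V", 17) // s390x.REG_V0 + 17
	_, _, _, _ = op, g, v17, ok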
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file encapsulates some of the odd characteristics of the
// s390x instruction set, to minimize its interaction
// with the core of the assembler.

package arch

import (
	"cmd/internal/obj"
	"cmd/internal/obj/s390x"
)
func jumpS390x(word string) bool {
	switch word {
	case "BC",
		"BCL",
		"BEQ",
		"BGE",
		"BGT",
		"BL",
		"BLE",
		"BLT",
		"BNE",
		"BR",
		"BVC",
		"BVS",
		"CMPBEQ",
		"CMPBGE",
		"CMPBGT",
		"CMPBLE",
		"CMPBLT",
		"CMPBNE",
		"CMPUBEQ",
		"CMPUBGE",
		"CMPUBGT",
		"CMPUBLE",
		"CMPUBLT",
		"CMPUBNE",
		"CALL",
		"JMP":
		return true
	}
	return false
}
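Editor's note: jumpS390x is installed as Arch.IsJump above, so the parser uses it to decide which mnemonics take a branch target and must go through asmJump rather than asmInstruction. A quick illustration of the classification, under that assumption:

	jumpS390x("CMPBEQ") // true: compare-and-branch, parsed as a jump
	jumpS390x("MOVD")   // false: parsed as an ordinary instruction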
// IsS390xRLD reports whether the op (as defined by an s390x.A* constant) is
// one of the RLD-like instructions that require special handling.
// The FMADD-like instructions behave similarly.
func IsS390xRLD(op obj.As) bool {
	switch op {
	case s390x.AFMADD,
		s390x.AFMADDS,
		s390x.AFMSUB,
		s390x.AFMSUBS,
		s390x.AFNMADD,
		s390x.AFNMADDS,
		s390x.AFNMSUB,
		s390x.AFNMSUBS:
		return true
	}
	return false
}

// IsS390xCMP reports whether the op (as defined by an s390x.A* constant) is
// one of the CMP instructions that require special handling.
func IsS390xCMP(op obj.As) bool {
	switch op {
	case s390x.ACMP, s390x.ACMPU, s390x.ACMPW, s390x.ACMPWU:
		return true
	}
	return false
}

// IsS390xNEG reports whether the op (as defined by an s390x.A* constant) is
// one of the NEG-like instructions that require special handling.
func IsS390xNEG(op obj.As) bool {
	switch op {
	case s390x.AADDME,
		s390x.AADDZE,
		s390x.ANEG,
		s390x.ASUBME,
		s390x.ASUBZE:
		return true
	}
	return false
}

// IsS390xWithLength reports whether the op (as defined by an s390x.A* constant)
// refers to an instruction which takes a length as its first argument.
func IsS390xWithLength(op obj.As) bool {
	switch op {
	case s390x.AMVC, s390x.ACLC, s390x.AXC, s390x.AOC, s390x.ANC:
		return true
	case s390x.AVLL, s390x.AVSTL:
		return true
	}
	return false
}

// IsS390xWithIndex reports whether the op (as defined by an s390x.A* constant)
// refers to an instruction which takes an index as its first argument.
func IsS390xWithIndex(op obj.As) bool {
	switch op {
	case s390x.AVSCEG, s390x.AVSCEF, s390x.AVGEG, s390x.AVGEF:
		return true
	case s390x.AVGMG, s390x.AVGMF, s390x.AVGMH, s390x.AVGMB:
		return true
	case s390x.AVLEIG, s390x.AVLEIF, s390x.AVLEIH, s390x.AVLEIB:
		return true
	case s390x.AVPDI:
		return true
	}
	return false
}

func s390xRegisterNumber(name string, n int16) (int16, bool) {
	switch name {
	case "AR":
		if 0 <= n && n <= 15 {
			return s390x.REG_AR0 + n, true
		}
	case "F":
		if 0 <= n && n <= 15 {
			return s390x.REG_F0 + n, true
		}
	case "R":
		if 0 <= n && n <= 15 {
			return s390x.REG_R0 + n, true
		}
	case "V":
		if 0 <= n && n <= 31 {
			return s390x.REG_V0 + n, true
		}
	}
	return 0, false
}
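Editor's note: s390xRegisterNumber backs the parenthesized register syntax R(n), F(n), V(n) and AR(n); out-of-range indexes return false so the parser can report an error. A minimal sketch, assuming the register constants from cmd/internal/obj/s390x:

	if r, ok := s390xRegisterNumber("V", 17); ok {
		_ = r // s390x.REG_V0 + 17, i.e. V17
	}
	if _, ok := s390xRegisterNumber("R", 16); !ok {
		// R16 does not exist on s390x; the parser rejects it.
	}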
@@ -386,6 +386,20 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) {
 			prog.Reg = p.getRegister(prog, op, &a[1])
 			break
 		}
+		if p.arch.Thechar == 'z' {
+			// 3-operand jumps.
+			target = &a[2]
+			prog.From = a[0]
+			if a[1].Reg != 0 {
+				// Compare two registers and jump.
+				prog.Reg = p.getRegister(prog, op, &a[1])
+			} else {
+				// Compare register with immediate and jump.
+				prog.From3 = newAddr(a[1])
+			}
+			break
+		}
 		fallthrough
 	default:
 		p.errorf("wrong number of arguments to %s instruction", obj.Aconv(op))
@@ -598,6 +612,15 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
 			p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op))
 			return
 		}
+	case 'z':
+		if arch.IsS390xWithLength(op) || arch.IsS390xWithIndex(op) {
+			prog.From = a[1]
+			prog.From3 = newAddr(a[0])
+		} else {
+			prog.Reg = p.getRegister(prog, op, &a[1])
+			prog.From = a[0]
+		}
+		prog.To = a[2]
 	default:
 		p.errorf("TODO: implement three-operand instructions for this architecture")
 		return
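Editor's note: for ordinary three-operand s390x instructions the middle register goes to prog.Reg, but the storage-to-storage ops recognized by IsS390xWithLength/IsS390xWithIndex are reordered so the leading length (or index) operand travels in prog.From3. Schematically, again using testdata forms:

	// ADD R1, R2, R3         -> From = R1, Reg = R2, To = R3
	// MVC $8, (R15), n-8(SP) -> From = (R15), From3 = $8, To = n-8(SP)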
@@ -633,6 +656,13 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
 			prog.To = a[3]
 			break
 		}
+		if p.arch.Thechar == 'z' {
+			prog.From = a[1]
+			prog.Reg = p.getRegister(prog, op, &a[2])
+			prog.From3 = newAddr(a[0])
+			prog.To = a[3]
+			break
+		}
 		p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op))
 		return
 	case 5:
...
@@ -389,3 +389,7 @@ func TestMIPS64EndToEnd(t *testing.T) {
 func TestPPC64EndToEnd(t *testing.T) {
 	testEndToEnd(t, "ppc64", "ppc64")
 }
+
+func TestS390XEndToEnd(t *testing.T) {
+	testEndToEnd(t, "s390x", "s390x")
+}
@@ -70,6 +70,11 @@ func TestMIPS64OperandParser(t *testing.T) {
 	testOperandParser(t, parser, mips64OperandTests)
 }
+
+func TestS390XOperandParser(t *testing.T) {
+	parser := newParser("s390x")
+	testOperandParser(t, parser, s390xOperandTests)
+}

 type operandTest struct {
 	input, output string
 }
@@ -526,3 +531,101 @@ var mips64OperandTests = []operandTest{
 	{"·trunc(SB)", "\"\".trunc(SB)"},
 	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
 }
+
+var s390xOperandTests = []operandTest{
+	{"$((1<<63)-1)", "$9223372036854775807"},
+	{"$(-64*1024)", "$-65536"},
+	{"$(1024 * 8)", "$8192"},
+	{"$-1", "$-1"},
+	{"$-24(R4)", "$-24(R4)"},
+	{"$0", "$0"},
+	{"$0(R1)", "$(R1)"},
+	{"$0.5", "$(0.5)"},
+	{"$0x7000", "$28672"},
+	{"$0x88888eef", "$2290650863"},
+	{"$1", "$1"},
+	{"$_main<>(SB)", "$_main<>(SB)"},
+	{"$argframe(FP)", "$argframe(FP)"},
+	{"$~3", "$-4"},
+	{"(-288-3*8)(R1)", "-312(R1)"},
+	{"(16)(R7)", "16(R7)"},
+	{"(8)(g)", "8(g)"},
+	{"(R0)", "(R0)"},
+	{"(R3)", "(R3)"},
+	{"(R4)", "(R4)"},
+	{"(R5)", "(R5)"},
+	{"-1(R4)", "-1(R4)"},
+	{"-1(R5)", "-1(R5)"},
+	{"6(PC)", "6(PC)"},
+	{"R0", "R0"},
+	{"R1", "R1"},
+	{"R2", "R2"},
+	{"R3", "R3"},
+	{"R4", "R4"},
+	{"R5", "R5"},
+	{"R6", "R6"},
+	{"R7", "R7"},
+	{"R8", "R8"},
+	{"R9", "R9"},
+	{"R10", "R10"},
+	{"R11", "R11"},
+	{"R12", "R12"},
+	// {"R13", "R13"}, R13 is g
+	{"R14", "R14"},
+	{"R15", "R15"},
+	{"F0", "F0"},
+	{"F1", "F1"},
+	{"F2", "F2"},
+	{"F3", "F3"},
+	{"F4", "F4"},
+	{"F5", "F5"},
+	{"F6", "F6"},
+	{"F7", "F7"},
+	{"F8", "F8"},
+	{"F9", "F9"},
+	{"F10", "F10"},
+	{"F11", "F11"},
+	{"F12", "F12"},
+	{"F13", "F13"},
+	{"F14", "F14"},
+	{"F15", "F15"},
+	{"V0", "V0"},
+	{"V1", "V1"},
+	{"V2", "V2"},
+	{"V3", "V3"},
+	{"V4", "V4"},
+	{"V5", "V5"},
+	{"V6", "V6"},
+	{"V7", "V7"},
+	{"V8", "V8"},
+	{"V9", "V9"},
+	{"V10", "V10"},
+	{"V11", "V11"},
+	{"V12", "V12"},
+	{"V13", "V13"},
+	{"V14", "V14"},
+	{"V15", "V15"},
+	{"V16", "V16"},
+	{"V17", "V17"},
+	{"V18", "V18"},
+	{"V19", "V19"},
+	{"V20", "V20"},
+	{"V21", "V21"},
+	{"V22", "V22"},
+	{"V23", "V23"},
+	{"V24", "V24"},
+	{"V25", "V25"},
+	{"V26", "V26"},
+	{"V27", "V27"},
+	{"V28", "V28"},
+	{"V29", "V29"},
+	{"V30", "V30"},
+	{"V31", "V31"},
+	{"a(FP)", "a(FP)"},
+	{"g", "g"},
+	{"ret+8(FP)", "ret+8(FP)"},
+	{"runtime·abort(SB)", "runtime.abort(SB)"},
+	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+	{"·trunc(SB)", "\"\".trunc(SB)"},
+	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
+}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

TEXT main·foo(SB),7,$16-0 // TEXT main.foo(SB), 7, $16-0
MOVD R1, R2 // b9040021
MOVW R3, R4 // b9140043
MOVH R5, R6 // b9070065
MOVB R7, R8 // b9060087
MOVWZ R1, R2 // b9160021
MOVHZ R2, R3 // b9850032
MOVBZ R4, R5 // b9840054
MOVDBR R1, R2 // b90f0021
MOVWBR R3, R4 // b91f0043
MOVD (R15), R1 // e310f0000004
MOVW (R15), R2 // e320f0000014
MOVH (R15), R3 // e330f0000015
MOVB (R15), R4 // e340f0000077
MOVWZ (R15), R5 // e350f0000016
MOVHZ (R15), R6 // e360f0000091
MOVBZ (R15), R7 // e370f0000090
MOVDBR (R15), R8 // e380f000000f
MOVWBR (R15), R9 // e390f000001e
MOVD R1, n-8(SP) // e310f0100024
MOVW R2, n-8(SP) // e320f0100050
MOVH R3, n-8(SP) // e330f0100070
MOVB R4, n-8(SP) // e340f0100072
MOVWZ R5, n-8(SP) // e350f0100050
MOVHZ R6, n-8(SP) // e360f0100070
MOVBZ R7, n-8(SP) // e370f0100072
MOVDBR R8, n-8(SP) // e380f010002f
MOVWBR R9, n-8(SP) // e390f010003e
MOVD $-8589934592, R1 // c01efffffffe
MOVW $-131072, R2 // c021fffe0000
MOVH $-512, R3 // a739fe00
MOVB $-1, R4 // a749ffff
MOVD $-2147483648, n-8(SP) // c0b180000000e3b0f0100024
MOVW $-131072, n-8(SP) // c0b1fffe0000e3b0f0100050
MOVH $-512, n-8(SP) // e544f010fe00
MOVB $-1, n-8(SP) // 92fff010
ADD R1, R2 // b9e81022
ADD R1, R2, R3 // b9e81032
ADD $8192, R1 // c21800002000
ADD $8192, R1, R2 // ec21200000d9
ADDC R1, R2 // b9ea1022
ADDC $1, R1, R2 // b9040021c22a00000001
ADDC R1, R2, R3 // b9ea1032
SUB R3, R4 // b9090043
SUB R3, R4, R5 // b9e93054
SUB $8192, R3 // c238ffffe000
SUB $8192, R3, R4 // ec43e00000d9
SUBC R1, R2 // b90b0021
SUBC $1, R1, R2 // b9040021c22affffffff
SUBC R2, R3, R4 // b9eb2043
MULLW R6, R7 // b91c0076
MULLW R6, R7, R8 // b9040087b91c0086
MULLW $8192, R6 // c26000002000
MULLW $8192, R6, R7 // b9040076c27000002000
DIVD R1, R2 // b90400b2b90d00a1b904002b
DIVD R1, R2, R3 // b90400b2b90d00a1b904003b
DIVW R4, R5 // b90400b5b91d00a4b904005b
DIVW R4, R5, R6 // b90400b5b91d00a4b904006b
DIVDU R7, R8 // b90400a0b90400b8b98700a7b904008b
DIVDU R7, R8, R9 // b90400a0b90400b8b98700a7b904009b
DIVWU R1, R2 // b90400a0b90400b2b99700a1b904002b
DIVWU R1, R2, R3 // b90400a0b90400b2b99700a1b904003b
XC $8, (R15), n-8(SP) // XC (R15), $8, n-8(SP) // d707f010f000
NC $8, (R15), n-8(SP) // NC (R15), $8, n-8(SP) // d407f010f000
OC $8, (R15), n-8(SP) // OC (R15), $8, n-8(SP) // d607f010f000
MVC $8, (R15), n-8(SP) // MVC (R15), $8, n-8(SP) // d207f010f000
CLC $8, (R15), n-8(SP) // CLC (R15), $8, n-8(SP) // d507f000f010
XC $256, -8(R15), -8(R15) // XC -8(R15), $256, -8(R15) // b90400afc2a8fffffff8d7ffa000a000
MVC $256, 8192(R1), 8192(R2) // MVC 8192(R1), $256, 8192(R2) // b90400a2c2a800002000b90400b1c2b800002000d2ffa000b000
CMP R1, R2 // b9200012
CMP R3, $-2147483648 // c23c80000000
CMPU R4, R5 // b9210045
CMPU R6, $4294967295 // c26effffffff
CMPW R7, R8 // 1978
CMPW R9, $-2147483648 // c29d80000000
CMPWU R1, R2 // 1512
CMPWU R3, $4294967295 // c23fffffffff
BNE 0(PC) // a7740000
BEQ 0(PC) // a7840000
BLT 0(PC) // a7440000
BLE 0(PC) // a7c40000
BGT 0(PC) // a7240000
BGE 0(PC) // a7a40000
CMPBNE R1, R2, 0(PC) // ec1200007064
CMPBEQ R3, R4, 0(PC) // ec3400008064
CMPBLT R5, R6, 0(PC) // ec5600004064
CMPBLE R7, R8, 0(PC) // ec780000c064
CMPBGT R9, R1, 0(PC) // ec9100002064
CMPBGE R2, R3, 0(PC) // ec230000a064
CMPBNE R1, $-127, 0(PC) // ec170000817c
CMPBEQ R3, $0, 0(PC) // ec380000007c
CMPBLT R5, $128, 0(PC) // ec540000807c
CMPBLE R7, $127, 0(PC) // ec7c00007f7c
CMPBGT R9, $0, 0(PC) // ec920000007c
CMPBGE R2, $128, 0(PC) // ec2a0000807c
CMPUBNE R1, R2, 0(PC) // ec1200007065
CMPUBEQ R3, R4, 0(PC) // ec3400008065
CMPUBLT R5, R6, 0(PC) // ec5600004065
CMPUBLE R7, R8, 0(PC) // ec780000c065
CMPUBGT R9, R1, 0(PC) // ec9100002065
CMPUBGE R2, R3, 0(PC) // ec230000a065
CMPUBNE R1, $256, 0(PC) // ec170000007d
CMPUBEQ R3, $0, 0(PC) // ec380000007d
CMPUBLT R5, $256, 0(PC) // ec540000007d
CMPUBLE R7, $0, 0(PC) // ec7c0000007d
CMPUBGT R9, $256, 0(PC) // ec920000007d
CMPUBGE R2, $0, 0(PC) // ec2a0000007d
CEFBRA R0, F15 // b39400f0
CDFBRA R1, F14 // b39500e1
CEGBRA R2, F13 // b3a400d2
CDGBRA R3, F12 // b3a500c3
CELFBR R0, F15 // b39000f0
CDLFBR R1, F14 // b39100e1
CELGBR R2, F13 // b3a000d2
CDLGBR R3, F12 // b3a100c3
CFEBRA F15, R1 // b398501f
CFDBRA F14, R2 // b399502e
CGEBRA F13, R3 // b3a8503d
CGDBRA F12, R4 // b3a9504c
CLFEBR F15, R1 // b39c501f
CLFDBR F14, R2 // b39d502e
CLGEBR F13, R3 // b3ac503d
CLGDBR F12, R4 // b3ad504c
FMOVS $0, F11 // b37400b0
FMOVD $0, F12 // b37500c0
FMOVS (R1)(R2*1), F0 // ed0210000064
FMOVS n-8(SP), F15 // edf0f0100064
FMOVD -9999999(R8)(R9*1), F8 // c0a1ff67698141aa9000ed8a80000065
FMOVD F4, F5 // 2854
FADDS F0, F15 // b30a00f0
FADD F1, F14 // b31a00e1
FSUBS F2, F13 // b30b00d2
FSUB F3, F12 // b31b00c3
FMULS F4, F11 // b31700b4
FMUL F5, F10 // b31c00a5
FDIVS F6, F9 // b30d0096
FDIV F7, F8 // b31d0087
FABS F1, F2 // b3100021
FSQRTS F3, F4 // b3140043
FSQRT F5, F15 // b31500f5
VL (R15), V1 // e710f0000006
VST V1, (R15) // e710f000000e
VL (R15), V31 // e7f0f0000806
VST V31, (R15) // e7f0f000080e
VESLB $5, V14 // e7ee00050030
VESRAG $0, V15, V16 // e70f0000383a
VLM (R15), V8, V23 // e787f0000436
VSTM V8, V23, (R15) // e787f000043e
VONE V1 // e710ffff0044
VZERO V16 // e70000000844
VGBM $52428, V31 // e7f0cccc0844
VREPIB $255, V4 // e74000ff0045
VREPG $1, V4, V16 // e7040001384d
VREPB $4, V31, V1 // e71f0004044d
VFTCIDB $4095, V1, V2 // e721fff0304a
WFTCIDB $3276, V15, V16 // e70fccc8384a
VPOPCT V8, V19 // e73800000850
VFEEZBS V1, V2, V31 // e7f120300880
WFCHDBS V22, V23, V4 // e746701836eb
VMNH V1, V2, V30 // e7e1200018fe
VO V2, V1, V0 // e7021000006a
VERLLVF V2, V30, V27 // e7be20002c73
VSCBIB V0, V23, V24 // e78700000cf5
VNOT V16, V1 // e7101000046b
VCLZF V16, V17 // e71000002c53
VLVGP R3, R4, V8 // e78340000062
// Some vector instructions have their inputs reordered.
// Typically the reordering puts the length/index input into From3.
VGEG $1, 8(R15)(V30*1), V31 // VGEG 8(R15)(V30*1), $1, V31 // e7fef0081c12
VSCEG $1, V31, 16(R15)(V30*1) // VSCEG V31, $1, 16(R15)(V30*1) // e7fef0101c1a
VGEF $0, 2048(R15)(V1*1), V2 // VGEF 2048(R15)(V1*1), $0, V2 // e721f8000013
VSCEF $0, V2, 4095(R15)(V1*1) // VSCEF V2, $0, 4095(R15)(V1*1) // e721ffff001b
VLL R0, (R15), V1 // VLL (R15), R0, V1 // e710f0000037
VSTL R0, V16, (R15) // VSTL V16, R0, (R15) // e700f000083f
VGMH $8, $16, V12 // VGMH $16, $8, V12 // e7c008101046
VLEIF $2, $-43, V16 // VLEIF $-43, $2, V16 // e700ffd52843
VSLDB $3, V1, V16, V18 // VSLDB V1, V16, $3, V18 // e72100030a77
VERIMB $2, V31, V1, V2 // VERIMB V31, V1, $2, V2 // e72f10020472
VSEL V1, V2, V3, V4 // VSEL V2, V3, V1, V4 // e7412000308d
VGFMAH V21, V31, V24, V0 // VGFMAH V31, V24, V21, V0 // e705f10087bc
WFMSDB V2, V25, V24, V31 // WFMSDB V25, V24, V2, V31 // e7f298038b8e
VPERM V31, V0, V2, V3 // VPERM V0, V2, V31, V3 // e73f0000248c
VPDI $1, V2, V31, V1 // VPDI V2, V31, $1, V1 // e712f0001284
RET
TEXT main·init(SB),7,$0 // TEXT main.init(SB), 7, $0
RET
TEXT main·main(SB),7,$0 // TEXT main.main(SB), 7, $0
BL main·foo(SB) // CALL main.foo(SB)
RET
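Editor's note on the testdata format: each line carries the source form fed to the assembler, then (when the printed form differs, as for the reordered length/index and vector ops) the expected reprint, then the expected machine code in hex. A hypothetical reading of one line, assuming this is how the end-to-end harness slices it:

	line := "VLL R0, (R15), V1 // VLL (R15), R0, V1 // e710f0000037"
	parts := strings.SplitN(line, "//", 3)
	asm := strings.TrimSpace(parts[0])      // source form
	reprint := strings.TrimSpace(parts[1])  // expected operand order after parsing
	encoding := strings.TrimSpace(parts[2]) // expected bytes, hex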
@@ -17,7 +17,7 @@ func defframe(ptxt *obj.Prog) {
 	// fill in argument size, stack size
 	ptxt.To.Type = obj.TYPE_TEXTSIZE
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
 	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
 	ptxt.To.Offset = int64(frame)
@@ -204,17 +204,17 @@ func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	t0 := t
 	check := false
-	if gc.Issigned[t.Etype] {
+	if t.IsSigned() {
 		check = true
-		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
+		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
 			check = false
-		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
 			check = false
 		}
 	}
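Editor's note: the check flag guards the one signed division that can trap: the minimum value of the type divided by -1, whose true quotient does not fit. The constant tests above elide the runtime check whenever either operand rules that pair out. The hazard in plain Go terms:

	var x int32 = -1 << 31 // -2147483648, the smallest int32
	// x / -1 would be +2147483648, which overflows int32; the spec says
	// the result wraps back to x, so the generated code must special-case
	// this operand pair instead of letting the hardware divide fault.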
 	if t.Width < 4 {
-		if gc.Issigned[t.Etype] {
+		if t.IsSigned() {
 			t = gc.Types[gc.TINT32]
 		} else {
 			t = gc.Types[gc.TUINT32]
@@ -291,7 +291,7 @@ func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	var olddx gc.Node
 	var dx gc.Node
 	savex(x86.REG_DX, &dx, &olddx, res, t)
-	if !gc.Issigned[t.Etype] {
+	if !t.IsSigned() {
 		gc.Nodconst(&n4, t, 0)
 		gmove(&n4, &dx)
 	} else {
@@ -397,7 +397,7 @@ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node)
 	var n1 gc.Node
 	gc.Regalloc(&n1, nl.Type, res)
 	gc.Cgen(nl, &n1)
-	sc := uint64(nr.Int())
+	sc := uint64(nr.Int64())
 	if sc >= uint64(nl.Type.Width*8) {
 		// large shift gets 2 shifts by width-1
 		var n3 gc.Node
@@ -478,7 +478,7 @@ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node)
 	gc.Nodconst(&n3, tcount, nl.Type.Width*8)
 	gins(optoas(gc.OCMP, tcount), &n1, &n3)
 	p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
-	if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+	if op == gc.ORSH && nl.Type.IsSigned() {
 		gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
 		gins(a, &n3, &n2)
 	} else {
@@ -531,7 +531,7 @@ func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
 	// perform full-width multiplication.
 	t := gc.Types[gc.TUINT64]
-	if gc.Issigned[nl.Type.Etype] {
+	if nl.Type.IsSigned() {
 		t = gc.Types[gc.TINT64]
 	}
 	var n1 gc.Node
...
@@ -101,7 +101,7 @@ func ginscon(as obj.As, c int64, n2 *gc.Node) {
 }
 func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
+	if t.IsInteger() && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
 		// Reverse comparison to place constant last.
 		op = gc.Brrev(op)
 		n1, n2 = n2, n1
@@ -112,7 +112,7 @@ func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 	// A special case to make write barriers more efficient.
 	// Comparing the first field of a named struct can be done directly.
 	base := n1
-	if n1.Op == gc.ODOT && n1.Left.Type.Etype == gc.TSTRUCT && n1.Left.Type.Field(0).Sym == n1.Sym {
+	if n1.Op == gc.ODOT && n1.Left.Type.IsStruct() && n1.Left.Type.Field(0).Sym == n1.Sym {
 		base = n1.Left
 	}
@@ -124,7 +124,7 @@ func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 		gc.Cgen(n1, &g1)
 		gmove(&g1, &r1)
 	}
-	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] && gc.Smallintconst(n2) {
+	if n2.Op == gc.OLITERAL && t.IsInteger() && gc.Smallintconst(n2) {
 		r2 = *n2
 	} else {
 		gc.Regalloc(&r2, t, n2)
@@ -213,7 +213,7 @@ func gmove(f *gc.Node, t *gc.Node) {
 	// 64-bit immediates are really 32-bit sign-extended
 	// unless moving into a register.
 	if gc.Isint[tt] {
-		if i := con.Int(); int64(int32(i)) != i {
+		if i := con.Int64(); int64(int32(i)) != i {
 			goto hard
 		}
 	}
@@ -1310,7 +1310,7 @@ func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
 		if !gc.Isconst(n, gc.CTINT) {
 			break
 		}
-		v := n.Int()
+		v := n.Int64()
 		if v >= 32000 || v <= -32000 {
 			break
 		}
...
This diff is collapsed.
@@ -237,7 +237,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 	// shld hi:lo, c
 	// shld lo:t, c
 	case gc.OLROT:
-		v := uint64(r.Int())
+		v := uint64(r.Int64())
 		var bl gc.Node
 		gc.Regalloc(&bl, lo1.Type, nil)
@@ -291,7 +291,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 	var p4 *obj.Prog
 	var p5 *obj.Prog
 	if r.Op == gc.OLITERAL {
-		v := uint64(r.Int())
+		v := uint64(r.Int64())
 		if v >= 64 {
 			// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
 			// here and below (verify it optimizes to EOR)
@@ -452,7 +452,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
 	var creg gc.Node
 	var p3 *obj.Prog
 	if r.Op == gc.OLITERAL {
-		v := uint64(r.Int())
+		v := uint64(r.Int64())
 		if v >= 64 {
 			if bh.Type.Etype == gc.TINT32 {
 				// MOVW bh->31, al
...
@@ -14,7 +14,7 @@ func defframe(ptxt *obj.Prog) {
 	// fill in argument size, stack size
 	ptxt.To.Type = obj.TYPE_TEXTSIZE
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
 	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
 	ptxt.To.Offset = int64(frame)
@@ -143,7 +143,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	case gc.TINT32,
 		gc.TUINT32:
 		var p *obj.Prog
-		if gc.Issigned[t.Etype] {
+		if t.IsSigned() {
 			p = gins(arm.AMULL, &n2, nil)
 		} else {
 			p = gins(arm.AMULLU, &n2, nil)
@@ -178,7 +178,7 @@ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node)
 	w := int(nl.Type.Width * 8)
 	if op == gc.OLROT {
-		v := nr.Int()
+		v := nr.Int64()
 		var n1 gc.Node
 		gc.Regalloc(&n1, nl.Type, res)
 		if w == 32 {
@@ -205,17 +205,17 @@ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node)
 	var n1 gc.Node
 	gc.Regalloc(&n1, nl.Type, res)
 	gc.Cgen(nl, &n1)
-	sc := uint64(nr.Int())
+	sc := uint64(nr.Int64())
 	if sc == 0 {
 	} else // nothing to do
 	if sc >= uint64(nl.Type.Width*8) {
-		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+		if op == gc.ORSH && nl.Type.IsSigned() {
 			gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
 		} else {
 			gins(arm.AEOR, &n1, &n1)
 		}
 	} else {
-		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+		if op == gc.ORSH && nl.Type.IsSigned() {
 			gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
 		} else if op == gc.ORSH {
 			gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1) // OLSH
@@ -294,7 +294,7 @@ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node)
 	if op == gc.ORSH {
 		var p1 *obj.Prog
 		var p2 *obj.Prog
-		if gc.Issigned[nl.Type.Etype] {
+		if nl.Type.IsSigned() {
 			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
 			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
 		} else {
@@ -475,7 +475,7 @@ func ginscon(as obj.As, c int64, n *gc.Node) {
 }
 func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
+	if t.IsInteger() && n1.Op == gc.OLITERAL && n1.Int64() == 0 && n2.Op != gc.OLITERAL {
 		op = gc.Brrev(op)
 		n1, n2 = n2, n1
 	}
@@ -484,7 +484,7 @@ func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 	gc.Regalloc(&g1, n1.Type, &r1)
 	gc.Cgen(n1, &g1)
 	gmove(&g1, &r1)
-	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
+	if t.IsInteger() && n2.Op == gc.OLITERAL && n2.Int64() == 0 {
 		gins(arm.ACMP, &r1, n2)
 	} else {
 		gc.Regalloc(&r2, t, n2)
...
@@ -112,7 +112,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
 	case gc.OLITERAL:
 		var n1 gc.Node
 		n.Convconst(&n1, n.Type)
-		i := n1.Int()
+		i := n1.Int64()
 		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
 		i >>= 32
 		if n.Type.Etype == gc.TINT64 {
@@ -1143,7 +1143,7 @@ func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
 		if !gc.Isconst(n, gc.CTINT) {
 			break
 		}
-		v := n.Int()
+		v := n.Int64()
 		if v >= 32000 || v <= -32000 {
 			break
 		}
...
@@ -85,7 +85,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 	case ssa.OpARMMOVWconst:
 		p := gc.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt2Int64()
+		p.From.Offset = v.AuxInt
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = gc.SSARegNum(v)
 	case ssa.OpARMCMP:
...
@@ -15,7 +15,7 @@ func defframe(ptxt *obj.Prog) {
 	// fill in argument size, stack size
 	ptxt.To.Type = obj.TYPE_TEXTSIZE
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
 	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
 	// arm64 requires that the frame size (not counting saved LR)
@@ -149,17 +149,17 @@ func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	t0 := t
 	check := false
-	if gc.Issigned[t.Etype] {
+	if t.IsSigned() {
 		check = true
-		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
+		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
 			check = false
-		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
 			check = false
 		}
 	}
 	if t.Width < 8 {
-		if gc.Issigned[t.Etype] {
+		if t.IsSigned() {
 			t = gc.Types[gc.TINT64]
 		} else {
 			t = gc.Types[gc.TUINT64]
@@ -287,7 +287,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	case gc.TINT64,
 		gc.TUINT64:
-		if gc.Issigned[t.Etype] {
+		if t.IsSigned() {
 			gins(arm64.ASMULH, &n2, &n1)
 		} else {
 			gins(arm64.AUMULH, &n2, &n1)
@@ -314,7 +314,7 @@ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node)
 	var n1 gc.Node
 	gc.Regalloc(&n1, nl.Type, res)
 	gc.Cgen(nl, &n1)
-	sc := uint64(nr.Int())
+	sc := uint64(nr.Int64())
 	if sc >= uint64(nl.Type.Width)*8 {
 		// large shift gets 2 shifts by width-1
 		var n3 gc.Node
@@ -378,7 +378,7 @@ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node)
 	gc.Nodconst(&n3, tcount, nl.Type.Width*8)
 	gcmp(optoas(gc.OCMP, tcount), &n1, &n3)
 	p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
-	if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+	if op == gc.ORSH && nl.Type.IsSigned() {
 		gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
 		gins(a, &n3, &n2)
 	} else {
...
@@ -103,7 +103,7 @@ func ginscon2(as obj.As, n2 *gc.Node, c int64) {
 }
 func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
-	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
+	if t.IsInteger() && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
 		// Reverse comparison to place constant last.
 		op = gc.Brrev(op)
 		n1, n2 = n2, n1
@@ -114,8 +114,8 @@ func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
 	gc.Regalloc(&g1, n1.Type, &r1)
 	gc.Cgen(n1, &g1)
 	gmove(&g1, &r1)
-	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
-		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
+	if t.IsInteger() && gc.Isconst(n2, gc.CTINT) {
+		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int64())
 	} else {
 		gc.Regalloc(&r2, t, n2)
 		gc.Regalloc(&g2, n1.Type, &r2)
...
@@ -6,9 +6,13 @@ package gc
 import "fmt"
+
+// AlgKind describes the kind of algorithms used for comparing and
+// hashing a Type.
+type AlgKind int

 const (
 	// These values are known by runtime.
-	ANOEQ = iota
+	ANOEQ AlgKind = iota
 	AMEM0
 	AMEM8
 	AMEM16
@@ -22,11 +26,40 @@ const (
 	AFLOAT64
 	ACPLX64
 	ACPLX128
-	AMEM = 100
+
+	// Type can be compared/hashed as regular memory.
+	AMEM AlgKind = 100
+
+	// Type needs special comparison/hashing functions.
+	ASPECIAL AlgKind = -1
 )

-func algtype(t *Type) int {
-	a := algtype1(t, nil)
+// IsComparable reports whether t is a comparable type.
+func (t *Type) IsComparable() bool {
+	a, _ := algtype1(t)
+	return a != ANOEQ
+}
+
+// IsRegularMemory reports whether t can be compared/hashed as regular memory.
+func (t *Type) IsRegularMemory() bool {
+	a, _ := algtype1(t)
+	return a == AMEM
+}
+
+// IncomparableField returns an incomparable Field of struct Type t, if any.
+func (t *Type) IncomparableField() *Field {
+	for _, f := range t.FieldSlice() {
+		if !f.Type.IsComparable() {
+			return f
+		}
+	}
+	return nil
+}
+
+// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
+// instead of the general AMEM kind when possible.
+func algtype(t *Type) AlgKind {
+	a, _ := algtype1(t)
 	if a == AMEM {
 		switch t.Width {
 		case 0:
@@ -47,115 +80,105 @@ func algtype(t *Type) int {
 	return a
 }
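Editor's note: with AlgKind in place, the helper methods above give call sites a vocabulary for the three interesting outcomes. Roughly, for ordinary Go types (my examples, not from the diff):

	// AMEM:     struct{ a, b int64 }    — compared/hashed as raw memory
	// ASPECIAL: [4]string               — comparable, but needs a generated routine
	// ANOEQ:    struct{ m map[int]int } — incomparable; IncomparableField
	//           would report the field m as the culprit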
-func algtype1(t *Type, bad **Type) int {
-	if bad != nil {
-		*bad = nil
-	}
+// algtype1 returns the AlgKind used for comparing and hashing Type t.
+// If it returns ANOEQ, it also returns the component type of t that
+// makes it incomparable.
+func algtype1(t *Type) (AlgKind, *Type) {
 	if t.Broke {
-		return AMEM
+		return AMEM, nil
 	}
 	if t.Noalg {
-		return ANOEQ
+		return ANOEQ, t
 	}

 	switch t.Etype {
 	case TANY, TFORW:
 		// will be defined later.
-		*bad = t
-		return -1
+		return ANOEQ, t

 	case TINT8, TUINT8, TINT16, TUINT16,
 		TINT32, TUINT32, TINT64, TUINT64,
 		TINT, TUINT, TUINTPTR,
 		TBOOL, TPTR32, TPTR64,
 		TCHAN, TUNSAFEPTR:
-		return AMEM
+		return AMEM, nil

 	case TFUNC, TMAP:
-		if bad != nil {
-			*bad = t
-		}
-		return ANOEQ
+		return ANOEQ, t

 	case TFLOAT32:
-		return AFLOAT32
+		return AFLOAT32, nil

 	case TFLOAT64:
-		return AFLOAT64
+		return AFLOAT64, nil

 	case TCOMPLEX64:
-		return ACPLX64
+		return ACPLX64, nil

 	case TCOMPLEX128:
-		return ACPLX128
+		return ACPLX128, nil

 	case TSTRING:
-		return ASTRING
+		return ASTRING, nil

 	case TINTER:
-		if isnilinter(t) {
-			return ANILINTER
+		if t.IsEmptyInterface() {
+			return ANILINTER, nil
 		}
-		return AINTER
+		return AINTER, nil

 	case TARRAY:
-		if Isslice(t) {
-			if bad != nil {
-				*bad = t
-			}
-			return ANOEQ
+		if t.IsSlice() {
+			return ANOEQ, t
 		}

-		a := algtype1(t.Type, bad)
+		a, bad := algtype1(t.Elem())
 		switch a {
 		case AMEM:
-			return AMEM
+			return AMEM, nil
 		case ANOEQ:
-			if bad != nil {
-				*bad = t
-			}
-			return ANOEQ
+			return ANOEQ, bad
 		}

-		switch t.Bound {
+		switch t.NumElem() {
 		case 0:
 			// We checked above that the element type is comparable.
-			return AMEM
+			return AMEM, nil
 		case 1:
 			// Single-element array is same as its lone element.
-			return a
+			return a, nil
 		}

-		return -1 // needs special compare
+		return ASPECIAL, nil

 	case TSTRUCT:
 		fields := t.FieldSlice()

 		// One-field struct is same as that one field alone.
 		if len(fields) == 1 && !isblanksym(fields[0].Sym) {
-			return algtype1(fields[0].Type, bad)
+			return algtype1(fields[0].Type)
 		}

 		ret := AMEM
 		for i, f := range fields {
 			// All fields must be comparable.
-			a := algtype1(f.Type, bad)
+			a, bad := algtype1(f.Type)
 			if a == ANOEQ {
-				return ANOEQ
+				return ANOEQ, bad
 			}

 			// Blank fields, padded fields, fields with non-memory
 			// equality need special compare.
-			if a != AMEM || isblanksym(f.Sym) || ispaddedfield(t, fields, i) {
-				ret = -1
+			if a != AMEM || isblanksym(f.Sym) || ispaddedfield(t, i) {
+				ret = ASPECIAL
 			}
 		}

-		return ret
+		return ret, nil
 	}

 	Fatalf("algtype1: unexpected type %v", t)
-	return 0
+	return 0, nil
 }
 // Generate a helper function to compute the hash of a value of type t.
@@ -196,14 +219,14 @@ func genhash(sym *Sym, t *Type) {
 		Fatalf("genhash %v", t)
 	case TARRAY:
-		if Isslice(t) {
+		if t.IsSlice() {
 			Fatalf("genhash %v", t)
 		}
 		// An array of pure memory would be handled by the
 		// standard algorithm, so the element type must not be
 		// pure memory.
-		hashel := hashfor(t.Type)
+		hashel := hashfor(t.Elem())
 		n := Nod(ORANGE, nil, Nod(OIND, np, nil))
 		ni := newname(Lookup("i"))
@@ -239,7 +262,7 @@ func genhash(sym *Sym, t *Type) {
 		}
 		// Hash non-memory fields with appropriate hash function.
-		if algtype1(f.Type, nil) != AMEM {
+		if !f.Type.IsRegularMemory() {
 			hashel := hashfor(f.Type)
 			call := Nod(OCALL, hashel, nil)
 			nx := NodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
@@ -253,7 +276,7 @@ func genhash(sym *Sym, t *Type) {
 		}
 		// Otherwise, hash a maximal length run of raw memory.
-		size, next := memrun(t, fields, i)
+		size, next := memrun(t, i)
 		// h = hashel(&p.first, size, h)
 		hashel := hashmem(f.Type)
@@ -304,7 +327,7 @@ func genhash(sym *Sym, t *Type) {
 func hashfor(t *Type) *Node {
 	var sym *Sym
-	switch algtype1(t, nil) {
+	switch a, _ := algtype1(t); a {
 	case AMEM:
 		Fatalf("hashfor with AMEM type")
 	case AINTER:
@@ -375,7 +398,7 @@ func geneq(sym *Sym, t *Type) {
 		Fatalf("geneq %v", t)
 	case TARRAY:
-		if Isslice(t) {
+		if t.IsSlice() {
 			Fatalf("geneq %v", t)
 		}
@@ -435,14 +458,14 @@ func geneq(sym *Sym, t *Type) {
 		}
 		// Compare non-memory fields with field equality.
-		if algtype1(f.Type, nil) != AMEM {
+		if !f.Type.IsRegularMemory() {
 			and(eqfield(np, nq, f.Sym))
 			i++
 			continue
 		}
 		// Find maximal length run of memory-only fields.
-		size, next := memrun(t, fields, i)
+		size, next := memrun(t, i)
 		// TODO(rsc): All the calls to newname are wrong for
 		// cross-package unexported fields.
@@ -519,7 +542,7 @@ func eqmem(p *Node, q *Node, field *Sym, size int64) *Node {
 	nx = typecheck(nx, Erv)
 	ny = typecheck(ny, Erv)
-	fn, needsize := eqmemfunc(size, nx.Type.Type)
+	fn, needsize := eqmemfunc(size, nx.Type.Elem())
 	call := Nod(OCALL, fn, nil)
 	call.List.Append(nx)
 	call.List.Append(ny)
@@ -546,40 +569,36 @@ func eqmemfunc(size int64, t *Type) (fn *Node, needsize bool) {
 // memrun finds runs of struct fields for which memory-only algs are appropriate.
 // t is the parent struct type, and start is the field index at which to start the run.
-// The caller is responsible for providing t.FieldSlice() as fields.
 // size is the length in bytes of the memory included in the run.
 // next is the index just after the end of the memory run.
-// TODO(mdempsky): Eliminate fields parameter once struct fields are kept in slices.
-func memrun(t *Type, fields []*Field, start int) (size int64, next int) {
+func memrun(t *Type, start int) (size int64, next int) {
 	next = start
 	for {
 		next++
-		if next == len(fields) {
+		if next == t.NumFields() {
 			break
 		}
 		// Stop run after a padded field.
-		if ispaddedfield(t, fields, next-1) {
+		if ispaddedfield(t, next-1) {
 			break
 		}
 		// Also, stop before a blank or non-memory field.
-		if isblanksym(fields[next].Sym) || algtype1(fields[next].Type, nil) != AMEM {
+		if f := t.Field(next); isblanksym(f.Sym) || !f.Type.IsRegularMemory() {
 			break
 		}
 	}
-	end := fields[next-1].Width + fields[next-1].Type.Width
-	return end - fields[start].Width, next
+	return t.Field(next-1).End() - t.Field(start).Offset, next
 }

 // ispaddedfield reports whether the i'th field of struct type t is followed
-// by padding. The caller is responsible for providing t.FieldSlice() as fields.
-// TODO(mdempsky): Eliminate fields parameter once struct fields are kept in slices.
-func ispaddedfield(t *Type, fields []*Field, i int) bool {
-	if t.Etype != TSTRUCT {
+// by padding.
+func ispaddedfield(t *Type, i int) bool {
+	if !t.IsStruct() {
 		Fatalf("ispaddedfield called non-struct %v", t)
 	}
 	end := t.Width
-	if i+1 < len(fields) {
-		end = fields[i+1].Width
+	if i+1 < t.NumFields() {
+		end = t.Field(i + 1).Offset
 	}
-	return fields[i].Width+fields[i].Type.Width != end
+	return t.Field(i).End() != end
 }
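Editor's note: memrun and ispaddedfield carve a struct into maximal byte-comparable chunks using the renamed Offset/End accessors. A worked example, assuming standard layout with 8-byte alignment for int64:

	// type T struct {
	//	a int32       // offset 0, End() = 4
	//	b int32       // offset 4, End() = 8   (no padding after a)
	//	c int64       // offset 8, End() = 16
	//	m map[int]int // offset 16, not regular memory
	// }
	// memrun(T, 0) walks a, b, c, stops before m, and returns
	// size = Field(2).End() - Field(0).Offset = 16 and next = 3.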
@@ -18,7 +18,7 @@ func Rnd(o int64, r int64) int64 {
 func offmod(t *Type) {
 	o := int32(0)
 	for _, f := range t.Fields().Slice() {
-		f.Width = int64(o)
+		f.Offset = int64(o)
 		o += int32(Widthptr)
 		if int64(o) >= Thearch.MAXWIDTH {
 			Yyerror("interface too large")
@@ -53,7 +53,7 @@ func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
 		if f.Type.Align > 0 {
 			o = Rnd(o, int64(f.Type.Align))
 		}
-		f.Width = o // really offset for TFIELD
+		f.Offset = o
 		if f.Nname != nil {
 			// this same stackparam logic is in addrescapes
 			// in typecheck.go. usually addrescapes runs after
@@ -176,11 +176,11 @@ func dowidth(t *Type) {
 	case TPTR32:
 		w = 4
-		checkwidth(t.Type)
+		checkwidth(t.Elem())

 	case TPTR64:
 		w = 8
-		checkwidth(t.Type)
+		checkwidth(t.Elem())

 	case TUNSAFEPTR:
 		w = int64(Widthptr)
@@ -194,27 +194,24 @@ func dowidth(t *Type) {
 	case TCHAN: // implemented as pointer
 		w = int64(Widthptr)
-		checkwidth(t.Type)
+		checkwidth(t.Elem())

 		// make fake type to check later to
 		// trigger channel argument check.
-		t1 := typ(TCHANARGS)
-		t1.Type = t
+		t1 := typWrapper(TCHANARGS, t)
 		checkwidth(t1)

 	case TCHANARGS:
-		t1 := t.Type
-		dowidth(t.Type) // just in case
-		if t1.Type.Width >= 1<<16 {
+		t1 := t.Wrapped()
+		dowidth(t1) // just in case
+		if t1.Elem().Width >= 1<<16 {
 			Yyerror("channel element type too large (>64kB)")
 		}
 		t.Width = 1

 	case TMAP: // implemented as pointer
 		w = int64(Widthptr)
-		checkwidth(t.Type)
+		checkwidth(t.Val())
 		checkwidth(t.Key())

 	case TFORW: // should have been filled in
@@ -238,25 +235,25 @@ func dowidth(t *Type) {
 		t.Align = uint8(Widthptr)

 	case TARRAY:
-		if t.Type == nil {
+		if t.Elem() == nil {
 			break
 		}
-		if t.Bound >= 0 {
-			dowidth(t.Type)
-			if t.Type.Width != 0 {
-				cap := (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
-				if uint64(t.Bound) > cap {
+		if t.IsArray() {
+			dowidth(t.Elem())
+			if t.Elem().Width != 0 {
+				cap := (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
+				if uint64(t.NumElem()) > cap {
 					Yyerror("type %v larger than address space", Tconv(t, FmtLong))
 				}
 			}
-			w = t.Bound * t.Type.Width
-			t.Align = t.Type.Align
-		} else if t.Bound == -1 {
+			w = t.NumElem() * t.Elem().Width
+			t.Align = t.Elem().Align
+		} else if t.IsSlice() {
 			w = int64(sizeof_Array)
-			checkwidth(t.Type)
+			checkwidth(t.Elem())
 			t.Align = uint8(Widthptr)
-		} else if t.Bound == -100 {
+		} else if t.isDDDArray() {
 			if !t.Broke {
 				Yyerror("use of [...] array outside of array literal")
 				t.Broke = true
@@ -274,22 +271,17 @@ func dowidth(t *Type) {
 	// make fake type to check later to
 	// trigger function argument computation.
 	case TFUNC:
-		t1 := typ(TFUNCARGS)
-		t1.Type = t
+		t1 := typWrapper(TFUNCARGS, t)
 		checkwidth(t1)
-
-		// width of func type is pointer
-		w = int64(Widthptr)
+		w = int64(Widthptr) // width of func type is pointer

 	// function is 3 cated structures;
 	// compute their widths as side-effect.
 	case TFUNCARGS:
-		t1 := t.Type
-		w = widstruct(t.Type, t1.Recvs(), 0, 0)
-		w = widstruct(t.Type, t1.Params(), w, Widthreg)
-		w = widstruct(t.Type, t1.Results(), w, Widthreg)
+		t1 := t.Wrapped()
+		w = widstruct(t1, t1.Recvs(), 0, 0)
+		w = widstruct(t1, t1.Params(), w, Widthreg)
+		w = widstruct(t1, t1.Results(), w, Widthreg)
 		t1.Argwid = w
 		if w%int64(Widthreg) != 0 {
 			Warn("bad type %v %d\n", t1, w)
@@ -388,7 +380,7 @@ func Argsize(t *Type) int {
 	for _, p := range recvsParamsResults {
 		for _, f := range p(t).Fields().Slice() {
-			if x := f.Width + f.Type.Width; x > w {
+			if x := f.End(); x > w {
 				w = x
 			}
 		}
...
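Editor's note: Rnd(o, r), used throughout this file and in the defframe hunks above, rounds the offset o up to the next multiple of r. A quick check of the arithmetic:

	// Rnd(10, 8) == 16 — 10 rounded up to an 8-byte boundary
	// Rnd(16, 8) == 16 — already aligned
	// so gc.Rnd(Curfn.Type.ArgWidth(), int64(Widthptr)) keeps the
	// argument area pointer-aligned.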
This diff is collapsed.
This diff is collapsed.
@@ -46,11 +46,10 @@ const runtimeimport = "" +
 	"func @\"\".stringiter2 (? string, ? int) (@\"\".retk·1 int, @\"\".retv·2 rune)\n" +
 	"func @\"\".slicecopy (@\"\".to·2 any, @\"\".fr·3 any, @\"\".wid·4 uintptr \"unsafe-uintptr\") (? int)\n" +
 	"func @\"\".slicestringcopy (@\"\".to·2 any, @\"\".fr·3 any) (? int)\n" +
-	"func @\"\".typ2Itab (@\"\".typ·2 *byte, @\"\".typ2·3 *byte, @\"\".cache·4 **byte) (@\"\".ret·1 *byte)\n" +
 	"func @\"\".convI2E (@\"\".elem·2 any) (@\"\".ret·1 any)\n" +
 	"func @\"\".convI2I (@\"\".typ·2 *byte, @\"\".elem·3 any) (@\"\".ret·1 any)\n" +
 	"func @\"\".convT2E (@\"\".typ·2 *byte, @\"\".elem·3 *any, @\"\".buf·4 *any) (@\"\".ret·1 any)\n" +
-	"func @\"\".convT2I (@\"\".typ·2 *byte, @\"\".typ2·3 *byte, @\"\".cache·4 **byte, @\"\".elem·5 *any, @\"\".buf·6 *any) (@\"\".ret·1 any)\n" +
+	"func @\"\".convT2I (@\"\".tab·2 *byte, @\"\".elem·3 *any, @\"\".buf·4 *any) (@\"\".ret·1 any)\n" +
 	"func @\"\".assertE2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" +
 	"func @\"\".assertE2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" +
 	"func @\"\".assertE2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" +
@@ -86,34 +85,6 @@ const runtimeimport = "" +
 	"func @\"\".closechan (@\"\".hchan·1 any)\n" +
 	"var @\"\".writeBarrier struct { @\"\".enabled bool; @\"\".needed bool; @\"\".cgo bool }\n" +
 	"func @\"\".writebarrierptr (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
-	"func @\"\".writebarrierstring (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
-	"func @\"\".writebarrierslice (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
-	"func @\"\".writebarrieriface (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
-	"func @\"\".writebarrierfat01 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat10 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat11 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat001 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat010 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat011 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat100 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat101 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat110 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat111 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat0001 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat0010 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat0011 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat0100 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat0101 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat0110 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat0111 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat1000 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat1001 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat1010 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat1011 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat1100 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat1101 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat1110 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
-	"func @\"\".writebarrierfat1111 (@\"\".dst·1 *any, _ uintptr \"unsafe-uintptr\", @\"\".src·3 any)\n" +
 	"func @\"\".typedmemmove (@\"\".typ·1 *byte, @\"\".dst·2 *any, @\"\".src·3 *any)\n" +
 	"func @\"\".typedslicecopy (@\"\".typ·2 *byte, @\"\".dst·3 any, @\"\".src·4 any) (? int)\n" +
 	"func @\"\".selectnbsend (@\"\".chanType·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (? bool)\n" +
...
@@ -60,11 +60,10 @@ func slicecopy(to any, fr any, wid uintptr) int
 func slicestringcopy(to any, fr any) int
 
 // interface conversions
-func typ2Itab(typ *byte, typ2 *byte, cache **byte) (ret *byte)
 func convI2E(elem any) (ret any)
 func convI2I(typ *byte, elem any) (ret any)
 func convT2E(typ *byte, elem, buf *any) (ret any)
-func convT2I(typ *byte, typ2 *byte, cache **byte, elem, buf *any) (ret any)
+func convT2I(tab *byte, elem, buf *any) (ret any)
 
 // interface type assertions x.(T)
 func assertE2E(typ *byte, iface any, ret *any)
@@ -113,39 +112,6 @@ var writeBarrier struct {
 }
 
 func writebarrierptr(dst *any, src any)
-func writebarrierstring(dst *any, src any)
-func writebarrierslice(dst *any, src any)
-func writebarrieriface(dst *any, src any)
-
-// The unused *byte argument makes sure that src is 2-pointer-aligned,
-// which is the maximum alignment on NaCl amd64p32
-// (and possibly on 32-bit systems if we start 64-bit aligning uint64s).
-// The bitmap in the name tells which words being copied are pointers.
-func writebarrierfat01(dst *any, _ uintptr, src any)
-func writebarrierfat10(dst *any, _ uintptr, src any)
-func writebarrierfat11(dst *any, _ uintptr, src any)
-func writebarrierfat001(dst *any, _ uintptr, src any)
-func writebarrierfat010(dst *any, _ uintptr, src any)
-func writebarrierfat011(dst *any, _ uintptr, src any)
-func writebarrierfat100(dst *any, _ uintptr, src any)
-func writebarrierfat101(dst *any, _ uintptr, src any)
-func writebarrierfat110(dst *any, _ uintptr, src any)
-func writebarrierfat111(dst *any, _ uintptr, src any)
-func writebarrierfat0001(dst *any, _ uintptr, src any)
-func writebarrierfat0010(dst *any, _ uintptr, src any)
-func writebarrierfat0011(dst *any, _ uintptr, src any)
-func writebarrierfat0100(dst *any, _ uintptr, src any)
-func writebarrierfat0101(dst *any, _ uintptr, src any)
-func writebarrierfat0110(dst *any, _ uintptr, src any)
-func writebarrierfat0111(dst *any, _ uintptr, src any)
-func writebarrierfat1000(dst *any, _ uintptr, src any)
-func writebarrierfat1001(dst *any, _ uintptr, src any)
-func writebarrierfat1010(dst *any, _ uintptr, src any)
-func writebarrierfat1011(dst *any, _ uintptr, src any)
-func writebarrierfat1100(dst *any, _ uintptr, src any)
-func writebarrierfat1101(dst *any, _ uintptr, src any)
-func writebarrierfat1110(dst *any, _ uintptr, src any)
-func writebarrierfat1111(dst *any, _ uintptr, src any)
 
 // *byte is really *runtime.Type
 func typedmemmove(typ *byte, dst *any, src *any)
...
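The convT2I change in this hunk drops the (typ, typ2, cache) triple in favor of a single precomputed itab pointer, so the conversion no longer needs the per-call-site cache that the now-deleted typ2Itab filled lazily. A toy model of the two calling conventions; itab, convT2IOld, and convT2INew here are illustrative stand-ins, not the runtime's actual definitions:

package main

import "fmt"

// itab is a toy stand-in for the runtime's interface table: one entry per
// (interface type, concrete type) pair.
type itab struct{ desc string }

// Old style: each call site carried a pointer to a cache slot that the
// runtime filled on first use via a typ2Itab-like lookup.
func convT2IOld(inter, typ string, cache **itab, elem int) (tab *itab, data int) {
	if *cache == nil {
		*cache = &itab{desc: inter + "/" + typ} // lazy lookup, memoized per call site
	}
	return *cache, elem
}

// New style: the compiler resolves the itab ahead of time and passes it in,
// so the runtime call is just a pairing of tab and value.
func convT2INew(tab *itab, elem int) (*itab, int) {
	return tab, elem
}

func main() {
	var cache *itab
	t1, v1 := convT2IOld("Stringer", "MyInt", &cache, 7)
	fmt.Println(t1.desc, v1) // Stringer/MyInt 7

	static := &itab{desc: "Stringer/MyInt"} // resolved before the call
	t2, v2 := convT2INew(static, 7)
	fmt.Println(t2.desc, v2) // Stringer/MyInt 7
}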
@@ -397,10 +397,42 @@ func transformclosure(xfunc *Node) {
 	lineno = lno
 }
 
+// hasemptycvars returns true iff closure func_ has an
+// empty list of captured vars. OXXX nodes don't count.
+func hasemptycvars(func_ *Node) bool {
+	for _, v := range func_.Func.Cvars.Slice() {
+		if v.Op == OXXX {
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// closuredebugruntimecheck applies boilerplate checks for debug flags
+// and compiling runtime
+func closuredebugruntimecheck(r *Node) {
+	if Debug_closure > 0 {
+		if r.Esc == EscHeap {
+			Warnl(r.Lineno, "heap closure, captured vars = %v", r.Func.Cvars)
+		} else {
+			Warnl(r.Lineno, "stack closure, captured vars = %v", r.Func.Cvars)
+		}
+	}
+	if compiling_runtime > 0 && r.Esc == EscHeap {
+		yyerrorl(r.Lineno, "heap-allocated closure, not allowed in runtime.")
+	}
+}
+
 func walkclosure(func_ *Node, init *Nodes) *Node {
 	// If no closure vars, don't bother wrapping.
-	if len(func_.Func.Cvars.Slice()) == 0 {
+	if hasemptycvars(func_) {
+		if Debug_closure > 0 {
+			Warnl(func_.Lineno, "closure converted to global")
+		}
 		return func_.Func.Closure.Func.Nname
+	} else {
+		closuredebugruntimecheck(func_)
 	}
 
 	// Create closure in the form of a composite literal.
@@ -489,10 +521,10 @@ func makepartialcall(fn *Node, t0 *Type, meth *Sym) *Node {
 		p = fmt.Sprintf("(%v).(%v)-fm", Tconv(rcvrtype, FmtLeft|FmtShort), Sconv(meth, FmtLeft))
 	}
 	basetype := rcvrtype
-	if Isptr[rcvrtype.Etype] {
-		basetype = basetype.Type
+	if rcvrtype.IsPtr() {
+		basetype = basetype.Elem()
 	}
-	if basetype.Etype != TINTER && basetype.Sym == nil {
+	if !basetype.IsInterface() && basetype.Sym == nil {
 		Fatalf("missing base type for %v", rcvrtype)
 	}
@@ -582,7 +614,7 @@ func makepartialcall(fn *Node, t0 *Type, meth *Sym) *Node {
 	ptr.Xoffset = 0
 	xfunc.Func.Dcl = append(xfunc.Func.Dcl, ptr)
 	var body []*Node
-	if Isptr[rcvrtype.Etype] || Isinter(rcvrtype) {
+	if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
 		ptr.Name.Param.Ntype = typenod(rcvrtype)
 		body = append(body, Nod(OAS, ptr, cv))
 	} else {
@@ -622,7 +654,7 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
 	//
 	// Like walkclosure above.
-	if Isinter(n.Left.Type) {
+	if n.Left.Type.IsInterface() {
 		// Trigger panic for method on nil interface now.
 		// Otherwise it happens in the wrapper and is confusing.
 		n.Left = cheapexpr(n.Left, init)
...
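The new hasemptycvars helper treats a capture list as empty when it contains only OXXX placeholders. A self-contained sketch of that filtering logic, with Node, Op, OXXX, and ONAME as simplified stand-ins for the compiler's node representation:

package main

import "fmt"

// Op is a toy stand-in for the compiler's node opcode type.
type Op int

const (
	OXXX  Op = iota // placeholder left behind by earlier passes
	ONAME           // a real captured variable
)

// Node is a minimal stand-in carrying just the opcode.
type Node struct{ Op Op }

// hasEmptyCvars mirrors the helper's shape: placeholders don't count,
// and any real entry makes the list non-empty.
func hasEmptyCvars(cvars []*Node) bool {
	for _, v := range cvars {
		if v.Op == OXXX {
			continue
		}
		return false
	}
	return true
}

func main() {
	fmt.Println(hasEmptyCvars(nil))                              // true
	fmt.Println(hasEmptyCvars([]*Node{{Op: OXXX}}))              // true: only placeholders
	fmt.Println(hasEmptyCvars([]*Node{{Op: OXXX}, {Op: ONAME}})) // false
}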
@@ -59,7 +59,7 @@ func main() {
 	if err != nil {
 		log.Fatalf("could not read target: %v", err)
 	}
-	if bytes.Index(out, []byte("scanInt")) != -1 {
+	if bytes.Contains(out, []byte("scanInt")) {
 		log.Fatalf("scanf code not removed from helloworld")
 	}
 }
...
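bytes.Contains(b, sub) is defined as bytes.Index(b, sub) != -1, so the rewrite above is purely a readability change. A quick check of the equivalence:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	out := []byte("package main ... scanInt ...")
	sub := []byte("scanInt")
	// The two forms report the same answer; Contains just reads better.
	fmt.Println(bytes.Index(out, sub) != -1) // true
	fmt.Println(bytes.Contains(out, sub))    // true
}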