Commit aea3aff6 authored by Josh Bleecher Snyder

cmd/compile: separate ssa.Frontend and ssa.TypeSource

Prior to this CL, the ssa.Frontend field was responsible
for providing types to the backend during compilation.
However, the types needed by the backend are few and static.
It makes more sense to use a struct for them
and to hang that struct off the ssa.Config,
which is the correct home for readonly data.
Now that Types is a struct, we can clean up the names a bit as well.

This has the added benefit of allowing early construction
of all types needed by the backend.
This will be useful for concurrent backend compilation.

Passes toolstash-check -all. No compiler performance change.

Updates #15756

Change-Id: I021658c8cf2836d6a22bbc20cc828ac38c7da08a
Reviewed-on: https://go-review.googlesource.com/38336
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
parent 2c397c7a
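In short: backend code that used to fetch types through ssa.Frontend method calls now reads fields from a Types struct hung off ssa.Config. A minimal sketch of the access pattern, as seen from inside package ssa (the helper function names here are illustrative, not part of the CL):

    // Before this CL: each lookup is a dynamic call through the
    // Frontend/TypeSource interface held in f.fe.
    func int32TypeOld(f *Func) Type { return f.fe.TypeInt32() }

    // After this CL: a plain field read from readonly config data,
    // populated once in gc.initssaconfig (see the first hunk below).
    func int32TypeNew(f *Func) Type { return f.Config.Types.Int32 }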
@@ -22,7 +22,24 @@ var ssaConfig *ssa.Config
 var ssaCache *ssa.Cache
 
 func initssaconfig() {
-	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, Ctxt, Debug['N'] == 0)
+	types := ssa.Types{
+		Bool:    Types[TBOOL],
+		Int8:    Types[TINT8],
+		Int16:   Types[TINT16],
+		Int32:   Types[TINT32],
+		Int64:   Types[TINT64],
+		UInt8:   Types[TUINT8],
+		UInt16:  Types[TUINT16],
+		UInt32:  Types[TUINT32],
+		UInt64:  Types[TUINT64],
+		Float32: Types[TFLOAT32],
+		Float64: Types[TFLOAT64],
+		Int:     Types[TINT],
+		Uintptr: Types[TUINTPTR],
+		String:  Types[TSTRING],
+		BytePtr: ptrto(Types[TUINT8]),
+	}
+	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types, Ctxt, Debug['N'] == 0)
 	if thearch.LinkArch.Name == "386" {
 		ssaConfig.Set387(thearch.Use387)
 	}
@@ -4673,22 +4690,6 @@ type ssafn struct {
 	log bool
 }
 
-func (s *ssafn) TypeBool() ssa.Type    { return Types[TBOOL] }
-func (s *ssafn) TypeInt8() ssa.Type    { return Types[TINT8] }
-func (s *ssafn) TypeInt16() ssa.Type   { return Types[TINT16] }
-func (s *ssafn) TypeInt32() ssa.Type   { return Types[TINT32] }
-func (s *ssafn) TypeInt64() ssa.Type   { return Types[TINT64] }
-func (s *ssafn) TypeUInt8() ssa.Type   { return Types[TUINT8] }
-func (s *ssafn) TypeUInt16() ssa.Type  { return Types[TUINT16] }
-func (s *ssafn) TypeUInt32() ssa.Type  { return Types[TUINT32] }
-func (s *ssafn) TypeUInt64() ssa.Type  { return Types[TUINT64] }
-func (s *ssafn) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
-func (s *ssafn) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
-func (s *ssafn) TypeInt() ssa.Type     { return Types[TINT] }
-func (s *ssafn) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
-func (s *ssafn) TypeString() ssa.Type  { return Types[TSTRING] }
-func (s *ssafn) TypeBytePtr() ssa.Type { return ptrto(Types[TUINT8]) }
-
 // StringData returns a symbol (a *Sym wrapped in an interface) which
 // is the data component of a global string constant containing s.
 func (*ssafn) StringData(s string) interface{} {
@@ -15,10 +15,11 @@ import (
 // It is created once, early during compilation,
 // and shared across all compilations.
 type Config struct {
-	arch       string // "amd64", etc.
-	IntSize    int64  // 4 or 8
-	PtrSize    int64  // 4 or 8
-	RegSize    int64  // 4 or 8
+	arch       string // "amd64", etc.
+	IntSize    int64  // 4 or 8
+	PtrSize    int64  // 4 or 8
+	RegSize    int64  // 4 or 8
+	Types      Types
 	lowerBlock blockRewriter // lowering function
 	lowerValue valueRewriter // lowering function
 	registers  []Register    // machine registers
@@ -44,24 +45,22 @@
 	valueRewriter func(*Value) bool
 )
 
-type TypeSource interface {
-	TypeBool() Type
-	TypeInt8() Type
-	TypeInt16() Type
-	TypeInt32() Type
-	TypeInt64() Type
-	TypeUInt8() Type
-	TypeUInt16() Type
-	TypeUInt32() Type
-	TypeUInt64() Type
-	TypeInt() Type
-	TypeFloat32() Type
-	TypeFloat64() Type
-	TypeUintptr() Type
-	TypeString() Type
-	TypeBytePtr() Type // TODO: use unsafe.Pointer instead?
-
-	CanSSA(t Type) bool
-}
+type Types struct {
+	Bool    Type
+	Int8    Type
+	Int16   Type
+	Int32   Type
+	Int64   Type
+	UInt8   Type
+	UInt16  Type
+	UInt32  Type
+	UInt64  Type
+	Int     Type
+	Float32 Type
+	Float64 Type
+	Uintptr Type
+	String  Type
+	BytePtr Type // TODO: use unsafe.Pointer instead?
+}
 
 type Logger interface {
@@ -87,7 +86,8 @@
 }
 
 type Frontend interface {
-	TypeSource
+	CanSSA(t Type) bool
+
 	Logger
 
 	// StringData returns a symbol pointing to the given string's contents.
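Taken together with the previous hunk, the Frontend interface now looks roughly like this (a sketch assembled from the fragments shown in this diff; the remaining methods are elided):

    type Frontend interface {
        CanSSA(t Type) bool // moved here from the deleted TypeSource

        Logger

        // StringData returns a symbol pointing to the given string's contents.
        StringData(s string) interface{}
        // ... Auto, SplitString, SplitInterface, SplitSlice, SplitInt64, etc.
    }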
@@ -135,8 +135,8 @@ type GCNode interface {
 }
 
 // NewConfig returns a new configuration object for the given architecture.
-func NewConfig(arch string, ctxt *obj.Link, optimize bool) *Config {
-	c := &Config{arch: arch}
+func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
+	c := &Config{arch: arch, Types: types}
 	switch arch {
 	case "amd64":
 		c.IntSize = 8
@@ -28,15 +28,15 @@ func decomposeBuiltIn(f *Func) {
 		case t.IsInteger() && t.Size() == 8 && f.Config.IntSize == 4:
 			var elemType Type
 			if t.IsSigned() {
-				elemType = f.fe.TypeInt32()
+				elemType = f.Config.Types.Int32
 			} else {
-				elemType = f.fe.TypeUInt32()
+				elemType = f.Config.Types.UInt32
 			}
 			hiName, loName := f.fe.SplitInt64(name)
 			newNames = append(newNames, hiName, loName)
 			for _, v := range f.NamedValues[name] {
 				hi := v.Block.NewValue1(v.Pos, OpInt64Hi, elemType, v)
-				lo := v.Block.NewValue1(v.Pos, OpInt64Lo, f.fe.TypeUInt32(), v)
+				lo := v.Block.NewValue1(v.Pos, OpInt64Lo, f.Config.Types.UInt32, v)
 				f.NamedValues[hiName] = append(f.NamedValues[hiName], hi)
 				f.NamedValues[loName] = append(f.NamedValues[loName], lo)
 			}
@@ -44,9 +44,9 @@ func decomposeBuiltIn(f *Func) {
 		case t.IsComplex():
 			var elemType Type
 			if t.Size() == 16 {
-				elemType = f.fe.TypeFloat64()
+				elemType = f.Config.Types.Float64
 			} else {
-				elemType = f.fe.TypeFloat32()
+				elemType = f.Config.Types.Float32
 			}
 			rName, iName := f.fe.SplitComplex(name)
 			newNames = append(newNames, rName, iName)
@@ -58,8 +58,8 @@ func decomposeBuiltIn(f *Func) {
 			}
 			delete(f.NamedValues, name)
 		case t.IsString():
-			ptrType := f.fe.TypeBytePtr()
-			lenType := f.fe.TypeInt()
+			ptrType := f.Config.Types.BytePtr
+			lenType := f.Config.Types.Int
 			ptrName, lenName := f.fe.SplitString(name)
 			newNames = append(newNames, ptrName, lenName)
 			for _, v := range f.NamedValues[name] {
@@ -70,8 +70,8 @@ func decomposeBuiltIn(f *Func) {
 			}
 			delete(f.NamedValues, name)
 		case t.IsSlice():
-			ptrType := f.fe.TypeBytePtr()
-			lenType := f.fe.TypeInt()
+			ptrType := f.Config.Types.BytePtr
+			lenType := f.Config.Types.Int
 			ptrName, lenName, capName := f.fe.SplitSlice(name)
 			newNames = append(newNames, ptrName, lenName, capName)
 			for _, v := range f.NamedValues[name] {
@@ -84,7 +84,7 @@ func decomposeBuiltIn(f *Func) {
 			}
 			delete(f.NamedValues, name)
 		case t.IsInterface():
-			ptrType := f.fe.TypeBytePtr()
+			ptrType := f.Config.Types.BytePtr
 			typeName, dataName := f.fe.SplitInterface(name)
 			newNames = append(newNames, typeName, dataName)
 			for _, v := range f.NamedValues[name] {
@@ -129,9 +129,9 @@ func decomposeBuiltInPhi(v *Value) {
 }
 
 func decomposeStringPhi(v *Value) {
-	fe := v.Block.Func.fe
-	ptrType := fe.TypeBytePtr()
-	lenType := fe.TypeInt()
+	types := &v.Block.Func.Config.Types
+	ptrType := types.BytePtr
+	lenType := types.Int
 
 	ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
 	len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
@@ -145,9 +145,9 @@ func decomposeStringPhi(v *Value) {
 }
 
 func decomposeSlicePhi(v *Value) {
-	fe := v.Block.Func.fe
-	ptrType := fe.TypeBytePtr()
-	lenType := fe.TypeInt()
+	types := &v.Block.Func.Config.Types
+	ptrType := types.BytePtr
+	lenType := types.Int
 
 	ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
 	len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
@@ -164,19 +164,19 @@ func decomposeSlicePhi(v *Value) {
 }
 
 func decomposeInt64Phi(v *Value) {
-	fe := v.Block.Func.fe
+	types := &v.Block.Func.Config.Types
 	var partType Type
 	if v.Type.IsSigned() {
-		partType = fe.TypeInt32()
+		partType = types.Int32
 	} else {
-		partType = fe.TypeUInt32()
+		partType = types.UInt32
 	}
 
 	hi := v.Block.NewValue0(v.Pos, OpPhi, partType)
-	lo := v.Block.NewValue0(v.Pos, OpPhi, fe.TypeUInt32())
+	lo := v.Block.NewValue0(v.Pos, OpPhi, types.UInt32)
 	for _, a := range v.Args {
 		hi.AddArg(a.Block.NewValue1(v.Pos, OpInt64Hi, partType, a))
-		lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, fe.TypeUInt32(), a))
+		lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, types.UInt32, a))
 	}
 	v.reset(OpInt64Make)
 	v.AddArg(hi)
@@ -184,13 +184,13 @@ func decomposeInt64Phi(v *Value) {
 }
 
 func decomposeComplexPhi(v *Value) {
-	fe := v.Block.Func.fe
+	types := &v.Block.Func.Config.Types
 	var partType Type
 	switch z := v.Type.Size(); z {
 	case 8:
-		partType = fe.TypeFloat32()
+		partType = types.Float32
 	case 16:
-		partType = fe.TypeFloat64()
+		partType = types.Float64
 	default:
 		v.Fatalf("decomposeComplexPhi: bad complex size %d", z)
 	}
@@ -207,7 +207,7 @@ func decomposeComplexPhi(v *Value) {
 }
 
 func decomposeInterfacePhi(v *Value) {
-	ptrType := v.Block.Func.fe.TypeBytePtr()
+	ptrType := v.Block.Func.Config.Types.BytePtr
 
 	itab := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
 	data := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
@@ -19,11 +19,11 @@ var Copyelim = copyelim
 var TestCtxt = obj.Linknew(&x86.Linkamd64)
 
 func testConfig(t testing.TB) *Config {
-	return NewConfig("amd64", TestCtxt, true)
+	return NewConfig("amd64", dummyTypes, TestCtxt, true)
 }
 
 func testConfigS390X(t testing.TB) *Config {
-	return NewConfig("s390x", obj.Linknew(&s390x.Links390x), true)
+	return NewConfig("s390x", dummyTypes, obj.Linknew(&s390x.Links390x), true)
 }
 
 // DummyFrontend is a test-only frontend.
@@ -52,27 +52,27 @@ func (DummyFrontend) Auto(t Type) GCNode {
 	return &DummyAuto{t: t, s: "aDummyAuto"}
 }
 func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
-	return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeInt(), s.Off + 8}
+	return LocalSlot{s.N, dummyTypes.BytePtr, s.Off}, LocalSlot{s.N, dummyTypes.Int, s.Off + 8}
 }
 func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
-	return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeBytePtr(), s.Off + 8}
+	return LocalSlot{s.N, dummyTypes.BytePtr, s.Off}, LocalSlot{s.N, dummyTypes.BytePtr, s.Off + 8}
 }
 func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
 	return LocalSlot{s.N, s.Type.ElemType().PtrTo(), s.Off},
-		LocalSlot{s.N, d.TypeInt(), s.Off + 8},
-		LocalSlot{s.N, d.TypeInt(), s.Off + 16}
+		LocalSlot{s.N, dummyTypes.Int, s.Off + 8},
+		LocalSlot{s.N, dummyTypes.Int, s.Off + 16}
 }
 func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
 	if s.Type.Size() == 16 {
-		return LocalSlot{s.N, d.TypeFloat64(), s.Off}, LocalSlot{s.N, d.TypeFloat64(), s.Off + 8}
+		return LocalSlot{s.N, dummyTypes.Float64, s.Off}, LocalSlot{s.N, dummyTypes.Float64, s.Off + 8}
 	}
-	return LocalSlot{s.N, d.TypeFloat32(), s.Off}, LocalSlot{s.N, d.TypeFloat32(), s.Off + 4}
+	return LocalSlot{s.N, dummyTypes.Float32, s.Off}, LocalSlot{s.N, dummyTypes.Float32, s.Off + 4}
 }
 func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
 	if s.Type.IsSigned() {
-		return LocalSlot{s.N, d.TypeInt32(), s.Off + 4}, LocalSlot{s.N, d.TypeUInt32(), s.Off}
+		return LocalSlot{s.N, dummyTypes.Int32, s.Off + 4}, LocalSlot{s.N, dummyTypes.UInt32, s.Off}
 	}
-	return LocalSlot{s.N, d.TypeUInt32(), s.Off + 4}, LocalSlot{s.N, d.TypeUInt32(), s.Off}
+	return LocalSlot{s.N, dummyTypes.UInt32, s.Off + 4}, LocalSlot{s.N, dummyTypes.UInt32, s.Off}
 }
 func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
 	return LocalSlot{s.N, s.Type.FieldType(i), s.Off + s.Type.FieldOff(i)}
@@ -101,21 +101,24 @@ func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t
 func (d DummyFrontend) Debug_checknil() bool { return false }
 func (d DummyFrontend) Debug_wb() bool       { return false }
 
-func (d DummyFrontend) TypeBool() Type    { return TypeBool }
-func (d DummyFrontend) TypeInt8() Type    { return TypeInt8 }
-func (d DummyFrontend) TypeInt16() Type   { return TypeInt16 }
-func (d DummyFrontend) TypeInt32() Type   { return TypeInt32 }
-func (d DummyFrontend) TypeInt64() Type   { return TypeInt64 }
-func (d DummyFrontend) TypeUInt8() Type   { return TypeUInt8 }
-func (d DummyFrontend) TypeUInt16() Type  { return TypeUInt16 }
-func (d DummyFrontend) TypeUInt32() Type  { return TypeUInt32 }
-func (d DummyFrontend) TypeUInt64() Type  { return TypeUInt64 }
-func (d DummyFrontend) TypeFloat32() Type { return TypeFloat32 }
-func (d DummyFrontend) TypeFloat64() Type { return TypeFloat64 }
-func (d DummyFrontend) TypeInt() Type     { return TypeInt64 }
-func (d DummyFrontend) TypeUintptr() Type { return TypeUInt64 }
-func (d DummyFrontend) TypeString() Type  { panic("unimplemented") }
-func (d DummyFrontend) TypeBytePtr() Type { return TypeBytePtr }
+var dummyTypes = Types{
+	Bool:    TypeBool,
+	Int8:    TypeInt8,
+	Int16:   TypeInt16,
+	Int32:   TypeInt32,
+	Int64:   TypeInt64,
+	UInt8:   TypeUInt8,
+	UInt16:  TypeUInt16,
+	UInt32:  TypeUInt32,
+	UInt64:  TypeUInt64,
+	Float32: TypeFloat32,
+	Float64: TypeFloat64,
+	Int:     TypeInt64,
+	Uintptr: TypeUInt64,
+	String:  nil,
+	BytePtr: TypeBytePtr,
+}
 
 func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
 
 func (d DummyFrontend) CanSSA(t Type) bool {
@@ -68,8 +68,8 @@
 (Neg32 x) -> (NEGL x)
 (Neg16 x) -> (NEGL x)
 (Neg8 x) -> (NEGL x)
-(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
 (Neg32F x) && config.use387 -> (FCHS x)
 (Neg64F x) && config.use387 -> (FCHS x)
@@ -78,8 +78,8 @@
 (Neg32 x) -> (NEGL x)
 (Neg16 x) -> (NEGL x)
 (Neg8 x) -> (NEGL x)
-(Neg32F x) -> (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) -> (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) -> (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) -> (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
 
 (Com64 x) -> (NOTQ x)
 (Com32 x) -> (NOTL x)
@@ -98,10 +98,10 @@
 // Lowering other arithmetic
 (Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
-(Ctz32 x) -> (Select0 (BSFQ (ORQ <fe.TypeUInt64()> (MOVQconst [1<<32]) x)))
+(Ctz32 x) -> (Select0 (BSFQ (ORQ <types.UInt64> (MOVQconst [1<<32]) x)))
 
 (BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))
-(BitLen32 x) -> (BitLen64 (MOVLQZX <fe.TypeUInt64()> x))
+(BitLen32 x) -> (BitLen64 (MOVLQZX <types.UInt64> x))
 
 (Bswap64 x) -> (BSWAPQ x)
 (Bswap32 x) -> (BSWAPL x)
@@ -472,10 +472,10 @@
 // Atomic stores.  We use XCHG to prevent the hardware reordering a subsequent load.
 // TODO: most runtime uses of atomic stores don't need that property.  Use normal stores for those?
-(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(fe.TypeUInt32(),TypeMem)> val ptr mem))
-(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(fe.TypeUInt64(),TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
+(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(types.UInt32,TypeMem)> val ptr mem))
+(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(types.UInt64,TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
 
 // Atomic exchanges.
 (AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
@@ -553,8 +553,8 @@
 (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no)
 
 // Disabled because it interferes with the pattern match above and makes worse code.
-// (SETNEF x) -> (ORQ (SETNE <fe.TypeInt8()> x) (SETNAN <fe.TypeInt8()> x))
-// (SETEQF x) -> (ANDQ (SETEQ <fe.TypeInt8()> x) (SETORD <fe.TypeInt8()> x))
+// (SETNEF x) -> (ORQ (SETNE <types.Int8> x) (SETNAN <types.Int8> x))
+// (SETEQF x) -> (ANDQ (SETEQ <types.Int8> x) (SETORD <types.Int8> x))
 
 // fold constants into instructions
 (ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
@@ -34,12 +34,12 @@
 (Mul32uhilo x y) -> (MULLU x y)
 
 (Div32 x y) ->
-	(SUB (XOR <fe.TypeUInt32()>                                                        // negate the result if one operand is negative
-		(Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
-			(SUB <fe.TypeUInt32()> (XOR x <fe.TypeUInt32()> (Signmask x)) (Signmask x))   // negate x if negative
-			(SUB <fe.TypeUInt32()> (XOR y <fe.TypeUInt32()> (Signmask y)) (Signmask y)))) // negate y if negative
-		(Signmask (XOR <fe.TypeUInt32()> x y))) (Signmask (XOR <fe.TypeUInt32()> x y)))
-(Div32u x y) -> (Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+	(SUB (XOR <types.UInt32>                                                        // negate the result if one operand is negative
+		(Select0 <types.UInt32> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
+			(SUB <types.UInt32> (XOR x <types.UInt32> (Signmask x)) (Signmask x))   // negate x if negative
+			(SUB <types.UInt32> (XOR y <types.UInt32> (Signmask y)) (Signmask y)))) // negate y if negative
+		(Signmask (XOR <types.UInt32> x y))) (Signmask (XOR <types.UInt32> x y)))
+(Div32u x y) -> (Select0 <types.UInt32> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
 (Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y))
 (Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
 (Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y))
@@ -48,12 +48,12 @@
 (Div64F x y) -> (DIVD x y)
 
 (Mod32 x y) ->
-	(SUB (XOR <fe.TypeUInt32()>                // negate the result if x is negative
-		(Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
-			(SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> x (Signmask x)) (Signmask x))   // negate x if negative
-			(SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) // negate y if negative
+	(SUB (XOR <types.UInt32>                // negate the result if x is negative
+		(Select1 <types.UInt32> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
+			(SUB <types.UInt32> (XOR <types.UInt32> x (Signmask x)) (Signmask x))   // negate x if negative
+			(SUB <types.UInt32> (XOR <types.UInt32> y (Signmask y)) (Signmask y)))) // negate y if negative
 		(Signmask x)) (Signmask x))
-(Mod32u x y) -> (Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+(Mod32u x y) -> (Select1 <types.UInt32> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
 (Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
 (Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
 (Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
@@ -111,7 +111,7 @@
 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB x y) -> (AND x y)
 (OrB x y) -> (OR x y)
-(EqB x y) -> (XORconst [1] (XOR <fe.TypeBool()> x y))
+(EqB x y) -> (XORconst [1] (XOR <types.Bool> x y))
 (NeqB x y) -> (XOR x y)
 (Not x) -> (XORconst [1] x)
@@ -160,11 +160,11 @@
 (Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAconst x [c])
 (Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRLconst x [c])
 (Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
 (Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
 
 // large constant shifts
 (Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
@@ -176,8 +176,8 @@
 
 // large constant signed right shift, we leave the sign bit
 (Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [31])
 
 // constants
 (Const8 [val]) -> (MOVWconst [val])
@@ -204,7 +204,7 @@
 (SignExt16to32 x) -> (MOVHreg x)
 
 (Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (SRAconst (RSBshiftRL <fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
+(Zeromask x) -> (SRAconst (RSBshiftRL <types.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
 (Slicemask <t> x) -> (SRAconst (RSBconst <t> [0] x) [31])
 
 // float <-> int conversion
@@ -27,8 +27,8 @@
 (Hmul64 x y) -> (MULH x y)
 (Hmul64u x y) -> (UMULH x y)
-(Hmul32 x y) -> (SRAconst (MULL <fe.TypeInt64()> x y) [32])
-(Hmul32u x y) -> (SRAconst (UMULL <fe.TypeUInt64()> x y) [32])
+(Hmul32 x y) -> (SRAconst (MULL <types.Int64> x y) [32])
+(Hmul32u x y) -> (SRAconst (UMULL <types.UInt64> x y) [32])
 
 (Div64 x y) -> (DIV x y)
 (Div64u x y) -> (UDIV x y)
@@ -86,20 +86,20 @@
 (Ctz64 <t> x) -> (CLZ (RBIT <t> x))
 (Ctz32 <t> x) -> (CLZW (RBITW <t> x))
 
-(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <fe.TypeInt()> x))
+(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <types.Int> x))
 
 (Bswap64 x) -> (REV x)
 (Bswap32 x) -> (REVW x)
 
 (BitRev64 x) -> (RBIT x)
 (BitRev32 x) -> (RBITW x)
-(BitRev16 x) -> (SRLconst [48] (RBIT <fe.TypeUInt64()> x))
-(BitRev8 x) -> (SRLconst [56] (RBIT <fe.TypeUInt64()> x))
+(BitRev16 x) -> (SRLconst [48] (RBIT <types.UInt64> x))
+(BitRev8 x) -> (SRLconst [56] (RBIT <types.UInt64> x))
 
 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB x y) -> (AND x y)
 (OrB x y) -> (OR x y)
-(EqB x y) -> (XOR (MOVDconst [1]) (XOR <fe.TypeBool()> x y))
+(EqB x y) -> (XOR (MOVDconst [1]) (XOR <types.Bool> x y))
 (NeqB x y) -> (XOR x y)
 (Not x) -> (XOR (MOVDconst [1]) x)
@@ -10,7 +10,7 @@
 (Add64F x y) -> (ADDD x y)
 
 (Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
-(Select1 (Add32carry <t> x y)) -> (SGTU <fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
+(Select1 (Add32carry <t> x y)) -> (SGTU <types.Bool> x (ADD <t.FieldType(0)> x y))
 (Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
 
 (SubPtr x y) -> (SUB x y)
@@ -21,7 +21,7 @@
 (Sub64F x y) -> (SUBD x y)
 
 (Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
-(Select1 (Sub32carry <t> x y)) -> (SGTU <fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
+(Select1 (Sub32carry <t> x y)) -> (SGTU <types.Bool> (SUB <t.FieldType(0)> x y) x)
 (Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
 
 (Mul32 x y) -> (MUL x y)
@@ -72,11 +72,11 @@
 (Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
 (Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
 (Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
 (Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
 
 // large constant shifts
 (Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
@@ -88,8 +88,8 @@
 
 // large constant signed right shift, we leave the sign bit
 (Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [31])
 
 // shifts
 // hardware instruction uses only the low 5 bits of the shift
@@ -118,17 +118,17 @@
 (Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
 (Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
 
-(Rsh32x32 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh32x16 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh32x8 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh32x32 x y) -> (SRA x ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh32x16 x y) -> (SRA x ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) -> (SRA x ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
 
-(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
 
-(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
 
 // unary ops
 (Neg32 x) -> (NEG x)
@@ -153,7 +153,7 @@
 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB x y) -> (AND x y)
 (OrB x y) -> (OR x y)
-(EqB x y) -> (XORconst [1] (XOR <fe.TypeBool()> x y))
+(EqB x y) -> (XORconst [1] (XOR <types.Bool> x y))
 (NeqB x y) -> (XOR x y)
 (Not x) -> (XORconst [1] x)
@@ -393,41 +393,41 @@
 // AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
 (AtomicOr8 ptr val mem) && !config.BigEndian ->
-	(LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
-		(SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
-			(SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3] ptr))) mem)
+	(LoweredAtomicOr (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+		(SLL <types.UInt32> (ZeroExt8to32 val)
+			(SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3] ptr))) mem)
 
 // AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
 (AtomicAnd8 ptr val mem) && !config.BigEndian ->
-	(LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
-		(OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
-			(SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3] ptr)))
-			(NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()>
-				(MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3]
-					(ANDconst <fe.TypeUInt32()> [3]
-						(XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
+	(LoweredAtomicAnd (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+		(OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val)
+			(SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3] ptr)))
+			(NORconst [0] <types.UInt32> (SLL <types.UInt32>
+				(MOVWconst [0xff]) (SLLconst <types.UInt32> [3]
+					(ANDconst <types.UInt32> [3]
+						(XORconst <types.UInt32> [3] ptr)))))) mem)
 
 // AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
 (AtomicOr8 ptr val mem) && config.BigEndian ->
-	(LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
-		(SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
-			(SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3]
-					(XORconst <fe.TypeUInt32()> [3] ptr)))) mem)
+	(LoweredAtomicOr (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+		(SLL <types.UInt32> (ZeroExt8to32 val)
+			(SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3]
+					(XORconst <types.UInt32> [3] ptr)))) mem)
 
 // AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
 (AtomicAnd8 ptr val mem) && config.BigEndian ->
-	(LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
-		(OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
-			(SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3]
-					(XORconst <fe.TypeUInt32()> [3] ptr))))
-			(NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()>
-				(MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3]
-					(ANDconst <fe.TypeUInt32()> [3]
-						(XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
+	(LoweredAtomicAnd (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+		(OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val)
+			(SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3]
+					(XORconst <types.UInt32> [3] ptr))))
+			(NORconst [0] <types.UInt32> (SLL <types.UInt32>
+				(MOVWconst [0xff]) (SLLconst <types.UInt32> [3]
+					(ANDconst <types.UInt32> [3]
+						(XORconst <types.UInt32> [3] ptr)))))) mem)
 
 // checks
@@ -437,7 +437,7 @@
 (If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
 (If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)
 
-(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <fe.TypeBool()> cond)) yes no)
+(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <types.Bool> cond)) yes no)
 
 // ***************************
 // Above: lowering rules
@@ -13,28 +13,28 @@
 (Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 ->
 	(ComplexMake
-		(Load <fe.TypeFloat32()> ptr mem)
-		(Load <fe.TypeFloat32()>
-			(OffPtr <fe.TypeFloat32().PtrTo()> [4] ptr)
+		(Load <types.Float32> ptr mem)
+		(Load <types.Float32>
+			(OffPtr <types.Float32.PtrTo()> [4] ptr)
 			mem)
 		)
 (Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 8 ->
-	(Store {fe.TypeFloat32()}
-		(OffPtr <fe.TypeFloat32().PtrTo()> [4] dst)
+	(Store {types.Float32}
+		(OffPtr <types.Float32.PtrTo()> [4] dst)
 		imag
-		(Store {fe.TypeFloat32()} dst real mem))
+		(Store {types.Float32} dst real mem))
 (Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
 	(ComplexMake
-		(Load <fe.TypeFloat64()> ptr mem)
-		(Load <fe.TypeFloat64()>
-			(OffPtr <fe.TypeFloat64().PtrTo()> [8] ptr)
+		(Load <types.Float64> ptr mem)
+		(Load <types.Float64>
+			(OffPtr <types.Float64.PtrTo()> [8] ptr)
 			mem)
 		)
 (Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 16 ->
-	(Store {fe.TypeFloat64()}
-		(OffPtr <fe.TypeFloat64().PtrTo()> [8] dst)
+	(Store {types.Float64}
+		(OffPtr <types.Float64.PtrTo()> [8] dst)
 		imag
-		(Store {fe.TypeFloat64()} dst real mem))
+		(Store {types.Float64} dst real mem))
 
 // string ops
 (StringPtr (StringMake ptr _)) -> ptr
@@ -42,15 +42,15 @@
 (Load <t> ptr mem) && t.IsString() ->
 	(StringMake
-		(Load <fe.TypeBytePtr()> ptr mem)
-		(Load <fe.TypeInt()>
-			(OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
+		(Load <types.BytePtr> ptr mem)
+		(Load <types.Int>
+			(OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr)
 			mem))
 (Store dst (StringMake ptr len) mem) ->
-	(Store {fe.TypeInt()}
-		(OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst)
+	(Store {types.Int}
+		(OffPtr <types.Int.PtrTo()> [config.PtrSize] dst)
 		len
-		(Store {fe.TypeBytePtr()} dst ptr mem))
+		(Store {types.BytePtr} dst ptr mem))
 
 // slice ops
 (SlicePtr (SliceMake ptr _ _ )) -> ptr
@@ -60,20 +60,20 @@
 (Load <t> ptr mem) && t.IsSlice() ->
 	(SliceMake
 		(Load <t.ElemType().PtrTo()> ptr mem)
-		(Load <fe.TypeInt()>
-			(OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
+		(Load <types.Int>
+			(OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr)
 			mem)
-		(Load <fe.TypeInt()>
-			(OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)
+		(Load <types.Int>
+			(OffPtr <types.Int.PtrTo()> [2*config.PtrSize] ptr)
 			mem))
 (Store dst (SliceMake ptr len cap) mem) ->
-	(Store {fe.TypeInt()}
-		(OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)
+	(Store {types.Int}
+		(OffPtr <types.Int.PtrTo()> [2*config.PtrSize] dst)
 		cap
-		(Store {fe.TypeInt()}
-			(OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst)
+		(Store {types.Int}
+			(OffPtr <types.Int.PtrTo()> [config.PtrSize] dst)
 			len
-			(Store {fe.TypeBytePtr()} dst ptr mem)))
+			(Store {types.BytePtr} dst ptr mem)))
 
 // interface ops
 (ITab (IMake itab _)) -> itab
@@ -81,12 +81,12 @@
 (Load <t> ptr mem) && t.IsInterface() ->
 	(IMake
-		(Load <fe.TypeBytePtr()> ptr mem)
-		(Load <fe.TypeBytePtr()>
-			(OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)
+		(Load <types.BytePtr> ptr mem)
+		(Load <types.BytePtr>
+			(OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] ptr)
 			mem))
 (Store dst (IMake itab data) mem) ->
-	(Store {fe.TypeBytePtr()}
-		(OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)
+	(Store {types.BytePtr}
+		(OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] dst)
 		data
-		(Store {fe.TypeUintptr()} dst itab mem))
+		(Store {types.Uintptr} dst itab mem))
@@ -204,12 +204,13 @@ func genRules(arch arch) {
 	}
 
 	body := buf.String()
 
-	// Do a rough match to predict whether we need b, config, and/or fe.
+	// Do a rough match to predict whether we need b, config, fe, and/or types.
 	// It's not precise--thus the blank assignments--but it's good enough
 	// to avoid generating needless code and doing pointless nil checks.
 	hasb := strings.Contains(body, "b.")
 	hasconfig := strings.Contains(body, "config.") || strings.Contains(body, "config)")
 	hasfe := strings.Contains(body, "fe.")
+	hasts := strings.Contains(body, "types.")
 	fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value) bool {\n", arch.name, op)
 	if hasb || hasconfig || hasfe {
 		fmt.Fprintln(w, "b := v.Block")
@@ -223,6 +224,10 @@
 		fmt.Fprintln(w, "fe := b.Func.fe")
 		fmt.Fprintln(w, "_ = fe")
 	}
+	if hasts {
+		fmt.Fprintln(w, "types := &b.Func.Config.Types")
+		fmt.Fprintln(w, "_ = types")
+	}
 	fmt.Fprint(w, body)
 	fmt.Fprintf(w, "}\n")
 }
@@ -234,6 +239,8 @@ func genRules(arch arch) {
 	fmt.Fprintln(w, "_ = config")
 	fmt.Fprintln(w, "fe := b.Func.fe")
 	fmt.Fprintln(w, "_ = fe")
+	fmt.Fprintln(w, "types := &config.Types")
+	fmt.Fprintln(w, "_ = types")
 	fmt.Fprintf(w, "switch b.Kind {\n")
 	ops = nil
 	for op := range blockrules {
@@ -719,7 +726,7 @@ func typeName(typ string) string {
 	case "Flags", "Mem", "Void", "Int128":
 		return "Type" + typ
 	default:
-		return "fe.Type" + typ + "()"
+		return "types." + typ
 	}
 }
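For a rule body that mentions types., the generator now emits a types preamble alongside the existing b/config/fe ones. A representative sketch of the generated shape, abbreviated (the function name follows the "rewriteValue%s_%s" format emitted above; the elided body performs the actual match and rewrite):

    func rewriteValueAMD64_OpCtz32(v *Value) bool {
        b := v.Block
        _ = b
        types := &b.Func.Config.Types
        _ = types
        // match: (Ctz32 x)
        // result: (Select0 (BSFQ (ORQ <types.UInt64> (MOVQconst [1<<32]) x)))
        ...
    }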
@@ -197,7 +197,8 @@ func insertLoopReschedChecks(f *Func) {
 		//    if sp < g.limit { goto sched }
 		//    goto header
 
-		pt := f.fe.TypeUintptr()
+		types := &f.Config.Types
+		pt := types.Uintptr
 		g := test.NewValue1(bb.Pos, OpGetG, pt, mem0)
 		sp := test.NewValue0(bb.Pos, OpSP, pt)
 		cmpOp := OpLess64U
@@ -206,7 +207,7 @@
 		}
 		limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.Size(), g)
 		lim := test.NewValue2(bb.Pos, OpLoad, pt, limaddr, mem0)
-		cmp := test.NewValue2(bb.Pos, cmpOp, f.fe.TypeBool(), sp, lim)
+		cmp := test.NewValue2(bb.Pos, cmpOp, types.Bool, sp, lim)
 		test.SetControl(cmp)
 
 		// if true, goto sched
@@ -2055,10 +2055,11 @@ func (e *edgeState) erase(loc Location) {
 func (e *edgeState) findRegFor(typ Type) Location {
 	// Which registers are possibilities.
 	var m regMask
+	types := &e.s.f.Config.Types
 	if typ.IsFloat() {
-		m = e.s.compatRegs(e.s.f.fe.TypeFloat64())
+		m = e.s.compatRegs(types.Float64)
 	} else {
-		m = e.s.compatRegs(e.s.f.fe.TypeInt64())
+		m = e.s.compatRegs(types.Int64)
 	}
 
 	// Pick a register. In priority order:
@@ -2082,7 +2083,7 @@ func (e *edgeState) findRegFor(typ Type) Location {
 	// No register is available. Allocate a temp location to spill a register to.
 	// The type of the slot is immaterial - it will not be live across
 	// any safepoint. Just use a type big enough to hold any register.
-	typ = e.s.f.fe.TypeInt64()
+	typ = types.Int64
 	t := LocalSlot{e.s.f.fe.Auto(typ), typ, 0}
 
 	// TODO: reuse these slots.
[4 collapsed diffs not shown]
@@ -17,8 +17,8 @@ func shortcircuit(f *Func) {
 	//    x = phi(a, ...)
 	//
 	// We can replace the "a" in the phi with the constant true.
-	ct := f.ConstBool(f.Entry.Pos, f.fe.TypeBool(), true)
-	cf := f.ConstBool(f.Entry.Pos, f.fe.TypeBool(), false)
+	ct := f.ConstBool(f.Entry.Pos, f.Config.Types.Bool, true)
+	cf := f.ConstBool(f.Entry.Pos, f.Config.Types.Bool, false)
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
 			if v.Op != OpPhi {
@@ -88,17 +88,17 @@ func writebarrier(f *Func) {
 			}
 		}
 		if sb == nil {
-			sb = f.Entry.NewValue0(initpos, OpSB, f.fe.TypeUintptr())
+			sb = f.Entry.NewValue0(initpos, OpSB, f.Config.Types.Uintptr)
 		}
 		if sp == nil {
-			sp = f.Entry.NewValue0(initpos, OpSP, f.fe.TypeUintptr())
+			sp = f.Entry.NewValue0(initpos, OpSP, f.Config.Types.Uintptr)
 		}
-		wbsym := &ExternSymbol{Typ: f.fe.TypeBool(), Sym: f.fe.Syslook("writeBarrier")}
-		wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.fe.TypeUInt32().PtrTo(), wbsym, sb)
+		wbsym := &ExternSymbol{Typ: f.Config.Types.Bool, Sym: f.fe.Syslook("writeBarrier")}
+		wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32.PtrTo(), wbsym, sb)
 		writebarrierptr = f.fe.Syslook("writebarrierptr")
 		typedmemmove = f.fe.Syslook("typedmemmove")
 		typedmemclr = f.fe.Syslook("typedmemclr")
-		const0 = f.ConstInt32(initpos, f.fe.TypeUInt32(), 0)
+		const0 = f.ConstInt32(initpos, f.Config.Types.UInt32, 0)
 
 		// allocate auxiliary data structures for computing store order
 		sset = f.newSparseSet(f.NumValues())
@@ -155,8 +155,9 @@
 
 		// set up control flow for write barrier test
 		// load word, test word, avoiding partial register write from load byte.
-		flag := b.NewValue2(pos, OpLoad, f.fe.TypeUInt32(), wbaddr, mem)
-		flag = b.NewValue2(pos, OpNeq32, f.fe.TypeBool(), flag, const0)
+		types := &f.Config.Types
+		flag := b.NewValue2(pos, OpLoad, types.UInt32, wbaddr, mem)
+		flag = b.NewValue2(pos, OpNeq32, types.Bool, flag, const0)
 		b.Kind = BlockIf
 		b.SetControl(flag)
 		b.Likely = BranchUnlikely
@@ -175,7 +176,7 @@
 			ptr := w.Args[0]
 			var typ interface{}
 			if w.Op != OpStoreWB {
-				typ = &ExternSymbol{Typ: f.fe.TypeUintptr(), Sym: w.Aux.(Type).Symbol()}
+				typ = &ExternSymbol{Typ: types.Uintptr, Sym: w.Aux.(Type).Symbol()}
 			}
 			pos = w.Pos
@@ -280,7 +281,7 @@ func wbcall(pos src.XPos, b *Block, fn *obj.LSym, typ interface{}, ptr, val, mem
 	off := config.ctxt.FixedFrameSize()
 
 	if typ != nil { // for typedmemmove
-		taddr := b.NewValue1A(pos, OpAddr, b.Func.fe.TypeUintptr(), typ, sb)
+		taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
 		off = round(off, taddr.Type.Alignment())
 		arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
 		mem = b.NewValue3A(pos, OpStore, TypeMem, ptr.Type, arg, taddr, mem)