Commit 07b4b4a1 authored by Josh Bleecher Snyder

cmd/compile: add scale field to SSA Ops

Refactoring only.

This makes it easier to add ops
that do indexed memory loads/stores.

Passes toolstash-check.

Change-Id: I82df0d4154718577ec42106fa1bc76571bf65096
Reviewed-on: https://go-review.googlesource.com/c/go/+/166425
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent b4fbd291
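
For context before the diff: the "scale" here is the constant multiplier in an amd64/386 indexed addressing mode, base + index*scale (+ displacement), where the hardware allows only 1, 2, 4, or 8. A minimal illustration of the kind of Go code that lowers to these indexed ops (assembly comment approximate, register choice hypothetical):

	func load(p []int64, i int) int64 {
		return p[i] // roughly MOVQ (AX)(BX*8), CX: base = &p[0], index = i, scale = 8 = sizeof(int64)
	}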
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -117,6 +117,21 @@ func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
 	return p
 }
 
+// memIdx fills out a as an indexed memory reference for v.
+// It assumes that the base register and the index register
+// are v.Args[0].Reg() and v.Args[1].Reg(), respectively.
+// The caller must still use gc.AddAux/gc.AddAux2 to handle v.Aux as necessary.
+func memIdx(a *obj.Addr, v *ssa.Value) {
+	r, i := v.Args[0].Reg(), v.Args[1].Reg()
+	a.Type = obj.TYPE_MEM
+	a.Scale = v.Op.Scale()
+	if a.Scale == 1 && i == x86.REG_SP {
+		r, i = i, r
+	}
+	a.Reg = r
+	a.Index = i
+}
+
 // DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
 // See runtime/mkduff.go.
 func duffStart(size int64) int64 {
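
A note on the register swap in memIdx above (editorial explanation, not part of the commit): x86 addressing allows SP as a base register but never as an index register. When the scale is 1, base + index*1 is symmetric, so the two registers can simply be exchanged:

	// (AX)(SP*1) cannot be encoded: SP is not a legal index register.
	// (SP)(AX*1) names the same address and encodes fine, hence:
	//	r, i = i, r

For scales 2, 4, and 8 the swap would change the computed address, which is why memIdx swaps only when a.Scale == 1.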
@@ -571,26 +586,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 	case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8,
 		ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8,
 		ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
-		o := v.Reg()
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
 		p := s.Prog(v.Op.Asm())
-		switch v.Op {
-		case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAL1, ssa.OpAMD64LEAW1:
-			p.From.Scale = 1
-			if i == x86.REG_SP {
-				r, i = i, r
-			}
-		case ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAW2:
-			p.From.Scale = 2
-		case ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAW4:
-			p.From.Scale = 4
-		case ssa.OpAMD64LEAQ8, ssa.OpAMD64LEAL8, ssa.OpAMD64LEAW8:
-			p.From.Scale = 8
-		}
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r
-		p.From.Index = i
+		memIdx(&p.From, v)
+		o := v.Reg()
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = o
 		if v.AuxInt != 0 && v.Aux == nil {
@@ -702,25 +700,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.To.Reg = v.Reg()
 	case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
 		ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
 		p := s.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		switch v.Op {
-		case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1:
-			if i == x86.REG_SP {
-				r, i = i, r
-			}
-			p.From.Scale = 1
-		case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8:
-			p.From.Scale = 8
-		case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
-			p.From.Scale = 4
-		case ssa.OpAMD64MOVWloadidx2:
-			p.From.Scale = 2
-		}
-		p.From.Reg = r
-		p.From.Index = i
+		memIdx(&p.From, v)
 		gc.AddAux(&p.From, v)
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
@@ -736,27 +717,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		gc.AddAux(&p.To, v)
 	case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
 		ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
 		p := s.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_REG
 		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		switch v.Op {
-		case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1:
-			if i == x86.REG_SP {
-				r, i = i, r
-			}
-			p.To.Scale = 1
-		case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8:
-			p.To.Scale = 8
-		case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
-			p.To.Scale = 4
-		case ssa.OpAMD64MOVWstoreidx2:
-			p.To.Scale = 2
-		}
-		p.To.Reg = r
-		p.To.Index = i
+		memIdx(&p.To, v)
 		gc.AddAux(&p.To, v)
 	case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
 		sc := v.AuxValAndOff()
@@ -809,24 +773,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.From.Type = obj.TYPE_CONST
 		sc := v.AuxValAndOff()
 		p.From.Offset = sc.Val()
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		switch v.Op {
-		case ssa.OpAMD64MOVBstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx1:
-			p.To.Scale = 1
-			if i == x86.REG_SP {
-				r, i = i, r
-			}
-		case ssa.OpAMD64MOVWstoreconstidx2:
-			p.To.Scale = 2
-		case ssa.OpAMD64MOVLstoreconstidx4:
-			p.To.Scale = 4
-		case ssa.OpAMD64MOVQstoreconstidx8:
-			p.To.Scale = 8
-		}
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = r
-		p.To.Index = i
+		memIdx(&p.To, v)
 		gc.AddAux2(&p.To, v, sc.Off())
 	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
 		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
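
This is the payoff the commit message names: each codegen case above collapses to a memIdx call, and adding a new indexed load/store op no longer requires threading its scale through a switch. A hedged sketch of what the codegen case for a hypothetical future indexed op could look like (op name invented):

	case ssa.OpAMD64NEWloadidx4: // hypothetical op with scale 4 in its opcodeTable entry
		p := s.Prog(v.Op.Asm())
		memIdx(&p.From, v)
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()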
--- a/src/cmd/compile/internal/ssa/gen/main.go
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -56,6 +56,7 @@ type opData struct {
 	hasSideEffects bool   // for "reasons", not to be eliminated. E.g., atomic store, #19182.
 	zeroWidth      bool   // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width.
 	symEffect      string // effect this op has on symbol in aux
+	scale          uint8  // amd64/386 indexed load scale
 }
 
 type blockData struct {
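
With the field in place, an op definition in the gen tables can declare its scale directly, and genOp (next hunk) copies it into the generated opcodeTable. A hypothetical, abridged entry of the kind found in gen/AMD64Ops.go (real entries also carry register and typing info):

	{name: "MOVLloadidx4", argLength: 3, aux: "SymOff", asm: "MOVL", scale: 4},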
@@ -245,6 +246,9 @@ func genOp() {
 		if v.asm != "" {
 			fmt.Fprintf(w, "asm: %s.A%s,\n", pkg, v.asm)
 		}
+		if v.scale != 0 {
+			fmt.Fprintf(w, "scale: %d,\n", v.scale)
+		}
 		fmt.Fprintln(w, "reg:regInfo{")
 
 		// Compute input allocation order. We allocate from the
@@ -291,6 +295,7 @@ func genOp() {
 	fmt.Fprintln(w, "}")
 	fmt.Fprintln(w, "func (o Op) Asm() obj.As {return opcodeTable[o].asm}")
+	fmt.Fprintln(w, "func (o Op) Scale() int16 {return int16(opcodeTable[o].scale)}")
 
 	// generate op string method
 	fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }")
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -37,6 +37,7 @@ type opInfo struct {
 	hasSideEffects bool      // for "reasons", not to be eliminated. E.g., atomic store, #19182.
 	zeroWidth      bool      // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width.
 	symEffect      SymEffect // effect this op has on symbol in aux
+	scale          uint8     // amd64/386 indexed load scale
 }
 
 type inputInfo struct {
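
At codegen time a backend reads the scale back through the generated accessor, exactly as memIdx does. A small usage sketch (assume v is an indexed *ssa.Value and a an obj.Addr being filled in):

	if s := v.Op.Scale(); s != 0 { // 1, 2, 4, or 8 for indexed ops; 0 otherwise
		a.Scale = s
	}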