Commit 6a1153ac authored by Josh Bleecher Snyder

[dev.ssa] cmd/compile: refactor out rulegen value parsing

Previously, genMatch0 and genResult0 contained
lots of duplication: locating the op, parsing
the value, validation, etc.
Parsing and validation was mixed in with code gen.

Extract a helper, parseValue. It is responsible
for parsing the value, locating the op, and doing
shared validation.

As a bonus (and possibly as my original motivation),
make op selection pay attention to the number
of args present.
This allows arch-specific ops to share a name
with generic ops as long as there is no ambiguity.
It also detects and reports unresolved ambiguity,
unlike before, where it would simply always
pick the generic op, with no warning.

Also use parseValue when generating the top-level
op dispatch, to ensure its opinion about ops
matches genMatch0 and genResult0.

The order of statements in the generated code used
to depend on the exact rule. It is now somewhat
independent of the rule. That is the source
of some of the generated code changes in this CL.
See rewritedec64 and rewritegeneric for examples.
It is a one-time change.

The op dispatch switch and functions used to be
sorted by opname without architecture. The sort
now includes the architecture, leading to further
generated code changes.
See rewriteARM and rewriteAMD64 for examples.
Again, it is a one-time change.

There are no functional changes.

Change-Id: I22c989183ad5651741ebdc0566349c5fd6c6b23c
Reviewed-on: https://go-review.googlesource.com/24649
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
parent dede2061
...@@ -117,15 +117,17 @@ func genRules(arch arch) { ...@@ -117,15 +117,17 @@ func genRules(arch arch) {
if unbalanced(rule) { if unbalanced(rule) {
continue continue
} }
op := strings.Split(rule, " ")[0][1:]
if op[len(op)-1] == ')' {
op = op[:len(op)-1] // rule has only opcode, e.g. (ConstNil) -> ...
}
loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno) loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno)
if isBlock(op, arch) { r := Rule{rule: rule, loc: loc}
blockrules[op] = append(blockrules[op], Rule{rule: rule, loc: loc}) if rawop := strings.Split(rule, " ")[0][1:]; isBlock(rawop, arch) {
blockrules[rawop] = append(blockrules[rawop], r)
} else { } else {
oprules[op] = append(oprules[op], Rule{rule: rule, loc: loc}) // Do fancier value op matching.
match, _, _ := r.parse()
op, oparch, _, _, _, _ := parseValue(match, arch, loc)
opname := fmt.Sprintf("Op%s%s", oparch, op.name)
oprules[opname] = append(oprules[opname], r)
} }
rule = "" rule = ""
ruleLineno = 0 ruleLineno = 0
...@@ -157,8 +159,8 @@ func genRules(arch arch) { ...@@ -157,8 +159,8 @@ func genRules(arch arch) {
fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name) fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name)
fmt.Fprintf(w, "switch v.Op {\n") fmt.Fprintf(w, "switch v.Op {\n")
for _, op := range ops { for _, op := range ops {
fmt.Fprintf(w, "case %s:\n", opName(op, arch)) fmt.Fprintf(w, "case %s:\n", op)
fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, opName(op, arch)) fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, op)
} }
fmt.Fprintf(w, "}\n") fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return false\n") fmt.Fprintf(w, "return false\n")
...@@ -167,7 +169,7 @@ func genRules(arch arch) { ...@@ -167,7 +169,7 @@ func genRules(arch arch) {
// Generate a routine per op. Note that we don't make one giant routine // Generate a routine per op. Note that we don't make one giant routine
// because it is too big for some compilers. // because it is too big for some compilers.
for _, op := range ops { for _, op := range ops {
fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, opName(op, arch)) fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, op)
fmt.Fprintln(w, "b := v.Block") fmt.Fprintln(w, "b := v.Block")
fmt.Fprintln(w, "_ = b") fmt.Fprintln(w, "_ = b")
var canFail bool var canFail bool
...@@ -334,141 +336,108 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t ...@@ -334,141 +336,108 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t
} }
canFail := false canFail := false
// split body up into regions. Split by spaces/tabs, except those op, oparch, typ, auxint, aux, args := parseValue(match, arch, loc)
// contained in () or {}.
s := split(match[1 : len(match)-1]) // remove parens, then split
// Find op record
var op opData
for _, x := range genericOps {
if x.name == s[0] {
op = x
break
}
}
for _, x := range arch.ops {
if x.name == s[0] {
op = x
break
}
}
if op.name == "" {
log.Fatalf("%s: unknown op %s", loc, s[0])
}
// check op // check op
if !top { if !top {
fmt.Fprintf(w, "if %s.Op != %s {\nbreak\n}\n", v, opName(s[0], arch)) fmt.Fprintf(w, "if %s.Op != Op%s%s {\nbreak\n}\n", v, oparch, op.name)
canFail = true canFail = true
} }
// check type/aux/args if typ != "" {
argnum := 0 if !isVariable(typ) {
for _, a := range s[1:] {
if a[0] == '<' {
// type restriction
t := a[1 : len(a)-1] // remove <>
if !isVariable(t) {
// code. We must match the results of this code. // code. We must match the results of this code.
fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t) fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
canFail = true canFail = true
} else { } else {
// variable // variable
if _, ok := m[t]; ok { if _, ok := m[typ]; ok {
// must match previous variable // must match previous variable
fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t) fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
canFail = true canFail = true
} else { } else {
m[t] = struct{}{} m[typ] = struct{}{}
fmt.Fprintf(w, "%s := %s.Type\n", t, v) fmt.Fprintf(w, "%s := %s.Type\n", typ, v)
} }
} }
} else if a[0] == '[' {
// auxint restriction
switch op.aux {
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
default:
log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
} }
x := a[1 : len(a)-1] // remove []
if !isVariable(x) { if auxint != "" {
if !isVariable(auxint) {
// code // code
fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x) fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
canFail = true canFail = true
} else { } else {
// variable // variable
if _, ok := m[x]; ok { if _, ok := m[auxint]; ok {
fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x) fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
canFail = true canFail = true
} else { } else {
m[x] = struct{}{} m[auxint] = struct{}{}
fmt.Fprintf(w, "%s := %s.AuxInt\n", x, v) fmt.Fprintf(w, "%s := %s.AuxInt\n", auxint, v)
} }
} }
} else if a[0] == '{' {
// aux restriction
switch op.aux {
case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
default:
log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
} }
x := a[1 : len(a)-1] // remove {}
if !isVariable(x) { if aux != "" {
if !isVariable(aux) {
// code // code
fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x) fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
canFail = true canFail = true
} else { } else {
// variable // variable
if _, ok := m[x]; ok { if _, ok := m[aux]; ok {
fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x) fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
canFail = true canFail = true
} else { } else {
m[x] = struct{}{} m[aux] = struct{}{}
fmt.Fprintf(w, "%s := %s.Aux\n", x, v) fmt.Fprintf(w, "%s := %s.Aux\n", aux, v)
} }
} }
} else if a == "_" { }
argnum++
} else if !strings.Contains(a, "(") { for i, arg := range args {
if arg == "_" {
continue
}
if !strings.Contains(arg, "(") {
// leaf variable // leaf variable
if _, ok := m[a]; ok { if _, ok := m[arg]; ok {
// variable already has a definition. Check whether // variable already has a definition. Check whether
// the old definition and the new definition match. // the old definition and the new definition match.
// For example, (add x x). Equality is just pointer equality // For example, (add x x). Equality is just pointer equality
// on Values (so cse is important to do before lowering). // on Values (so cse is important to do before lowering).
fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", a, v, argnum) fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", arg, v, i)
canFail = true canFail = true
} else { } else {
// remember that this variable references the given value // remember that this variable references the given value
m[a] = struct{}{} m[arg] = struct{}{}
fmt.Fprintf(w, "%s := %s.Args[%d]\n", a, v, argnum) fmt.Fprintf(w, "%s := %s.Args[%d]\n", arg, v, i)
}
continue
} }
argnum++
} else {
// compound sexpr // compound sexpr
var argname string var argname string
colon := strings.Index(a, ":") colon := strings.Index(arg, ":")
openparen := strings.Index(a, "(") openparen := strings.Index(arg, "(")
if colon >= 0 && openparen >= 0 && colon < openparen { if colon >= 0 && openparen >= 0 && colon < openparen {
// rule-specified name // rule-specified name
argname = a[:colon] argname = arg[:colon]
a = a[colon+1:] arg = arg[colon+1:]
} else { } else {
// autogenerated name // autogenerated name
argname = fmt.Sprintf("%s_%d", v, argnum) argname = fmt.Sprintf("%s_%d", v, i)
} }
fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, argnum) fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, i)
if genMatch0(w, arch, a, argname, m, false, loc) { if genMatch0(w, arch, arg, argname, m, false, loc) {
canFail = true canFail = true
} }
argnum++
}
} }
if op.argLength == -1 { if op.argLength == -1 {
fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, argnum) fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, len(args))
canFail = true canFail = true
} else if int(op.argLength) != argnum {
log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
} }
return canFail return canFail
} }
...@@ -500,105 +469,44 @@ func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move boo ...@@ -500,105 +469,44 @@ func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move boo
return result return result
} }
s := split(result[1 : len(result)-1]) // remove parens, then split op, oparch, typ, auxint, aux, args := parseValue(result, arch, loc)
// Find op record
var op opData
for _, x := range genericOps {
if x.name == s[0] {
op = x
break
}
}
for _, x := range arch.ops {
if x.name == s[0] {
op = x
break
}
}
if op.name == "" {
log.Fatalf("%s: unknown op %s", loc, s[0])
}
// Find the type of the variable. // Find the type of the variable.
var opType string typeOverride := typ != ""
var typeOverride bool if typ == "" && op.typ != "" {
for _, a := range s[1:] { typ = typeName(op.typ)
if a[0] == '<' {
// type restriction
opType = a[1 : len(a)-1] // remove <>
typeOverride = true
break
}
}
if opType == "" {
// find default type, if any
for _, op := range arch.ops {
if op.name == s[0] && op.typ != "" {
opType = typeName(op.typ)
break
}
}
}
if opType == "" {
for _, op := range genericOps {
if op.name == s[0] && op.typ != "" {
opType = typeName(op.typ)
break
}
}
} }
var v string var v string
if top && !move { if top && !move {
v = "v" v = "v"
fmt.Fprintf(w, "v.reset(%s)\n", opName(s[0], arch)) fmt.Fprintf(w, "v.reset(Op%s%s)\n", oparch, op.name)
if typeOverride { if typeOverride {
fmt.Fprintf(w, "v.Type = %s\n", opType) fmt.Fprintf(w, "v.Type = %s\n", typ)
} }
} else { } else {
if opType == "" { if typ == "" {
log.Fatalf("sub-expression %s (op=%s) must have a type", result, s[0]) log.Fatalf("sub-expression %s (op=Op%s%s) must have a type", result, oparch, op.name)
} }
v = fmt.Sprintf("v%d", *alloc) v = fmt.Sprintf("v%d", *alloc)
*alloc++ *alloc++
fmt.Fprintf(w, "%s := b.NewValue0(v.Line, %s, %s)\n", v, opName(s[0], arch), opType) fmt.Fprintf(w, "%s := b.NewValue0(v.Line, Op%s%s, %s)\n", v, oparch, op.name, typ)
if move && top { if move && top {
// Rewrite original into a copy // Rewrite original into a copy
fmt.Fprintf(w, "v.reset(OpCopy)\n") fmt.Fprintf(w, "v.reset(OpCopy)\n")
fmt.Fprintf(w, "v.AddArg(%s)\n", v) fmt.Fprintf(w, "v.AddArg(%s)\n", v)
} }
} }
argnum := 0
for _, a := range s[1:] { if auxint != "" {
if a[0] == '<' { fmt.Fprintf(w, "%s.AuxInt = %s\n", v, auxint)
// type restriction, handled above
} else if a[0] == '[' {
// auxint restriction
switch op.aux {
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
default:
log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
} }
x := a[1 : len(a)-1] // remove [] if aux != "" {
fmt.Fprintf(w, "%s.AuxInt = %s\n", v, x) fmt.Fprintf(w, "%s.Aux = %s\n", v, aux)
} else if a[0] == '{' {
// aux restriction
switch op.aux {
case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
default:
log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
} }
x := a[1 : len(a)-1] // remove {} for _, arg := range args {
fmt.Fprintf(w, "%s.Aux = %s\n", v, x) x := genResult0(w, arch, arg, alloc, false, move, loc)
} else {
// regular argument (sexpr or variable)
x := genResult0(w, arch, a, alloc, false, move, loc)
fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x) fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
argnum++
}
}
if op.argLength != -1 && int(op.argLength) != argnum {
log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
} }
return v return v
...@@ -666,16 +574,102 @@ func isBlock(name string, arch arch) bool { ...@@ -666,16 +574,102 @@ func isBlock(name string, arch arch) bool {
return false return false
} }
// opName converts from an op name specified in a rule file to an Op enum. // parseValue parses a parenthesized value from a rule.
// if the name matches a generic op, returns "Op" plus the specified name. // The value can be from the match or the result side.
// Otherwise, returns "Op" plus arch name plus op name. // It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args.
func opName(name string, arch arch) string { // oparch is the architecture that op is located in, or "" for generic.
for _, op := range genericOps { func parseValue(val string, arch arch, loc string) (op opData, oparch string, typ string, auxint string, aux string, args []string) {
if op.name == name { val = val[1 : len(val)-1] // remove ()
return "Op" + name
// Split val up into regions.
// Split by spaces/tabs, except those contained in (), {}, [], or <>.
s := split(val)
// Extract restrictions and args.
for _, a := range s[1:] {
switch a[0] {
case '<':
typ = a[1 : len(a)-1] // remove <>
case '[':
auxint = a[1 : len(a)-1] // remove []
case '{':
aux = a[1 : len(a)-1] // remove {}
default:
args = append(args, a)
}
}
// Resolve the op.
// match reports whether x is a good op to select.
// If strict is true, rule generation might succeed.
// If strict is false, rule generation has failed,
// but we're trying to generate a useful error.
// Doing strict=true then strict=false allows
// precise op matching while retaining good error messages.
match := func(x opData, strict bool, archname string) bool {
if x.name != s[0] {
return false
} }
if x.argLength != -1 && int(x.argLength) != len(args) {
if strict {
return false
} else {
log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s[0], archname, op.argLength, len(args))
} }
return "Op" + arch.name + name }
return true
}
for _, x := range genericOps {
if match(x, true, "generic") {
op = x
break
}
}
if arch.name != "generic" {
for _, x := range arch.ops {
if match(x, true, arch.name) {
if op.name != "" {
log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name)
}
op = x
oparch = arch.name
break
}
}
}
if op.name == "" {
// Failed to find the op.
// Run through everything again with strict=false
// to generate useful diagnosic messages before failing.
for _, x := range genericOps {
match(x, false, "generic")
}
for _, x := range arch.ops {
match(x, false, arch.name)
}
log.Fatalf("%s: unknown op %s", loc, s)
}
// Sanity check aux, auxint.
if auxint != "" {
switch op.aux {
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
default:
log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
}
}
if aux != "" {
switch op.aux {
case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
default:
log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
}
}
return
} }
func blockName(name string, arch arch) string { func blockName(name string, arch arch) string {
......
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -8,8 +8,6 @@ import "math" ...@@ -8,8 +8,6 @@ import "math"
var _ = math.MinInt8 // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used
func rewriteValuePPC64(v *Value, config *Config) bool { func rewriteValuePPC64(v *Value, config *Config) bool {
switch v.Op { switch v.Op {
case OpPPC64ADD:
return rewriteValuePPC64_OpPPC64ADD(v, config)
case OpAdd16: case OpAdd16:
return rewriteValuePPC64_OpAdd16(v, config) return rewriteValuePPC64_OpAdd16(v, config)
case OpAdd32: case OpAdd32:
...@@ -154,22 +152,6 @@ func rewriteValuePPC64(v *Value, config *Config) bool { ...@@ -154,22 +152,6 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
return rewriteValuePPC64_OpLess8U(v, config) return rewriteValuePPC64_OpLess8U(v, config)
case OpLoad: case OpLoad:
return rewriteValuePPC64_OpLoad(v, config) return rewriteValuePPC64_OpLoad(v, config)
case OpPPC64MOVBstore:
return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
case OpPPC64MOVBstorezero:
return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
case OpPPC64MOVDstore:
return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
case OpPPC64MOVDstorezero:
return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
case OpPPC64MOVHstore:
return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
case OpPPC64MOVHstorezero:
return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
case OpPPC64MOVWstore:
return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
case OpPPC64MOVWstorezero:
return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
case OpMove: case OpMove:
return rewriteValuePPC64_OpMove(v, config) return rewriteValuePPC64_OpMove(v, config)
case OpMul16: case OpMul16:
...@@ -216,6 +198,24 @@ func rewriteValuePPC64(v *Value, config *Config) bool { ...@@ -216,6 +198,24 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
return rewriteValuePPC64_OpOr64(v, config) return rewriteValuePPC64_OpOr64(v, config)
case OpOr8: case OpOr8:
return rewriteValuePPC64_OpOr8(v, config) return rewriteValuePPC64_OpOr8(v, config)
case OpPPC64ADD:
return rewriteValuePPC64_OpPPC64ADD(v, config)
case OpPPC64MOVBstore:
return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
case OpPPC64MOVBstorezero:
return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
case OpPPC64MOVDstore:
return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
case OpPPC64MOVDstorezero:
return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
case OpPPC64MOVHstore:
return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
case OpPPC64MOVHstorezero:
return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
case OpPPC64MOVWstore:
return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
case OpPPC64MOVWstorezero:
return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
case OpSignExt16to32: case OpSignExt16to32:
return rewriteValuePPC64_OpSignExt16to32(v, config) return rewriteValuePPC64_OpSignExt16to32(v, config)
case OpSignExt16to64: case OpSignExt16to64:
...@@ -283,41 +283,6 @@ func rewriteValuePPC64(v *Value, config *Config) bool { ...@@ -283,41 +283,6 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
} }
return false return false
} }
func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ADD (MOVDconst [c]) x)
// cond:
// result: (ADDconst [c] x)
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64MOVDconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpPPC64ADDconst)
v.AuxInt = c
v.AddArg(x)
return true
}
// match: (ADD x (MOVDconst [c]))
// cond:
// result: (ADDconst [c] x)
for {
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
v.reset(OpPPC64ADDconst)
v.AuxInt = c
v.AddArg(x)
return true
}
return false
}
func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool { func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
...@@ -1691,439 +1656,115 @@ func rewriteValuePPC64_OpLoad(v *Value, config *Config) bool { ...@@ -1691,439 +1656,115 @@ func rewriteValuePPC64_OpLoad(v *Value, config *Config) bool {
} }
return false return false
} }
func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool { func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) // match: (Move [s] _ _ mem)
// cond: is16Bit(off1+off2) // cond: SizeAndAlign(s).Size() == 0
// result: (MOVBstore [off1+off2] {sym} x val mem) // result: mem
for { for {
off1 := v.AuxInt s := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(is16Bit(off1 + off2)) { if !(SizeAndAlign(s).Size() == 0) {
break break
} }
v.reset(OpPPC64MOVBstore) v.reset(OpCopy)
v.AuxInt = off1 + off2 v.Type = mem.Type
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
// match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) // match: (Move [s] dst src mem)
// cond: c == 0 // cond: SizeAndAlign(s).Size() == 1
// result: (MOVBstorezero [off] {sym} ptr mem) // result: (MOVBstore dst (MOVBZload src mem) mem)
for { for {
off := v.AuxInt s := v.AuxInt
sym := v.Aux dst := v.Args[0]
ptr := v.Args[0] src := v.Args[1]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2] mem := v.Args[2]
if !(c == 0) { if !(SizeAndAlign(s).Size() == 1) {
break break
} }
v.reset(OpPPC64MOVBstorezero) v.reset(OpPPC64MOVBstore)
v.AuxInt = off v.AddArg(dst)
v.Aux = sym v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v.AddArg(ptr) v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false // match: (Move [s] dst src mem)
} // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool { // result: (MOVHstore dst (MOVHZload src mem) mem)
b := v.Block
_ = b
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVBstorezero [off1+off2] {sym} x mem)
for { for {
off1 := v.AuxInt s := v.AuxInt
sym := v.Aux dst := v.Args[0]
v_0 := v.Args[0] src := v.Args[1]
if v_0.Op != OpPPC64ADDconst { mem := v.Args[2]
break if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break break
} }
v.reset(OpPPC64MOVBstorezero) v.reset(OpPPC64MOVHstore)
v.AuxInt = off1 + off2 v.AddArg(dst)
v.Aux = sym v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v.AddArg(x) v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false // match: (Move [s] dst src mem)
} // cond: SizeAndAlign(s).Size() == 2
func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool { // result: (MOVBstore [1] dst (MOVBZload [1] src mem) (MOVBstore dst (MOVBZload src mem) mem))
b := v.Block
_ = b
// match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVDstore [off1+off2] {sym} x val mem)
for { for {
off1 := v.AuxInt s := v.AuxInt
sym := v.Aux dst := v.Args[0]
v_0 := v.Args[0] src := v.Args[1]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2] mem := v.Args[2]
if !(is16Bit(off1 + off2)) { if !(SizeAndAlign(s).Size() == 2) {
break break
} }
v.reset(OpPPC64MOVDstore) v.reset(OpPPC64MOVBstore)
v.AuxInt = off1 + off2 v.AuxInt = 1
v.Aux = sym v.AddArg(dst)
v.AddArg(x) v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v.AddArg(val) v0.AuxInt = 1
v.AddArg(mem) v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true return true
} }
// match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) // match: (Move [s] dst src mem)
// cond: c == 0 // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
// result: (MOVDstorezero [off] {sym} ptr mem) // result: (MOVWstore dst (MOVWload src mem) mem)
for { for {
off := v.AuxInt s := v.AuxInt
sym := v.Aux dst := v.Args[0]
ptr := v.Args[0] src := v.Args[1]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2] mem := v.Args[2]
if !(c == 0) { if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
break break
} }
v.reset(OpPPC64MOVDstorezero) v.reset(OpPPC64MOVWstore)
v.AuxInt = off v.AddArg(dst)
v.Aux = sym v0 := b.NewValue0(v.Line, OpPPC64MOVWload, config.fe.TypeInt32())
v.AddArg(ptr) v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false // match: (Move [s] dst src mem)
} // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool { // result: (MOVHstore [2] dst (MOVHZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
b := v.Block
_ = b
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVDstorezero [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVHstore [off1+off2] {sym} x val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVHstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
// cond: c == 0
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVHstorezero [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVWstore [off1+off2] {sym} x val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVWstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
// cond: c == 0
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVWstorezero [off1+off2] {sym} x mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true
}
return false
}
func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Move [s] _ _ mem)
// cond: SizeAndAlign(s).Size() == 0
// result: mem
for {
s := v.AuxInt
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 0) {
break
}
v.reset(OpCopy)
v.Type = mem.Type
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 1
// result: (MOVBstore dst (MOVBZload src mem) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 1) {
break
}
v.reset(OpPPC64MOVBstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstore dst (MOVHZload src mem) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
break
}
v.reset(OpPPC64MOVHstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 2
// result: (MOVBstore [1] dst (MOVBZload [1] src mem) (MOVBstore dst (MOVBZload src mem) mem))
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 2) {
break
}
v.reset(OpPPC64MOVBstore)
v.AuxInt = 1
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
break
}
v.reset(OpPPC64MOVWstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Line, OpPPC64MOVWload, config.fe.TypeInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Move [s] dst src mem)
// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
// result: (MOVHstore [2] dst (MOVHZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
for { for {
s := v.AuxInt s := v.AuxInt
dst := v.Args[0] dst := v.Args[0]
...@@ -2347,8 +1988,8 @@ func rewriteValuePPC64_OpMove(v *Value, config *Config) bool { ...@@ -2347,8 +1988,8 @@ func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
v.AddArg(dst) v.AddArg(dst)
v.AddArg(src) v.AddArg(src)
v0 := b.NewValue0(v.Line, OpPPC64ADDconst, src.Type) v0 := b.NewValue0(v.Line, OpPPC64ADDconst, src.Type)
v0.AddArg(src)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config) v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
v0.AddArg(src)
v.AddArg(v0) v.AddArg(v0)
v.AddArg(mem) v.AddArg(mem)
return true return true
...@@ -2424,306 +2065,665 @@ func rewriteValuePPC64_OpMul64F(v *Value, config *Config) bool { ...@@ -2424,306 +2065,665 @@ func rewriteValuePPC64_OpMul64F(v *Value, config *Config) bool {
_ = b _ = b
// match: (Mul64F x y) // match: (Mul64F x y)
// cond: // cond:
// result: (FMUL x y) // result: (FMUL x y)
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64FMUL)
v.AddArg(x)
v.AddArg(y)
return true
}
}
func rewriteValuePPC64_OpMul8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Mul8 x y)
// cond:
// result: (MULLW (SignExt8to32 x) (SignExt8to32 y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64MULLW)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
func rewriteValuePPC64_OpNeg16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg16 x)
// cond:
// result: (NEG (ZeroExt16to64 x))
for {
x := v.Args[0]
v.reset(OpPPC64NEG)
v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeg32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg32 x)
// cond:
// result: (NEG (ZeroExt32to64 x))
for {
x := v.Args[0]
v.reset(OpPPC64NEG)
v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeg64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg64 x)
// cond:
// result: (NEG x)
for {
x := v.Args[0]
v.reset(OpPPC64NEG)
v.AddArg(x)
return true
}
}
func rewriteValuePPC64_OpNeg8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neg8 x)
// cond:
// result: (NEG (ZeroExt8to64 x))
for {
x := v.Args[0]
v.reset(OpPPC64NEG)
v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq16 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq32 x y)
// cond:
// result: (NotEqual (CMPW x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq64(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq64 x y)
// cond:
// result: (NotEqual (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq64F(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
// cond:
// result: (NotEqual (FCMPU x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq8 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNeqPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (NeqPtr x y)
// cond:
// result: (NotEqual (CMP x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
func rewriteValuePPC64_OpNilCheck(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
for { for {
x := v.Args[0] ptr := v.Args[0]
y := v.Args[1] mem := v.Args[1]
v.reset(OpPPC64FMUL) v.reset(OpPPC64LoweredNilCheck)
v.AddArg(x) v.AddArg(ptr)
v.AddArg(y) v.AddArg(mem)
return true return true
} }
} }
func rewriteValuePPC64_OpMul8(v *Value, config *Config) bool { func rewriteValuePPC64_OpOffPtr(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Mul8 x y) // match: (OffPtr [off] ptr)
// cond: // cond:
// result: (MULLW (SignExt8to32 x) (SignExt8to32 y)) // result: (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
for { for {
x := v.Args[0] off := v.AuxInt
y := v.Args[1] ptr := v.Args[0]
v.reset(OpPPC64MULLW) v.reset(OpPPC64ADD)
v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) v0 := b.NewValue0(v.Line, OpPPC64MOVDconst, config.Frontend().TypeInt64())
v0.AddArg(x) v0.AuxInt = off
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) v.AddArg(ptr)
v1.AddArg(y)
v.AddArg(v1)
return true return true
} }
} }
func rewriteValuePPC64_OpNeg16(v *Value, config *Config) bool { func rewriteValuePPC64_OpOr16(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Neg16 x) // match: (Or16 x y)
// cond: // cond:
// result: (NEG (ZeroExt16to64 x)) // result: (OR (ZeroExt16to64 x) (ZeroExt16to64 y))
for { for {
x := v.Args[0] x := v.Args[0]
v.reset(OpPPC64NEG) y := v.Args[1]
v.reset(OpPPC64OR)
v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64()) v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v0.AddArg(x) v0.AddArg(x)
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true return true
} }
} }
func rewriteValuePPC64_OpNeg32(v *Value, config *Config) bool { func rewriteValuePPC64_OpOr32(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Neg32 x) // match: (Or32 x y)
// cond: // cond:
// result: (NEG (ZeroExt32to64 x)) // result: (OR (ZeroExt32to64 x) (ZeroExt32to64 y))
for { for {
x := v.Args[0] x := v.Args[0]
v.reset(OpPPC64NEG) y := v.Args[1]
v.reset(OpPPC64OR)
v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64()) v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v0.AddArg(x) v0.AddArg(x)
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true return true
} }
} }
func rewriteValuePPC64_OpNeg64(v *Value, config *Config) bool { func rewriteValuePPC64_OpOr64(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Neg64 x) // match: (Or64 x y)
// cond: // cond:
// result: (NEG x) // result: (OR x y)
for { for {
x := v.Args[0] x := v.Args[0]
v.reset(OpPPC64NEG) y := v.Args[1]
v.reset(OpPPC64OR)
v.AddArg(x) v.AddArg(x)
v.AddArg(y)
return true return true
} }
} }
func rewriteValuePPC64_OpNeg8(v *Value, config *Config) bool { func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Neg8 x) // match: (Or8 x y)
// cond: // cond:
// result: (NEG (ZeroExt8to64 x)) // result: (OR (ZeroExt8to64 x) (ZeroExt8to64 y))
for { for {
x := v.Args[0] x := v.Args[0]
v.reset(OpPPC64NEG) y := v.Args[1]
v.reset(OpPPC64OR)
v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64()) v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v0.AddArg(x) v0.AddArg(x)
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true return true
} }
} }
func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Neq16 x y) // match: (ADD (MOVDconst [c]) x)
// cond: // cond:
// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) // result: (ADDconst [c] x)
for { for {
x := v.Args[0] v_0 := v.Args[0]
y := v.Args[1] if v_0.Op != OpPPC64MOVDconst {
v.reset(OpPPC64NotEqual) break
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags) }
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) c := v_0.AuxInt
v1.AddArg(x) x := v.Args[1]
v0.AddArg(v1) v.reset(OpPPC64ADDconst)
v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) v.AuxInt = c
v2.AddArg(y) v.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
return true return true
} }
} // match: (ADD x (MOVDconst [c]))
func rewriteValuePPC64_OpNeq32(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Neq32 x y)
// cond: // cond:
// result: (NotEqual (CMPW x y)) // result: (ADDconst [c] x)
for { for {
x := v.Args[0] x := v.Args[0]
y := v.Args[1] v_1 := v.Args[1]
v.reset(OpPPC64NotEqual) if v_1.Op != OpPPC64MOVDconst {
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags) break
v0.AddArg(x) }
v0.AddArg(y) c := v_1.AuxInt
v.AddArg(v0) v.reset(OpPPC64ADDconst)
v.AuxInt = c
v.AddArg(x)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpNeq64(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Neq64 x y) // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: // cond: is16Bit(off1+off2)
// result: (NotEqual (CMP x y)) // result: (MOVBstore [off1+off2] {sym} x val mem)
for { for {
x := v.Args[0] off1 := v.AuxInt
y := v.Args[1] sym := v.Aux
v.reset(OpPPC64NotEqual) v_0 := v.Args[0]
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags) if v_0.Op != OpPPC64ADDconst {
v0.AddArg(x) break
v0.AddArg(y) }
v.AddArg(v0) off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVBstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
// cond: c == 0
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
sym := v.Aux
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpNeq64F(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Neq64F x y) // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: // cond: is16Bit(off1+off2)
// result: (NotEqual (FCMPU x y)) // result: (MOVBstorezero [off1+off2] {sym} x mem)
for { for {
x := v.Args[0] off1 := v.AuxInt
y := v.Args[1] sym := v.Aux
v.reset(OpPPC64NotEqual) v_0 := v.Args[0]
v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags) if v_0.Op != OpPPC64ADDconst {
v0.AddArg(x) break
v0.AddArg(y) }
v.AddArg(v0) off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Neq8 x y) // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: // cond: is16Bit(off1+off2)
// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) // result: (MOVDstore [off1+off2] {sym} x val mem)
for { for {
x := v.Args[0] off1 := v.AuxInt
y := v.Args[1] sym := v.Aux
v.reset(OpPPC64NotEqual) v_0 := v.Args[0]
v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags) if v_0.Op != OpPPC64ADDconst {
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) break
v1.AddArg(x) }
v0.AddArg(v1) off2 := v_0.AuxInt
v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) x := v_0.Args[0]
v2.AddArg(y) val := v.Args[1]
v0.AddArg(v2) mem := v.Args[2]
v.AddArg(v0) if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVDstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true return true
} }
} // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
func rewriteValuePPC64_OpNeqPtr(v *Value, config *Config) bool { // cond: c == 0
b := v.Block // result: (MOVDstorezero [off] {sym} ptr mem)
_ = b
// match: (NeqPtr x y)
// cond:
// result: (NotEqual (CMP x y))
for { for {
x := v.Args[0] off := v.AuxInt
y := v.Args[1] sym := v.Aux
v.reset(OpPPC64NotEqual) ptr := v.Args[0]
v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags) v_1 := v.Args[1]
v0.AddArg(x) if v_1.Op != OpPPC64MOVDconst {
v0.AddArg(y) break
v.AddArg(v0) }
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpNilCheck(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (NilCheck ptr mem) // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: // cond: is16Bit(off1+off2)
// result: (LoweredNilCheck ptr mem) // result: (MOVDstorezero [off1+off2] {sym} x mem)
for { for {
ptr := v.Args[0] off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
mem := v.Args[1] mem := v.Args[1]
v.reset(OpPPC64LoweredNilCheck) if !(is16Bit(off1 + off2)) {
v.AddArg(ptr) break
}
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem) v.AddArg(mem)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpOffPtr(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (OffPtr [off] ptr) // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: // cond: is16Bit(off1+off2)
// result: (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr) // result: (MOVHstore [off1+off2] {sym} x val mem)
for {
off1 := v.AuxInt
sym := v.Aux
v_0 := v.Args[0]
if v_0.Op != OpPPC64ADDconst {
break
}
off2 := v_0.AuxInt
x := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVHstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
// cond: c == 0
// result: (MOVHstorezero [off] {sym} ptr mem)
for { for {
off := v.AuxInt off := v.AuxInt
sym := v.Aux
ptr := v.Args[0] ptr := v.Args[0]
v.reset(OpPPC64ADD) v_1 := v.Args[1]
v0 := b.NewValue0(v.Line, OpPPC64MOVDconst, config.Frontend().TypeInt64()) if v_1.Op != OpPPC64MOVDconst {
v0.AuxInt = off break
v.AddArg(v0) }
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr) v.AddArg(ptr)
v.AddArg(mem)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpOr16(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Or16 x y) // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: // cond: is16Bit(off1+off2)
// result: (OR (ZeroExt16to64 x) (ZeroExt16to64 y)) // result: (MOVHstorezero [off1+off2] {sym} x mem)
for { for {
x := v.Args[0] off1 := v.AuxInt
y := v.Args[1] sym := v.Aux
v.reset(OpPPC64OR) v_0 := v.Args[0]
v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64()) if v_0.Op != OpPPC64ADDconst {
v0.AddArg(x) break
v.AddArg(v0) }
v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64()) off2 := v_0.AuxInt
v1.AddArg(y) x := v_0.Args[0]
v.AddArg(v1) mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpOr32(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Or32 x y) // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: // cond: is16Bit(off1+off2)
// result: (OR (ZeroExt32to64 x) (ZeroExt32to64 y)) // result: (MOVWstore [off1+off2] {sym} x val mem)
for { for {
x := v.Args[0] off1 := v.AuxInt
y := v.Args[1] sym := v.Aux
v.reset(OpPPC64OR) v_0 := v.Args[0]
v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64()) if v_0.Op != OpPPC64ADDconst {
v0.AddArg(x) break
v.AddArg(v0) }
v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64()) off2 := v_0.AuxInt
v1.AddArg(y) x := v_0.Args[0]
v.AddArg(v1) val := v.Args[1]
mem := v.Args[2]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVWstore)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(val)
v.AddArg(mem)
return true return true
} }
} // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
func rewriteValuePPC64_OpOr64(v *Value, config *Config) bool { // cond: c == 0
b := v.Block // result: (MOVWstorezero [off] {sym} ptr mem)
_ = b
// match: (Or64 x y)
// cond:
// result: (OR x y)
for { for {
x := v.Args[0] off := v.AuxInt
y := v.Args[1] sym := v.Aux
v.reset(OpPPC64OR) ptr := v.Args[0]
v.AddArg(x) v_1 := v.Args[1]
v.AddArg(y) if v_1.Op != OpPPC64MOVDconst {
break
}
c := v_1.AuxInt
mem := v.Args[2]
if !(c == 0) {
break
}
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool { func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
b := v.Block b := v.Block
_ = b _ = b
// match: (Or8 x y) // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: // cond: is16Bit(off1+off2)
// result: (OR (ZeroExt8to64 x) (ZeroExt8to64 y)) // result: (MOVWstorezero [off1+off2] {sym} x mem)
for { for {
x := v.Args[0] off1 := v.AuxInt
y := v.Args[1] sym := v.Aux
v.reset(OpPPC64OR) v_0 := v.Args[0]
v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64()) if v_0.Op != OpPPC64ADDconst {
v0.AddArg(x) break
v.AddArg(v0) }
v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64()) off2 := v_0.AuxInt
v1.AddArg(y) x := v_0.Args[0]
v.AddArg(v1) mem := v.Args[1]
if !(is16Bit(off1 + off2)) {
break
}
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(x)
v.AddArg(mem)
return true return true
} }
return false
} }
func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool { func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block b := v.Block
...@@ -3456,8 +3456,8 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool { ...@@ -3456,8 +3456,8 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
v.AuxInt = SizeAndAlign(s).Align() v.AuxInt = SizeAndAlign(s).Align()
v.AddArg(ptr) v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpPPC64ADDconst, ptr.Type) v0 := b.NewValue0(v.Line, OpPPC64ADDconst, ptr.Type)
v0.AddArg(ptr)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config) v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
v0.AddArg(ptr)
v.AddArg(v0) v.AddArg(v0)
v.AddArg(mem) v.AddArg(mem)
return true return true
......
...@@ -198,19 +198,19 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool { ...@@ -198,19 +198,19 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
// cond: is64BitInt(v.Type) && v.Type.IsSigned() // cond: is64BitInt(v.Type) && v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off])) // result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
for { for {
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(is64BitInt(v.Type) && v.Type.IsSigned()) { if !(is64BitInt(v.Type) && v.Type.IsSigned()) {
break break
} }
v.reset(OpInt64Make) v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32()) v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32())
v0.Aux = n
v0.AuxInt = off + 4 v0.AuxInt = off + 4
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32()) v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
v1.Aux = n
v1.AuxInt = off v1.AuxInt = off
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
return true return true
} }
...@@ -218,19 +218,19 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool { ...@@ -218,19 +218,19 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
// cond: is64BitInt(v.Type) && !v.Type.IsSigned() // cond: is64BitInt(v.Type) && !v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off])) // result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
for { for {
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(is64BitInt(v.Type) && !v.Type.IsSigned()) { if !(is64BitInt(v.Type) && !v.Type.IsSigned()) {
break break
} }
v.reset(OpInt64Make) v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32()) v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
v0.Aux = n
v0.AuxInt = off + 4 v0.AuxInt = off + 4
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32()) v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
v1.Aux = n
v1.AuxInt = off v1.AuxInt = off
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
return true return true
} }
...@@ -738,13 +738,13 @@ func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool { ...@@ -738,13 +738,13 @@ func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
// cond: c <= 32 // cond: c <= 32
// result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [32-c]))) (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [32-c])))) // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [32-c]))) (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [32-c]))))
for { for {
c := v.AuxInt
v_0 := v.Args[0] v_0 := v.Args[0]
if v_0.Op != OpInt64Make { if v_0.Op != OpInt64Make {
break break
} }
hi := v_0.Args[0] hi := v_0.Args[0]
lo := v_0.Args[1] lo := v_0.Args[1]
c := v.AuxInt
if !(c <= 32) { if !(c <= 32) {
break break
} }
...@@ -783,22 +783,22 @@ func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool { ...@@ -783,22 +783,22 @@ func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
// cond: c > 32 // cond: c > 32
// result: (Lrot64 (Int64Make lo hi) [c-32]) // result: (Lrot64 (Int64Make lo hi) [c-32])
for { for {
c := v.AuxInt
v_0 := v.Args[0] v_0 := v.Args[0]
if v_0.Op != OpInt64Make { if v_0.Op != OpInt64Make {
break break
} }
hi := v_0.Args[0] hi := v_0.Args[0]
lo := v_0.Args[1] lo := v_0.Args[1]
c := v.AuxInt
if !(c > 32) { if !(c > 32) {
break break
} }
v.reset(OpLrot64) v.reset(OpLrot64)
v.AuxInt = c - 32
v0 := b.NewValue0(v.Line, OpInt64Make, config.fe.TypeUInt64()) v0 := b.NewValue0(v.Line, OpInt64Make, config.fe.TypeUInt64())
v0.AddArg(lo) v0.AddArg(lo)
v0.AddArg(hi) v0.AddArg(hi)
v.AddArg(v0) v.AddArg(v0)
v.AuxInt = c - 32
return true return true
} }
return false return false
......
...@@ -733,8 +733,8 @@ func rewriteValuegeneric_OpAddPtr(v *Value, config *Config) bool { ...@@ -733,8 +733,8 @@ func rewriteValuegeneric_OpAddPtr(v *Value, config *Config) bool {
c := v_1.AuxInt c := v_1.AuxInt
v.reset(OpOffPtr) v.reset(OpOffPtr)
v.Type = t v.Type = t
v.AddArg(x)
v.AuxInt = c v.AuxInt = c
v.AddArg(x)
return true return true
} }
return false return false
...@@ -1370,19 +1370,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1370,19 +1370,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsString() // cond: v.Type.IsString()
// result: (StringMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize])) // result: (StringMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
for { for {
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(v.Type.IsString()) { if !(v.Type.IsString()) {
break break
} }
v.reset(OpStringMake) v.reset(OpStringMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr()) v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
v0.Aux = n
v0.AuxInt = off v0.AuxInt = off
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt()) v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
v1.Aux = n
v1.AuxInt = off + config.PtrSize v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
return true return true
} }
...@@ -1390,23 +1390,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1390,23 +1390,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsSlice() // cond: v.Type.IsSlice()
// result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize])) // result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
for { for {
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(v.Type.IsSlice()) { if !(v.Type.IsSlice()) {
break break
} }
v.reset(OpSliceMake) v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpArg, v.Type.ElemType().PtrTo()) v0 := b.NewValue0(v.Line, OpArg, v.Type.ElemType().PtrTo())
v0.Aux = n
v0.AuxInt = off v0.AuxInt = off
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt()) v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
v1.Aux = n
v1.AuxInt = off + config.PtrSize v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt()) v2 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
v2.Aux = n
v2.AuxInt = off + 2*config.PtrSize v2.AuxInt = off + 2*config.PtrSize
v2.Aux = n
v.AddArg(v2) v.AddArg(v2)
return true return true
} }
...@@ -1414,19 +1414,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1414,19 +1414,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsInterface() // cond: v.Type.IsInterface()
// result: (IMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize])) // result: (IMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
for { for {
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(v.Type.IsInterface()) { if !(v.Type.IsInterface()) {
break break
} }
v.reset(OpIMake) v.reset(OpIMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr()) v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
v0.Aux = n
v0.AuxInt = off v0.AuxInt = off
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr()) v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
v1.Aux = n
v1.AuxInt = off + config.PtrSize v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
return true return true
} }
...@@ -1434,19 +1434,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1434,19 +1434,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsComplex() && v.Type.Size() == 16 // cond: v.Type.IsComplex() && v.Type.Size() == 16
// result: (ComplexMake (Arg <config.fe.TypeFloat64()> {n} [off]) (Arg <config.fe.TypeFloat64()> {n} [off+8])) // result: (ComplexMake (Arg <config.fe.TypeFloat64()> {n} [off]) (Arg <config.fe.TypeFloat64()> {n} [off+8]))
for { for {
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(v.Type.IsComplex() && v.Type.Size() == 16) { if !(v.Type.IsComplex() && v.Type.Size() == 16) {
break break
} }
v.reset(OpComplexMake) v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64()) v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
v0.Aux = n
v0.AuxInt = off v0.AuxInt = off
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64()) v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
v1.Aux = n
v1.AuxInt = off + 8 v1.AuxInt = off + 8
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
return true return true
} }
...@@ -1454,19 +1454,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1454,19 +1454,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// cond: v.Type.IsComplex() && v.Type.Size() == 8 // cond: v.Type.IsComplex() && v.Type.Size() == 8
// result: (ComplexMake (Arg <config.fe.TypeFloat32()> {n} [off]) (Arg <config.fe.TypeFloat32()> {n} [off+4])) // result: (ComplexMake (Arg <config.fe.TypeFloat32()> {n} [off]) (Arg <config.fe.TypeFloat32()> {n} [off+4]))
for { for {
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(v.Type.IsComplex() && v.Type.Size() == 8) { if !(v.Type.IsComplex() && v.Type.Size() == 8) {
break break
} }
v.reset(OpComplexMake) v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32()) v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
v0.Aux = n
v0.AuxInt = off v0.AuxInt = off
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32()) v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
v1.Aux = n
v1.AuxInt = off + 4 v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
return true return true
} }
...@@ -1486,15 +1486,15 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1486,15 +1486,15 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])) // result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
for { for {
t := v.Type t := v.Type
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) { if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
break break
} }
v.reset(OpStructMake1) v.reset(OpStructMake1)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0)) v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
v0.Aux = n
v0.AuxInt = off + t.FieldOff(0) v0.AuxInt = off + t.FieldOff(0)
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
return true return true
} }
...@@ -1503,19 +1503,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1503,19 +1503,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])) // result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
for { for {
t := v.Type t := v.Type
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) { if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
break break
} }
v.reset(OpStructMake2) v.reset(OpStructMake2)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0)) v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
v0.Aux = n
v0.AuxInt = off + t.FieldOff(0) v0.AuxInt = off + t.FieldOff(0)
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1)) v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
v1.Aux = n
v1.AuxInt = off + t.FieldOff(1) v1.AuxInt = off + t.FieldOff(1)
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
return true return true
} }
...@@ -1524,23 +1524,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1524,23 +1524,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)])) // result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
for { for {
t := v.Type t := v.Type
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) { if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
break break
} }
v.reset(OpStructMake3) v.reset(OpStructMake3)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0)) v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
v0.Aux = n
v0.AuxInt = off + t.FieldOff(0) v0.AuxInt = off + t.FieldOff(0)
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1)) v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
v1.Aux = n
v1.AuxInt = off + t.FieldOff(1) v1.AuxInt = off + t.FieldOff(1)
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2)) v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
v2.Aux = n
v2.AuxInt = off + t.FieldOff(2) v2.AuxInt = off + t.FieldOff(2)
v2.Aux = n
v.AddArg(v2) v.AddArg(v2)
return true return true
} }
...@@ -1549,27 +1549,27 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool { ...@@ -1549,27 +1549,27 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
// result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]) (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)])) // result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]) (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
for { for {
t := v.Type t := v.Type
n := v.Aux
off := v.AuxInt off := v.AuxInt
n := v.Aux
if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) { if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
break break
} }
v.reset(OpStructMake4) v.reset(OpStructMake4)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0)) v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
v0.Aux = n
v0.AuxInt = off + t.FieldOff(0) v0.AuxInt = off + t.FieldOff(0)
v0.Aux = n
v.AddArg(v0) v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1)) v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
v1.Aux = n
v1.AuxInt = off + t.FieldOff(1) v1.AuxInt = off + t.FieldOff(1)
v1.Aux = n
v.AddArg(v1) v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2)) v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
v2.Aux = n
v2.AuxInt = off + t.FieldOff(2) v2.AuxInt = off + t.FieldOff(2)
v2.Aux = n
v.AddArg(v2) v.AddArg(v2)
v3 := b.NewValue0(v.Line, OpArg, t.FieldType(3)) v3 := b.NewValue0(v.Line, OpArg, t.FieldType(3))
v3.Aux = n
v3.AuxInt = off + t.FieldOff(3) v3.AuxInt = off + t.FieldOff(3)
v3.Aux = n
v.AddArg(v3) v.AddArg(v3)
return true return true
} }
...@@ -6359,26 +6359,26 @@ func rewriteValuegeneric_OpOffPtr(v *Value, config *Config) bool { ...@@ -6359,26 +6359,26 @@ func rewriteValuegeneric_OpOffPtr(v *Value, config *Config) bool {
// cond: // cond:
// result: (OffPtr p [a+b]) // result: (OffPtr p [a+b])
for { for {
a := v.AuxInt
v_0 := v.Args[0] v_0 := v.Args[0]
if v_0.Op != OpOffPtr { if v_0.Op != OpOffPtr {
break break
} }
p := v_0.Args[0]
b := v_0.AuxInt b := v_0.AuxInt
a := v.AuxInt p := v_0.Args[0]
v.reset(OpOffPtr) v.reset(OpOffPtr)
v.AddArg(p)
v.AuxInt = a + b v.AuxInt = a + b
v.AddArg(p)
return true return true
} }
// match: (OffPtr p [0]) // match: (OffPtr p [0])
// cond: v.Type.Compare(p.Type) == CMPeq // cond: v.Type.Compare(p.Type) == CMPeq
// result: p // result: p
for { for {
p := v.Args[0]
if v.AuxInt != 0 { if v.AuxInt != 0 {
break break
} }
p := v.Args[0]
if !(v.Type.Compare(p.Type) == CMPeq) { if !(v.Type.Compare(p.Type) == CMPeq) {
break break
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment