Commit 932c1e3d authored by Marvin Stenger, committed by Brad Fitzpatrick

cmd/compile/internal: some janitoring

Nicer swaps, loops (removed tmp variables). Use of bool instead of int.

This change passes go build -toolexec 'toolstash -cmp' -a std.

Change-Id: I541904c74b57297848decc51a8a4913a8eca4af3
Reviewed-on: https://go-review.googlesource.com/14316
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent 31322996
...@@ -187,13 +187,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -187,13 +187,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type t := nl.Type
t0 := t t0 := t
check := 0 check := false
if gc.Issigned[t.Etype] { if gc.Issigned[t.Etype] {
check = 1 check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) { if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0 check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 { } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0 check = false
} }
} }
...@@ -203,7 +203,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -203,7 +203,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} else { } else {
t = gc.Types[gc.TUINT32] t = gc.Types[gc.TUINT32]
} }
check = 0 check = false
} }
a := optoas(op, t) a := optoas(op, t)
...@@ -252,7 +252,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -252,7 +252,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} }
var p2 *obj.Prog var p2 *obj.Prog
if check != 0 { if check {
gc.Nodconst(&n4, t, -1) gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n3, &n4) gins(optoas(gc.OCMP, t), &n3, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
...@@ -289,7 +289,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -289,7 +289,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gmove(&dx, res) gmove(&dx, res)
} }
restx(&dx, &olddx) restx(&dx, &olddx)
if check != 0 { if check {
gc.Patch(p2, gc.Pc) gc.Patch(p2, gc.Pc)
} }
restx(&ax, &oldax) restx(&ax, &oldax)
...@@ -340,9 +340,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -340,9 +340,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type t := nl.Type
a := optoas(gc.OHMUL, t) a := optoas(gc.OHMUL, t)
if nl.Ullman < nr.Ullman { if nl.Ullman < nr.Ullman {
tmp := nl nl, nr = nr, nl
nl = nr
nr = tmp
} }
var n1 gc.Node var n1 gc.Node
...@@ -500,9 +498,7 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool { ...@@ -500,9 +498,7 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
// largest ullman on left. // largest ullman on left.
if nl.Ullman < nr.Ullman { if nl.Ullman < nr.Ullman {
tmp := nl nl, nr = nr, nl
nl = nr
nr = tmp
} }
// generate operands in "8-bit" registers. // generate operands in "8-bit" registers.
...@@ -564,12 +560,7 @@ func clearfat(nl *gc.Node) { ...@@ -564,12 +560,7 @@ func clearfat(nl *gc.Node) {
n1.Op = gc.OINDREG n1.Op = gc.OINDREG
var z gc.Node var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0) gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for { for ; q > 0; q-- {
tmp14 := q
q--
if tmp14 <= 0 {
break
}
n1.Type = z.Type n1.Type = z.Type
gins(x86.AMOVQ, &z, &n1) gins(x86.AMOVQ, &z, &n1)
n1.Xoffset += 8 n1.Xoffset += 8
...@@ -584,12 +575,7 @@ func clearfat(nl *gc.Node) { ...@@ -584,12 +575,7 @@ func clearfat(nl *gc.Node) {
} }
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0) gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
for { for ; c > 0; c-- {
tmp15 := c
c--
if tmp15 <= 0 {
break
}
n1.Type = z.Type n1.Type = z.Type
gins(x86.AMOVB, &z, &n1) gins(x86.AMOVB, &z, &n1)
n1.Xoffset++ n1.Xoffset++
......
...@@ -205,12 +205,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) { ...@@ -205,12 +205,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
gc.Regfree(&nend) gc.Regfree(&nend)
} else { } else {
var p *obj.Prog var p *obj.Prog
for { for ; c > 0; c-- {
tmp14 := c
c--
if tmp14 <= 0 {
break
}
p = gins(op, &src, &tmp) p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir) p.From.Offset = int64(dir)
......
...@@ -120,9 +120,7 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int ...@@ -120,9 +120,7 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int
*/ */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) { func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Ullman < nr.Ullman { if nl.Ullman < nr.Ullman {
tmp := nl nl, nr = nr, nl
nl = nr
nr = tmp
} }
t := nl.Type t := nl.Type
......
...@@ -132,13 +132,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) { ...@@ -132,13 +132,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// ADDs. That will produce shorter, more // ADDs. That will produce shorter, more
// pipeline-able code. // pipeline-able code.
var p *obj.Prog var p *obj.Prog
for { for ; c > 0; c-- {
tmp14 := c
c--
if tmp14 <= 0 {
break
}
p = gins(op, &src, &tmp) p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir) p.From.Offset = int64(dir)
......
...@@ -151,13 +151,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -151,13 +151,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type t := nl.Type
t0 := t t0 := t
check := 0 check := false
if gc.Issigned[t.Etype] { if gc.Issigned[t.Etype] {
check = 1 check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) { if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0 check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 { } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0 check = false
} }
} }
...@@ -167,7 +167,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -167,7 +167,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} else { } else {
t = gc.Types[gc.TUINT64] t = gc.Types[gc.TUINT64]
} }
check = 0 check = false
} }
a := optoas(gc.ODIV, t) a := optoas(gc.ODIV, t)
...@@ -206,7 +206,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -206,7 +206,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gc.Patch(p1, gc.Pc) gc.Patch(p1, gc.Pc)
var p2 *obj.Prog var p2 *obj.Prog
if check != 0 { if check {
var nm1 gc.Node var nm1 gc.Node
gc.Nodconst(&nm1, t, -1) gc.Nodconst(&nm1, t, -1)
gcmp(optoas(gc.OCMP, t), &tr, &nm1) gcmp(optoas(gc.OCMP, t), &tr, &nm1)
...@@ -250,7 +250,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -250,7 +250,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} }
gc.Regfree(&tl) gc.Regfree(&tl)
if check != 0 { if check {
gc.Patch(p2, gc.Pc) gc.Patch(p2, gc.Pc)
} }
} }
...@@ -262,9 +262,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -262,9 +262,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) { func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left. // largest ullman on left.
if nl.Ullman < nr.Ullman { if nl.Ullman < nr.Ullman {
tmp := (*gc.Node)(nl) nl, nr = nr, nl
nl = nr
nr = tmp
} }
t := (*gc.Type)(nl.Type) t := (*gc.Type)(nl.Type)
......
...@@ -705,9 +705,7 @@ func cgen_wb(n, res *Node, wb bool) { ...@@ -705,9 +705,7 @@ func cgen_wb(n, res *Node, wb bool) {
*/ */
sbop: // symmetric binary sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) { if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
r := nl nl, nr = nr, nl
nl = nr
nr = r
} }
abop: // asymmetric binary abop: // asymmetric binary
......
...@@ -1685,15 +1685,13 @@ func livenessprintdebug(lv *Liveness) { ...@@ -1685,15 +1685,13 @@ func livenessprintdebug(lv *Liveness) {
for j = 0; j < len(lv.vars); j++ { for j = 0; j < len(lv.vars); j++ {
n = lv.vars[j] n = lv.vars[j]
if islive(n, args, locals) { if islive(n, args, locals) {
tmp9 := printed if printed != 0 {
printed++
if tmp9 != 0 {
fmt.Printf(",") fmt.Printf(",")
} }
fmt.Printf("%v", n) fmt.Printf("%v", n)
printed++
} }
} }
fmt.Printf("\n") fmt.Printf("\n")
} }
......
...@@ -665,12 +665,7 @@ func sortinter(t *Type) *Type { ...@@ -665,12 +665,7 @@ func sortinter(t *Type) *Type {
i++ i++
} }
sort.Sort(methcmp(a[:i])) sort.Sort(methcmp(a[:i]))
for { for i--; i >= 0; i-- {
tmp11 := i
i--
if tmp11 <= 0 {
break
}
a[i].Down = f a[i].Down = f
f = a[i] f = a[i]
} }
......
...@@ -3073,11 +3073,10 @@ func typecheckcomplit(np **Node) { ...@@ -3073,11 +3073,10 @@ func typecheckcomplit(np **Node) {
setlineno(ll.N) setlineno(ll.N)
typecheck(&ll.N, Erv) typecheck(&ll.N, Erv)
if f == nil { if f == nil {
tmp12 := bad if bad == 0 {
bad++
if tmp12 == 0 {
Yyerror("too many values in struct initializer") Yyerror("too many values in struct initializer")
} }
bad++
continue continue
} }
...@@ -3110,11 +3109,10 @@ func typecheckcomplit(np **Node) { ...@@ -3110,11 +3109,10 @@ func typecheckcomplit(np **Node) {
l = ll.N l = ll.N
setlineno(l) setlineno(l)
if l.Op != OKEY { if l.Op != OKEY {
tmp13 := bad if bad == 0 {
bad++
if tmp13 == 0 {
Yyerror("mixture of field:value and value initializers") Yyerror("mixture of field:value and value initializers")
} }
bad++
typecheck(&ll.N, Erv) typecheck(&ll.N, Erv)
continue continue
} }
......
...@@ -126,13 +126,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) { ...@@ -126,13 +126,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// ADDs. That will produce shorter, more // ADDs. That will produce shorter, more
// pipeline-able code. // pipeline-able code.
var p *obj.Prog var p *obj.Prog
for { for ; c > 0; c-- {
tmp14 := c
c--
if tmp14 <= 0 {
break
}
p = gins(op, &src, &tmp) p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir) p.From.Offset = int64(dir)
......
...@@ -141,13 +141,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -141,13 +141,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type t := nl.Type
t0 := t t0 := t
check := 0 check := false
if gc.Issigned[t.Etype] { if gc.Issigned[t.Etype] {
check = 1 check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) { if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0 check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 { } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0 check = false
} }
} }
...@@ -157,7 +157,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -157,7 +157,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} else { } else {
t = gc.Types[gc.TUINT64] t = gc.Types[gc.TUINT64]
} }
check = 0 check = false
} }
a := optoas(gc.ODIV, t) a := optoas(gc.ODIV, t)
...@@ -198,7 +198,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -198,7 +198,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gc.Patch(p1, gc.Pc) gc.Patch(p1, gc.Pc)
var p2 *obj.Prog var p2 *obj.Prog
if check != 0 { if check {
var nm1 gc.Node var nm1 gc.Node
gc.Nodconst(&nm1, t, -1) gc.Nodconst(&nm1, t, -1)
gins(optoas(gc.OCMP, t), &tr, &nm1) gins(optoas(gc.OCMP, t), &tr, &nm1)
...@@ -242,7 +242,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -242,7 +242,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} }
gc.Regfree(&tl) gc.Regfree(&tl)
if check != 0 { if check {
gc.Patch(p2, gc.Pc) gc.Patch(p2, gc.Pc)
} }
} }
...@@ -254,9 +254,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -254,9 +254,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) { func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left. // largest ullman on left.
if nl.Ullman < nr.Ullman { if nl.Ullman < nr.Ullman {
tmp := (*gc.Node)(nl) nl, nr = nr, nl
nl = nr
nr = tmp
} }
t := (*gc.Type)(nl.Type) t := (*gc.Type)(nl.Type)
......
...@@ -133,24 +133,14 @@ func clearfat(nl *gc.Node) { ...@@ -133,24 +133,14 @@ func clearfat(nl *gc.Node) {
n1.Op = gc.OINDREG n1.Op = gc.OINDREG
var z gc.Node var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0) gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for { for ; q > 0; q-- {
tmp14 := q
q--
if tmp14 <= 0 {
break
}
n1.Type = z.Type n1.Type = z.Type
gins(x86.AMOVL, &z, &n1) gins(x86.AMOVL, &z, &n1)
n1.Xoffset += 4 n1.Xoffset += 4
} }
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0) gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
for { for ; c > 0; c-- {
tmp15 := c
c--
if tmp15 <= 0 {
break
}
n1.Type = z.Type n1.Type = z.Type
gins(x86.AMOVB, &z, &n1) gins(x86.AMOVB, &z, &n1)
n1.Xoffset++ n1.Xoffset++
...@@ -213,13 +203,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N ...@@ -213,13 +203,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
t := nl.Type t := nl.Type
t0 := t t0 := t
check := 0 check := false
if gc.Issigned[t.Etype] { if gc.Issigned[t.Etype] {
check = 1 check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) { if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) {
check = 0 check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 { } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0 check = false
} }
} }
...@@ -229,7 +219,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N ...@@ -229,7 +219,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
} else { } else {
t = gc.Types[gc.TUINT32] t = gc.Types[gc.TUINT32]
} }
check = 0 check = false
} }
var t1 gc.Node var t1 gc.Node
...@@ -278,7 +268,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N ...@@ -278,7 +268,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
gc.Patch(p1, gc.Pc) gc.Patch(p1, gc.Pc)
} }
if check != 0 { if check {
gc.Nodconst(&n4, t, -1) gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n1, &n4) gins(optoas(gc.OCMP, t), &n1, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
...@@ -313,7 +303,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N ...@@ -313,7 +303,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
} else { } else {
gmove(dx, res) gmove(dx, res)
} }
if check != 0 { if check {
gc.Patch(p2, gc.Pc) gc.Patch(p2, gc.Pc)
} }
} }
...@@ -513,9 +503,7 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool { ...@@ -513,9 +503,7 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
// largest ullman on left. // largest ullman on left.
if nl.Ullman < nr.Ullman { if nl.Ullman < nr.Ullman {
tmp := nl nl, nr = nr, nl
nl = nr
nr = tmp
} }
var nt gc.Node var nt gc.Node
...@@ -705,9 +693,7 @@ func cgen_floatsse(n *gc.Node, res *gc.Node) { ...@@ -705,9 +693,7 @@ func cgen_floatsse(n *gc.Node, res *gc.Node) {
sbop: // symmetric binary sbop: // symmetric binary
if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL { if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
r := nl nl, nr = nr, nl
nl = nr
nr = r
} }
abop: // asymmetric binary abop: // asymmetric binary
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment