Commit 932c1e3d authored by Marvin Stenger, committed by Brad Fitzpatrick

cmd/compile/internal: some janitoring

Nicer swaps, loops (removed tmp variables). Use of bool instead of int.

This change passes go build -toolexec 'toolstash -cmp' -a std.

Change-Id: I541904c74b57297848decc51a8a4913a8eca4af3
Reviewed-on: https://go-review.googlesource.com/14316
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent 31322996
......@@ -187,13 +187,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type
t0 := t
check := 0
check := false
if gc.Issigned[t.Etype] {
check = 1
check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0
check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0
check = false
}
}
......@@ -203,7 +203,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} else {
t = gc.Types[gc.TUINT32]
}
check = 0
check = false
}
a := optoas(op, t)
......@@ -252,7 +252,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
var p2 *obj.Prog
if check != 0 {
if check {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n3, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
......@@ -289,7 +289,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gmove(&dx, res)
}
restx(&dx, &olddx)
if check != 0 {
if check {
gc.Patch(p2, gc.Pc)
}
restx(&ax, &oldax)
......@@ -340,9 +340,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type
a := optoas(gc.OHMUL, t)
if nl.Ullman < nr.Ullman {
tmp := nl
nl = nr
nr = tmp
nl, nr = nr, nl
}
var n1 gc.Node
......@@ -500,9 +498,7 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
tmp := nl
nl = nr
nr = tmp
nl, nr = nr, nl
}
// generate operands in "8-bit" registers.
......@@ -564,12 +560,7 @@ func clearfat(nl *gc.Node) {
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for {
tmp14 := q
q--
if tmp14 <= 0 {
break
}
for ; q > 0; q-- {
n1.Type = z.Type
gins(x86.AMOVQ, &z, &n1)
n1.Xoffset += 8
......@@ -584,12 +575,7 @@ func clearfat(nl *gc.Node) {
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
for {
tmp15 := c
c--
if tmp15 <= 0 {
break
}
for ; c > 0; c-- {
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
......
......@@ -205,12 +205,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
gc.Regfree(&nend)
} else {
var p *obj.Prog
for {
tmp14 := c
c--
if tmp14 <= 0 {
break
}
for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
......
......@@ -120,9 +120,7 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int
*/
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Ullman < nr.Ullman {
tmp := nl
nl = nr
nr = tmp
nl, nr = nr, nl
}
t := nl.Type
......
......@@ -132,13 +132,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
for {
tmp14 := c
c--
if tmp14 <= 0 {
break
}
for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
......
......@@ -151,13 +151,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type
t0 := t
check := 0
check := false
if gc.Issigned[t.Etype] {
check = 1
check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0
check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0
check = false
}
}
......@@ -167,7 +167,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} else {
t = gc.Types[gc.TUINT64]
}
check = 0
check = false
}
a := optoas(gc.ODIV, t)
......@@ -206,7 +206,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
if check != 0 {
if check {
var nm1 gc.Node
gc.Nodconst(&nm1, t, -1)
gcmp(optoas(gc.OCMP, t), &tr, &nm1)
......@@ -250,7 +250,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
gc.Regfree(&tl)
if check != 0 {
if check {
gc.Patch(p2, gc.Pc)
}
}
......@@ -262,9 +262,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
tmp := (*gc.Node)(nl)
nl = nr
nr = tmp
nl, nr = nr, nl
}
t := (*gc.Type)(nl.Type)
......
......@@ -705,9 +705,7 @@ func cgen_wb(n, res *Node, wb bool) {
*/
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
r := nl
nl = nr
nr = r
nl, nr = nr, nl
}
abop: // asymmetric binary
......
......@@ -1685,15 +1685,13 @@ func livenessprintdebug(lv *Liveness) {
for j = 0; j < len(lv.vars); j++ {
n = lv.vars[j]
if islive(n, args, locals) {
tmp9 := printed
printed++
if tmp9 != 0 {
if printed != 0 {
fmt.Printf(",")
}
fmt.Printf("%v", n)
printed++
}
}
fmt.Printf("\n")
}
......
......@@ -665,12 +665,7 @@ func sortinter(t *Type) *Type {
i++
}
sort.Sort(methcmp(a[:i]))
for {
tmp11 := i
i--
if tmp11 <= 0 {
break
}
for i--; i >= 0; i-- {
a[i].Down = f
f = a[i]
}
......
......@@ -3073,11 +3073,10 @@ func typecheckcomplit(np **Node) {
setlineno(ll.N)
typecheck(&ll.N, Erv)
if f == nil {
tmp12 := bad
bad++
if tmp12 == 0 {
if bad == 0 {
Yyerror("too many values in struct initializer")
}
bad++
continue
}
......@@ -3110,11 +3109,10 @@ func typecheckcomplit(np **Node) {
l = ll.N
setlineno(l)
if l.Op != OKEY {
tmp13 := bad
bad++
if tmp13 == 0 {
if bad == 0 {
Yyerror("mixture of field:value and value initializers")
}
bad++
typecheck(&ll.N, Erv)
continue
}
......
......@@ -126,13 +126,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
for {
tmp14 := c
c--
if tmp14 <= 0 {
break
}
for ; c > 0; c-- {
p = gins(op, &src, &tmp)
p.From.Type = obj.TYPE_MEM
p.From.Offset = int64(dir)
......
......@@ -141,13 +141,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type
t0 := t
check := 0
check := false
if gc.Issigned[t.Etype] {
check = 1
check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0
check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0
check = false
}
}
......@@ -157,7 +157,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
} else {
t = gc.Types[gc.TUINT64]
}
check = 0
check = false
}
a := optoas(gc.ODIV, t)
......@@ -198,7 +198,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
if check != 0 {
if check {
var nm1 gc.Node
gc.Nodconst(&nm1, t, -1)
gins(optoas(gc.OCMP, t), &tr, &nm1)
......@@ -242,7 +242,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
gc.Regfree(&tl)
if check != 0 {
if check {
gc.Patch(p2, gc.Pc)
}
}
......@@ -254,9 +254,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
tmp := (*gc.Node)(nl)
nl = nr
nr = tmp
nl, nr = nr, nl
}
t := (*gc.Type)(nl.Type)
......
......@@ -133,24 +133,14 @@ func clearfat(nl *gc.Node) {
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
for {
tmp14 := q
q--
if tmp14 <= 0 {
break
}
for ; q > 0; q-- {
n1.Type = z.Type
gins(x86.AMOVL, &z, &n1)
n1.Xoffset += 4
}
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
for {
tmp15 := c
c--
if tmp15 <= 0 {
break
}
for ; c > 0; c-- {
n1.Type = z.Type
gins(x86.AMOVB, &z, &n1)
n1.Xoffset++
......@@ -213,13 +203,13 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
t := nl.Type
t0 := t
check := 0
check := false
if gc.Issigned[t.Etype] {
check = 1
check = true
if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) {
check = 0
check = false
} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0
check = false
}
}
......@@ -229,7 +219,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
} else {
t = gc.Types[gc.TUINT32]
}
check = 0
check = false
}
var t1 gc.Node
......@@ -278,7 +268,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
gc.Patch(p1, gc.Pc)
}
if check != 0 {
if check {
gc.Nodconst(&n4, t, -1)
gins(optoas(gc.OCMP, t), &n1, &n4)
p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
......@@ -313,7 +303,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
} else {
gmove(dx, res)
}
if check != 0 {
if check {
gc.Patch(p2, gc.Pc)
}
}
......@@ -513,9 +503,7 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
// largest ullman on left.
if nl.Ullman < nr.Ullman {
tmp := nl
nl = nr
nr = tmp
nl, nr = nr, nl
}
var nt gc.Node
......@@ -705,9 +693,7 @@ func cgen_floatsse(n *gc.Node, res *gc.Node) {
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
r := nl
nl = nr
nr = r
nl, nr = nr, nl
}
abop: // asymmetric binary
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment