Commit 0112f6f6 authored by Josh Bleecher Snyder, committed by Russ Cox

cmd/5g, etc: prepare to unexport gc.Mp*

Remove all uses of Mp* outside of the gc package.

A subsequent, automated commit in the Go 1.6
cycle will unexport all Mp* functions and types.

No functional changes. Passes toolstash -cmp.

Change-Id: Ie1604cb5b84ffb30b47f4777d4235570f2c62709
Reviewed-on: https://go-review.googlesource.com/9263
Reviewed-by: Russ Cox <rsc@golang.org>
parent 7e26a2d9
...@@ -237,7 +237,7 @@ func cgen64(n *gc.Node, res *gc.Node) { ...@@ -237,7 +237,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
// shld hi:lo, c // shld hi:lo, c
// shld lo:t, c // shld lo:t, c
case gc.OLROT: case gc.OLROT:
v := uint64(gc.Mpgetfix(r.Val.U.Xval)) v := uint64(r.Int())
var bl gc.Node var bl gc.Node
gc.Regalloc(&bl, lo1.Type, nil) gc.Regalloc(&bl, lo1.Type, nil)
...@@ -291,7 +291,7 @@ func cgen64(n *gc.Node, res *gc.Node) { ...@@ -291,7 +291,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
var p4 *obj.Prog var p4 *obj.Prog
var p5 *obj.Prog var p5 *obj.Prog
if r.Op == gc.OLITERAL { if r.Op == gc.OLITERAL {
v := uint64(gc.Mpgetfix(r.Val.U.Xval)) v := uint64(r.Int())
if v >= 64 { if v >= 64 {
// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al) // TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
// here and below (verify it optimizes to EOR) // here and below (verify it optimizes to EOR)
...@@ -452,7 +452,7 @@ func cgen64(n *gc.Node, res *gc.Node) { ...@@ -452,7 +452,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
var creg gc.Node var creg gc.Node
var p3 *obj.Prog var p3 *obj.Prog
if r.Op == gc.OLITERAL { if r.Op == gc.OLITERAL {
v := uint64(gc.Mpgetfix(r.Val.U.Xval)) v := uint64(r.Int())
if v >= 64 { if v >= 64 {
if bh.Type.Etype == gc.TINT32 { if bh.Type.Etype == gc.TINT32 {
// MOVW bh->31, al // MOVW bh->31, al
......
...@@ -183,7 +183,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -183,7 +183,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
w := int(nl.Type.Width * 8) w := int(nl.Type.Width * 8)
if op == gc.OLROT { if op == gc.OLROT {
v := int(gc.Mpgetfix(nr.Val.U.Xval)) v := nr.Int()
var n1 gc.Node var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res) gc.Regalloc(&n1, nl.Type, res)
if w == 32 { if w == 32 {
...@@ -210,7 +210,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -210,7 +210,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res) gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1) gc.Cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval)) sc := uint64(nr.Int())
if sc == 0 { if sc == 0 {
} else // nothing to do } else // nothing to do
if sc >= uint64(nl.Type.Width*8) { if sc >= uint64(nl.Type.Width*8) {
...@@ -480,7 +480,7 @@ func ginscon(as int, c int64, n *gc.Node) { ...@@ -480,7 +480,7 @@ func ginscon(as int, c int64, n *gc.Node) {
} }
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog { func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Mpgetfix(n1.Val.U.Xval) == 0 && n2.Op != gc.OLITERAL { if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
op = gc.Brrev(op) op = gc.Brrev(op)
n1, n2 = n2, n1 n1, n2 = n2, n1
} }
...@@ -489,7 +489,7 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog { ...@@ -489,7 +489,7 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
gc.Regalloc(&g1, n1.Type, &r1) gc.Regalloc(&g1, n1.Type, &r1)
gc.Cgen(n1, &g1) gc.Cgen(n1, &g1)
gmove(&g1, &r1) gmove(&g1, &r1)
if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && gc.Mpgetfix(n2.Val.U.Xval) == 0 { if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
gins(arm.ACMP, &r1, n2) gins(arm.ACMP, &r1, n2)
} else { } else {
gc.Regalloc(&r2, t, n2) gc.Regalloc(&r2, t, n2)
......
...@@ -53,7 +53,7 @@ func ncon(i uint32) *gc.Node { ...@@ -53,7 +53,7 @@ func ncon(i uint32) *gc.Node {
if ncon_n.Type == nil { if ncon_n.Type == nil {
gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0) gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
} }
gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i)) ncon_n.SetInt(int64(i))
return &ncon_n return &ncon_n
} }
...@@ -112,7 +112,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) { ...@@ -112,7 +112,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
case gc.OLITERAL: case gc.OLITERAL:
var n1 gc.Node var n1 gc.Node
gc.Convconst(&n1, n.Type, &n.Val) gc.Convconst(&n1, n.Type, &n.Val)
i := gc.Mpgetfix(n1.Val.U.Xval) i := n1.Int()
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i))) gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32 i >>= 32
if n.Type.Etype == gc.TINT64 { if n.Type.Etype == gc.TINT64 {
...@@ -1118,7 +1118,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { ...@@ -1118,7 +1118,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
if !gc.Isconst(n, gc.CTINT) { if !gc.Isconst(n, gc.CTINT) {
break break
} }
v := gc.Mpgetfix(n.Val.U.Xval) v := n.Int()
if v >= 32000 || v <= -32000 { if v >= 32000 || v <= -32000 {
break break
} }
......
...@@ -190,9 +190,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -190,9 +190,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
check := 0 check := 0
if gc.Issigned[t.Etype] { if gc.Issigned[t.Etype] {
check = 1 check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) { if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0 check = 0
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 { } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0 check = 0
} }
} }
...@@ -381,7 +381,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -381,7 +381,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res) gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1) gc.Cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval)) sc := uint64(nr.Int())
if sc >= uint64(nl.Type.Width*8) { if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1 // large shift gets 2 shifts by width-1
var n3 gc.Node var n3 gc.Node
......
...@@ -32,6 +32,7 @@ package main ...@@ -32,6 +32,7 @@ package main
import ( import (
"cmd/internal/gc" "cmd/internal/gc"
"cmd/internal/gc/big"
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/obj/x86" "cmd/internal/obj/x86"
"fmt" "fmt"
...@@ -139,29 +140,27 @@ func ginsboolval(a int, n *gc.Node) { ...@@ -139,29 +140,27 @@ func ginsboolval(a int, n *gc.Node) {
gins(jmptoset(a), nil, n) gins(jmptoset(a), nil, n)
} }
/* // set up nodes representing 2^63
* set up nodes representing 2^63 var (
*/ bigi gc.Node
var bigi gc.Node bigf gc.Node
bignodes_did bool
var bigf gc.Node )
var bignodes_did int
func bignodes() { func bignodes() {
if bignodes_did != 0 { if bignodes_did {
return return
} }
bignodes_did = 1 bignodes_did = true
var i big.Int
i.SetInt64(1)
i.Lsh(&i, 63)
gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1) gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
gc.Mpshiftfix(bigi.Val.U.Xval, 63) bigi.SetBigInt(&i)
bigf = bigi gc.Convconst(&bigf, gc.Types[gc.TFLOAT64], &bigi.Val)
bigf.Type = gc.Types[gc.TFLOAT64]
bigf.Val.Ctype = gc.CTFLT
bigf.Val.U.Fval = new(gc.Mpflt)
gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
} }
/* /*
...@@ -206,10 +205,7 @@ func gmove(f *gc.Node, t *gc.Node) { ...@@ -206,10 +205,7 @@ func gmove(f *gc.Node, t *gc.Node) {
// 64-bit immediates are really 32-bit sign-extended // 64-bit immediates are really 32-bit sign-extended
// unless moving into a register. // unless moving into a register.
if gc.Isint[tt] { if gc.Isint[tt] {
if gc.Mpcmpfixfix(con.Val.U.Xval, gc.Minintval[gc.TINT32]) < 0 { if i := con.Int(); int64(int32(i)) != i {
goto hard
}
if gc.Mpcmpfixfix(con.Val.U.Xval, gc.Maxintval[gc.TINT32]) > 0 {
goto hard goto hard
} }
} }
...@@ -1273,7 +1269,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { ...@@ -1273,7 +1269,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
if !gc.Isconst(n, gc.CTINT) { if !gc.Isconst(n, gc.CTINT) {
break break
} }
v := gc.Mpgetfix(n.Val.U.Xval) v := n.Int()
if v >= 32000 || v <= -32000 { if v >= 32000 || v <= -32000 {
break break
} }
......
...@@ -147,9 +147,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -147,9 +147,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
check := 0 check := 0
if gc.Issigned[t.Etype] { if gc.Issigned[t.Etype] {
check = 1 check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) { if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0 check = 0
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 { } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0 check = 0
} }
} }
...@@ -312,7 +312,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -312,7 +312,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res) gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1) gc.Cgen(nl, &n1)
sc := uint64(uint64(gc.Mpgetfix(nr.Val.U.Xval))) sc := uint64(nr.Int())
if sc >= uint64(nl.Type.Width*8) { if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1 // large shift gets 2 shifts by width-1
var n3 gc.Node var n3 gc.Node
......
...@@ -115,7 +115,7 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog { ...@@ -115,7 +115,7 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
gc.Cgen(n1, &g1) gc.Cgen(n1, &g1)
gmove(&g1, &r1) gmove(&g1, &r1)
if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) { if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
ginscon2(optoas(gc.OCMP, t), &r1, gc.Mpgetfix(n2.Val.U.Xval)) ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
} else { } else {
gc.Regalloc(&r2, t, n2) gc.Regalloc(&r2, t, n2)
gc.Regalloc(&g2, n1.Type, &r2) gc.Regalloc(&g2, n1.Type, &r2)
...@@ -473,7 +473,7 @@ func intLiteral(n *gc.Node) (x int64, ok bool) { ...@@ -473,7 +473,7 @@ func intLiteral(n *gc.Node) (x int64, ok bool) {
} }
switch n.Val.Ctype { switch n.Val.Ctype {
case gc.CTINT, gc.CTRUNE: case gc.CTINT, gc.CTRUNE:
return gc.Mpgetfix(n.Val.U.Xval), true return n.Int(), true
case gc.CTBOOL: case gc.CTBOOL:
return int64(obj.Bool2int(n.Val.U.Bval)), true return int64(obj.Bool2int(n.Val.U.Bval)), true
} }
......
...@@ -162,7 +162,7 @@ func cgen64(n *gc.Node, res *gc.Node) { ...@@ -162,7 +162,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
// shld hi:lo, c // shld hi:lo, c
// shld lo:t, c // shld lo:t, c
case gc.OLROT: case gc.OLROT:
v := uint64(gc.Mpgetfix(r.Val.U.Xval)) v := uint64(r.Int())
if v >= 32 { if v >= 32 {
// reverse during load to do the first 32 bits of rotate // reverse during load to do the first 32 bits of rotate
...@@ -189,7 +189,7 @@ func cgen64(n *gc.Node, res *gc.Node) { ...@@ -189,7 +189,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
case gc.OLSH: case gc.OLSH:
if r.Op == gc.OLITERAL { if r.Op == gc.OLITERAL {
v := uint64(gc.Mpgetfix(r.Val.U.Xval)) v := uint64(r.Int())
if v >= 64 { if v >= 64 {
if gc.Is64(r.Type) { if gc.Is64(r.Type) {
splitclean() splitclean()
...@@ -278,7 +278,7 @@ func cgen64(n *gc.Node, res *gc.Node) { ...@@ -278,7 +278,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
case gc.ORSH: case gc.ORSH:
if r.Op == gc.OLITERAL { if r.Op == gc.OLITERAL {
v := uint64(gc.Mpgetfix(r.Val.U.Xval)) v := uint64(r.Int())
if v >= 64 { if v >= 64 {
if gc.Is64(r.Type) { if gc.Is64(r.Type) {
splitclean() splitclean()
...@@ -400,9 +400,8 @@ func cgen64(n *gc.Node, res *gc.Node) { ...@@ -400,9 +400,8 @@ func cgen64(n *gc.Node, res *gc.Node) {
if lo2.Op == gc.OLITERAL { if lo2.Op == gc.OLITERAL {
// special cases for constants. // special cases for constants.
lv := uint32(gc.Mpgetfix(lo2.Val.U.Xval)) lv := uint32(lo2.Int())
hv := uint32(hi2.Int())
hv := uint32(gc.Mpgetfix(hi2.Val.U.Xval))
splitclean() // right side splitclean() // right side
split64(res, &lo2, &hi2) split64(res, &lo2, &hi2)
switch n.Op { switch n.Op {
......
...@@ -216,9 +216,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N ...@@ -216,9 +216,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
check := 0 check := 0
if gc.Issigned[t.Etype] { if gc.Issigned[t.Etype] {
check = 1 check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) { if gc.Isconst(nl, gc.CTINT) && nl.Int() != -1<<uint64(t.Width*8-1) {
check = 0 check = 0
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 { } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0 check = 0
} }
} }
...@@ -391,7 +391,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -391,7 +391,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res) gc.Regalloc(&n1, nl.Type, res)
gmove(&n2, &n1) gmove(&n2, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval)) sc := uint64(nr.Int())
if sc >= uint64(nl.Type.Width*8) { if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1 // large shift gets 2 shifts by width-1
gins(a, ncon(uint32(w)-1), &n1) gins(a, ncon(uint32(w)-1), &n1)
......
...@@ -32,6 +32,7 @@ package main ...@@ -32,6 +32,7 @@ package main
import ( import (
"cmd/internal/gc" "cmd/internal/gc"
"cmd/internal/gc/big"
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/obj/x86" "cmd/internal/obj/x86"
"fmt" "fmt"
...@@ -641,7 +642,7 @@ func ncon(i uint32) *gc.Node { ...@@ -641,7 +642,7 @@ func ncon(i uint32) *gc.Node {
if ncon_n.Type == nil { if ncon_n.Type == nil {
gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0) gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
} }
gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i)) ncon_n.SetInt(int64(i))
return &ncon_n return &ncon_n
} }
...@@ -700,7 +701,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) { ...@@ -700,7 +701,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
case gc.OLITERAL: case gc.OLITERAL:
var n1 gc.Node var n1 gc.Node
gc.Convconst(&n1, n.Type, &n.Val) gc.Convconst(&n1, n.Type, &n.Val)
i := gc.Mpgetfix(n1.Val.U.Xval) i := n1.Int()
gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i))) gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
i >>= 32 i >>= 32
if n.Type.Etype == gc.TINT64 { if n.Type.Etype == gc.TINT64 {
...@@ -721,36 +722,36 @@ func splitclean() { ...@@ -721,36 +722,36 @@ func splitclean() {
} }
} }
/* // set up nodes representing fp constants
* set up nodes representing fp constants var (
*/ zerof gc.Node
var zerof gc.Node two63f gc.Node
two64f gc.Node
var two64f gc.Node bignodes_did bool
)
var two63f gc.Node
var bignodes_did int
func bignodes() { func bignodes() {
if bignodes_did != 0 { if bignodes_did {
return return
} }
bignodes_did = 1 bignodes_did = true
gc.Nodconst(&zerof, gc.Types[gc.TINT64], 0)
gc.Convconst(&zerof, gc.Types[gc.TFLOAT64], &zerof.Val)
two64f = *ncon(0) var i big.Int
two64f.Type = gc.Types[gc.TFLOAT64] i.SetInt64(1)
two64f.Val.Ctype = gc.CTFLT i.Lsh(&i, 63)
two64f.Val.U.Fval = new(gc.Mpflt) var bigi gc.Node
gc.Mpmovecflt(two64f.Val.U.Fval, 18446744073709551616.)
two63f = two64f gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
two63f.Val.U.Fval = new(gc.Mpflt) bigi.SetBigInt(&i)
gc.Mpmovecflt(two63f.Val.U.Fval, 9223372036854775808.) gc.Convconst(&two63f, gc.Types[gc.TFLOAT64], &bigi.Val)
zerof = two64f gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
zerof.Val.U.Fval = new(gc.Mpflt) i.Lsh(&i, 1)
gc.Mpmovecflt(zerof.Val.U.Fval, 0) bigi.SetBigInt(&i)
gc.Convconst(&two64f, gc.Types[gc.TFLOAT64], &bigi.Val)
} }
func memname(n *gc.Node, t *gc.Type) { func memname(n *gc.Node, t *gc.Type) {
......
...@@ -141,9 +141,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -141,9 +141,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
check := 0 check := 0
if gc.Issigned[t.Etype] { if gc.Issigned[t.Etype] {
check = 1 check = 1
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) { if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
check = 0 check = 0
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 { } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
check = 0 check = 0
} }
} }
...@@ -308,7 +308,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { ...@@ -308,7 +308,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res) gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1) gc.Cgen(nl, &n1)
sc := uint64(uint64(gc.Mpgetfix(nr.Val.U.Xval))) sc := uint64(nr.Int())
if sc >= uint64(nl.Type.Width*8) { if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1 // large shift gets 2 shifts by width-1
var n3 gc.Node var n3 gc.Node
......
...@@ -32,6 +32,7 @@ package main ...@@ -32,6 +32,7 @@ package main
import ( import (
"cmd/internal/gc" "cmd/internal/gc"
"cmd/internal/gc/big"
"cmd/internal/obj" "cmd/internal/obj"
"cmd/internal/obj/ppc64" "cmd/internal/obj/ppc64"
"fmt" "fmt"
...@@ -129,7 +130,7 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog { ...@@ -129,7 +130,7 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
gc.Cgen(n1, &g1) gc.Cgen(n1, &g1)
gmove(&g1, &r1) gmove(&g1, &r1)
if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) { if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
ginscon2(optoas(gc.OCMP, t), &r1, gc.Mpgetfix(n2.Val.U.Xval)) ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
} else { } else {
gc.Regalloc(&r2, t, n2) gc.Regalloc(&r2, t, n2)
gc.Regalloc(&g2, n1.Type, &r2) gc.Regalloc(&g2, n1.Type, &r2)
...@@ -144,29 +145,27 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog { ...@@ -144,29 +145,27 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
return gc.Gbranch(optoas(op, t), nil, likely) return gc.Gbranch(optoas(op, t), nil, likely)
} }
/* // set up nodes representing 2^63
* set up nodes representing 2^63 var (
*/ bigi gc.Node
var bigi gc.Node bigf gc.Node
bignodes_did bool
var bigf gc.Node )
var bignodes_did int
func bignodes() { func bignodes() {
if bignodes_did != 0 { if bignodes_did {
return return
} }
bignodes_did = 1 bignodes_did = true
var i big.Int
i.SetInt64(1)
i.Lsh(&i, 63)
gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1) gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
gc.Mpshiftfix(bigi.Val.U.Xval, 63) bigi.SetBigInt(&i)
bigf = bigi gc.Convconst(&bigf, gc.Types[gc.TFLOAT64], &bigi.Val)
bigf.Type = gc.Types[gc.TFLOAT64]
bigf.Val.Ctype = gc.CTFLT
bigf.Val.U.Fval = new(gc.Mpflt)
gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
} }
/* /*
...@@ -552,7 +551,7 @@ func intLiteral(n *gc.Node) (x int64, ok bool) { ...@@ -552,7 +551,7 @@ func intLiteral(n *gc.Node) (x int64, ok bool) {
} }
switch n.Val.Ctype { switch n.Val.Ctype {
case gc.CTINT, gc.CTRUNE: case gc.CTINT, gc.CTRUNE:
return gc.Mpgetfix(n.Val.U.Xval), true return n.Int(), true
case gc.CTBOOL: case gc.CTBOOL:
return int64(obj.Bool2int(n.Val.U.Bval)), true return int64(obj.Bool2int(n.Val.U.Bval)), true
} }
......
...@@ -5,10 +5,38 @@ ...@@ -5,10 +5,38 @@
package gc package gc
import ( import (
"cmd/internal/gc/big"
"cmd/internal/obj" "cmd/internal/obj"
"strings" "strings"
) )
// Int returns n's value as an int64.
// n must be an integer constant; otherwise Int reports a fatal error.
func (n *Node) Int() int64 {
	if !Isconst(n, CTINT) {
		Fatal("Int(%v)", n)
	}
	return Mpgetfix(n.Val.U.Xval)
}
// SetInt sets the value of the integer constant n to i.
// Calling SetInt on a node that is not an integer constant is a fatal error.
func (n *Node) SetInt(i int64) {
	if Isconst(n, CTINT) {
		Mpmovecfix(n.Val.U.Xval, i)
		return
	}
	Fatal("SetInt(%v)", n)
}
// SetBigInt sets the value of the integer constant n to x.
// Calling SetBigInt on a node that is not an integer constant is a fatal error.
func (n *Node) SetBigInt(x *big.Int) {
	if Isconst(n, CTINT) {
		n.Val.U.Xval.Val.Set(x)
		return
	}
	Fatal("SetBigInt(%v)", n)
}
/* /*
* truncate float literal fv to 32-bit or 64-bit precision * truncate float literal fv to 32-bit or 64-bit precision
* according to type; return truncated value. * according to type; return truncated value.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment