Commit d3253876 authored by Matthew Dempsky, committed by Robert Griesemer

cmd/compile: change Mp{int,flt} functions into methods

Also give them more idiomatic Go names. Adding godocs is outside the
scope of this CL. (Besides, the method names almost all directly
parallel an underlying math/big.Int or math/big.Float method.)

CL prepared mechanically with sed (for rewriting mpint.go/mpfloat.go)
and gofmt (for rewriting call sites).

Passes toolstash -cmp.
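
For illustration, a minimal standalone sketch of the before/after call style.
This is not the compiler's actual Mpint definition, just a stand-in that
mirrors a few of the renamed methods, which delegate to math/big exactly as
in the diff below:

    package main

    import (
        "fmt"
        "math/big"
    )

    // Mpint stands in for the compiler's multiprecision integer type,
    // reduced to what is needed to show the method-style API.
    type Mpint struct {
        Val big.Int
        Ovf bool
    }

    // Previously Mpmovecfix(a, c); now a method.
    func (a *Mpint) SetInt64(c int64) { a.Val.SetInt64(c) }

    // Previously Mpgetfix(a); now a method.
    func (a *Mpint) Int64() int64 { return a.Val.Int64() }

    // Previously Mpcmpfixfix(a, b); now a method.
    func (a *Mpint) Cmp(b *Mpint) int { return a.Val.Cmp(&b.Val) }

    func main() {
        var x, y Mpint
        x.SetInt64(42)
        y.SetInt64(7)
        fmt.Println(x.Int64(), x.Cmp(&y)) // prints: 42 1
    }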

Change-Id: Id76f4aee476ba740f48db33162463e7978c2083d
Reviewed-on: https://go-review.googlesource.com/20909
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Robert Griesemer <gri@golang.org>
parent 5fb6aa3e
@@ -721,16 +721,16 @@ func (p *exporter) value(x Val) {
 p.tag(tag)
 case *Mpint:
-if Mpcmpfixfix(Minintval[TINT64], x) <= 0 && Mpcmpfixfix(x, Maxintval[TINT64]) <= 0 {
+if Minintval[TINT64].Cmp(x) <= 0 && x.Cmp(Maxintval[TINT64]) <= 0 {
 // common case: x fits into an int64 - use compact encoding
 p.tag(int64Tag)
-p.int64(Mpgetfix(x))
+p.int64(x.Int64())
 return
 }
 // uncommon case: large x - use float encoding
 // (powers of 2 will be encoded efficiently with exponent)
 f := newMpflt()
-Mpmovefixflt(f, x)
+f.SetInt(x)
 p.tag(floatTag)
 p.float(f)

@@ -484,7 +484,7 @@ func (p *importer) value(typ *Type) (x Val) {
 case int64Tag:
 u := new(Mpint)
-Mpmovecfix(u, p.int64())
+u.SetInt64(p.int64())
 u.Rune = typ == idealrune
 x.U = u
@@ -494,7 +494,7 @@ func (p *importer) value(typ *Type) (x Val) {
 if typ == idealint || Isint[typ.Etype] {
 // uncommon case: large int encoded as float
 u := new(Mpint)
-mpmovefltfix(u, f)
+u.SetFloat(f)
 x.U = u
 break
 }
@@ -530,7 +530,7 @@ func (p *importer) value(typ *Type) (x Val) {
 func (p *importer) float(x *Mpflt) {
 sign := p.int()
 if sign == 0 {
-Mpmovecflt(x, 0)
+x.SetFloat64(0)
 return
 }

@@ -1033,7 +1033,7 @@ func Agenr(n *Node, a *Node, res *Node) {
 if Isconst(nl, CTSTR) {
 Fatalf("constant string constant index")
 }
-v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+v := uint64(nr.Val().U.(*Mpint).Int64())
 var n2 Node
 if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
 if Debug['B'] == 0 && !n.Bounded {
@@ -1185,7 +1185,7 @@ func Agenr(n *Node, a *Node, res *Node) {
 if Isconst(nl, CTSTR) {
 Fatalf("constant string constant index") // front end should handle
 }
-v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+v := uint64(nr.Val().U.(*Mpint).Int64())
 if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
 if Debug['B'] == 0 && !n.Bounded {
 nlen := n3
@@ -1375,7 +1375,7 @@ func Agenr(n *Node, a *Node, res *Node) {
 if Isconst(nl, CTSTR) {
 Fatalf("constant string constant index") // front end should handle
 }
-v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+v := uint64(nr.Val().U.(*Mpint).Int64())
 if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
 if Debug['B'] == 0 && !n.Bounded {
 p1 := Thearch.Ginscmp(OGT, Types[Simtype[TUINT]], &nlen, Nodintconst(int64(v)), +1)
@@ -1709,7 +1709,7 @@ func Igen(n *Node, a *Node, res *Node) {
 // Compute &a[i] as &a + i*width.
 a.Type = n.Type
-a.Xoffset += Mpgetfix(n.Right.Val().U.(*Mpint)) * n.Type.Width
+a.Xoffset += n.Right.Val().U.(*Mpint).Int64() * n.Type.Width
 Fixlargeoffset(a)
 return
 }
@@ -2215,7 +2215,7 @@ func stkof(n *Node) int64 {
 return off
 }
 if Isconst(n.Right, CTINT) {
-return off + t.Type.Width*Mpgetfix(n.Right.Val().U.(*Mpint))
+return off + t.Type.Width*n.Right.Val().U.(*Mpint).Int64()
 }
 return +1000 // on stack but not sure exactly where
@@ -2646,7 +2646,7 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) {
 case TUINT64:
 var m Magic
 m.W = w
-m.Ud = uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+m.Ud = uint64(nr.Val().U.(*Mpint).Int64())
 Umagic(&m)
 if m.Bad != 0 {
 break
@@ -2684,7 +2684,7 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) {
 case TINT64:
 var m Magic
 m.W = w
-m.Sd = Mpgetfix(nr.Val().U.(*Mpint))
+m.Sd = nr.Val().U.(*Mpint).Int64()
 Smagic(&m)
 if m.Bad != 0 {
 break
@@ -3189,17 +3189,17 @@ func cgen_slice(n, res *Node, wb bool) {
 bound = int64(len(n.Left.Val().U.(string)))
 }
 if Isconst(&i, CTINT) {
-if mpcmpfixc(i.Val().U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(i.Val().U.(*Mpint), bound) > 0 {
+if i.Val().U.(*Mpint).CmpInt64(0) < 0 || bound >= 0 && i.Val().U.(*Mpint).CmpInt64(bound) > 0 {
 Yyerror("slice index out of bounds")
 }
 }
 if Isconst(&j, CTINT) {
-if mpcmpfixc(j.Val().U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(j.Val().U.(*Mpint), bound) > 0 {
+if j.Val().U.(*Mpint).CmpInt64(0) < 0 || bound >= 0 && j.Val().U.(*Mpint).CmpInt64(bound) > 0 {
 Yyerror("slice index out of bounds")
 }
 }
 if Isconst(&k, CTINT) {
-if mpcmpfixc(k.Val().U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(k.Val().U.(*Mpint), bound) > 0 {
+if k.Val().U.(*Mpint).CmpInt64(0) < 0 || bound >= 0 && k.Val().U.(*Mpint).CmpInt64(bound) > 0 {
 Yyerror("slice index out of bounds")
 }
 }
@@ -3208,7 +3208,7 @@ func cgen_slice(n, res *Node, wb bool) {
 same := func(n1, n2 *Node) bool {
 return n1.Op == OREGISTER && n2.Op == OREGISTER && n1.Reg == n2.Reg ||
 n1.Op == ONAME && n2.Op == ONAME && n1.Orig == n2.Orig && n1.Type == n2.Type && n1.Xoffset == n2.Xoffset ||
-n1.Op == OLITERAL && n2.Op == OLITERAL && Mpcmpfixfix(n1.Val().U.(*Mpint), n2.Val().U.(*Mpint)) == 0
+n1.Op == OLITERAL && n2.Op == OLITERAL && n1.Val().U.(*Mpint).Cmp(n2.Val().U.(*Mpint)) == 0
 }
 // obvious reports whether n1 <= n2 is obviously true,
@@ -3227,7 +3227,7 @@ func cgen_slice(n, res *Node, wb bool) {
 return true // len(x) <= cap(x) always true
 }
 if Isconst(n1, CTINT) && Isconst(n2, CTINT) {
-if Mpcmpfixfix(n1.Val().U.(*Mpint), n2.Val().U.(*Mpint)) <= 0 {
+if n1.Val().U.(*Mpint).Cmp(n2.Val().U.(*Mpint)) <= 0 {
 return true // n1, n2 constants such that n1 <= n2
 }
 Yyerror("slice index out of bounds")
@@ -3240,11 +3240,11 @@ func cgen_slice(n, res *Node, wb bool) {
 // n1 might be a 64-bit constant, even on 32-bit architectures,
 // but it will be represented in 32 bits.
 if Ctxt.Arch.Regsize == 4 && Is64(n1.Type) {
-if mpcmpfixc(n1.Val().U.(*Mpint), 1<<31) >= 0 {
+if n1.Val().U.(*Mpint).CmpInt64(1<<31) >= 0 {
 Fatalf("missed slice out of bounds check")
 }
 var tmp Node
-Nodconst(&tmp, indexRegType, Mpgetfix(n1.Val().U.(*Mpint)))
+Nodconst(&tmp, indexRegType, n1.Val().U.(*Mpint).Int64())
 n1 = &tmp
 }
 p := Thearch.Ginscmp(OGT, indexRegType, n1, n2, -1)
@@ -3328,9 +3328,9 @@ func cgen_slice(n, res *Node, wb bool) {
 switch j.Op {
 case OLITERAL:
 if Isconst(&i, CTINT) {
-Nodconst(&j, indexRegType, Mpgetfix(j.Val().U.(*Mpint))-Mpgetfix(i.Val().U.(*Mpint)))
+Nodconst(&j, indexRegType, j.Val().U.(*Mpint).Int64()-i.Val().U.(*Mpint).Int64())
 if Debug_slice > 0 {
-Warn("slice: result len == %d", Mpgetfix(j.Val().U.(*Mpint)))
+Warn("slice: result len == %d", j.Val().U.(*Mpint).Int64())
 }
 break
 }
@@ -3345,7 +3345,7 @@ func cgen_slice(n, res *Node, wb bool) {
 fallthrough
 case OREGISTER:
 if i.Op == OLITERAL {
-v := Mpgetfix(i.Val().U.(*Mpint))
+v := i.Val().U.(*Mpint).Int64()
 if v != 0 {
 ginscon(Thearch.Optoas(OSUB, indexRegType), v, &j)
 }
@@ -3388,9 +3388,9 @@ func cgen_slice(n, res *Node, wb bool) {
 switch k.Op {
 case OLITERAL:
 if Isconst(&i, CTINT) {
-Nodconst(&k, indexRegType, Mpgetfix(k.Val().U.(*Mpint))-Mpgetfix(i.Val().U.(*Mpint)))
+Nodconst(&k, indexRegType, k.Val().U.(*Mpint).Int64()-i.Val().U.(*Mpint).Int64())
 if Debug_slice > 0 {
-Warn("slice: result cap == %d", Mpgetfix(k.Val().U.(*Mpint)))
+Warn("slice: result cap == %d", k.Val().U.(*Mpint).Int64())
 }
 break
 }
@@ -3411,7 +3411,7 @@ func cgen_slice(n, res *Node, wb bool) {
 Warn("slice: result cap == 0")
 }
 } else if i.Op == OLITERAL {
-v := Mpgetfix(i.Val().U.(*Mpint))
+v := i.Val().U.(*Mpint).Int64()
 if v != 0 {
 ginscon(Thearch.Optoas(OSUB, indexRegType), v, &k)
 }
@@ -3494,7 +3494,7 @@ func cgen_slice(n, res *Node, wb bool) {
 w = res.Type.Type.Width // res is []T, elem size is T.width
 }
 if Isconst(&i, CTINT) {
-ginscon(Thearch.Optoas(OADD, xbase.Type), Mpgetfix(i.Val().U.(*Mpint))*w, &xbase)
+ginscon(Thearch.Optoas(OADD, xbase.Type), i.Val().U.(*Mpint).Int64()*w, &xbase)
 } else if Thearch.AddIndex != nil && Thearch.AddIndex(&i, w, &xbase) {
 // done by back end
 } else if w == 1 {

@@ -335,7 +335,7 @@ func Vconv(v Val, flag FmtFlag) string {
 return Bconv(v.U.(*Mpint), 0)
 case CTRUNE:
-x := Mpgetfix(v.U.(*Mpint))
+x := v.U.(*Mpint).Int64()
 if ' ' <= x && x < 0x80 && x != '\\' && x != '\'' {
 return fmt.Sprintf("'%c'", int(x))
 }
@@ -357,13 +357,13 @@ func Vconv(v Val, flag FmtFlag) string {
 if (flag&FmtSharp != 0) || fmtmode == FExp {
 return fmt.Sprintf("(%v+%vi)", &v.U.(*Mpcplx).Real, &v.U.(*Mpcplx).Imag)
 }
-if mpcmpfltc(&v.U.(*Mpcplx).Real, 0) == 0 {
+if v.U.(*Mpcplx).Real.CmpFloat64(0) == 0 {
 return fmt.Sprintf("%vi", Fconv(&v.U.(*Mpcplx).Imag, FmtSharp))
 }
-if mpcmpfltc(&v.U.(*Mpcplx).Imag, 0) == 0 {
+if v.U.(*Mpcplx).Imag.CmpFloat64(0) == 0 {
 return Fconv(&v.U.(*Mpcplx).Real, FmtSharp)
 }
-if mpcmpfltc(&v.U.(*Mpcplx).Imag, 0) < 0 {
+if v.U.(*Mpcplx).Imag.CmpFloat64(0) < 0 {
 return fmt.Sprintf("(%v%vi)", Fconv(&v.U.(*Mpcplx).Real, FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, FmtSharp))
 }
 return fmt.Sprintf("(%v+%vi)", Fconv(&v.U.(*Mpcplx).Real, FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, FmtSharp))

@@ -319,12 +319,12 @@ func Clearslim(n *Node) {
 switch Simtype[n.Type.Etype] {
 case TCOMPLEX64, TCOMPLEX128:
 z.SetVal(Val{new(Mpcplx)})
-Mpmovecflt(&z.Val().U.(*Mpcplx).Real, 0.0)
-Mpmovecflt(&z.Val().U.(*Mpcplx).Imag, 0.0)
+z.Val().U.(*Mpcplx).Real.SetFloat64(0.0)
+z.Val().U.(*Mpcplx).Imag.SetFloat64(0.0)
 case TFLOAT32, TFLOAT64:
 var zero Mpflt
-Mpmovecflt(&zero, 0.0)
+zero.SetFloat64(0.0)
 z.SetVal(Val{&zero})
 case TPTR32, TPTR64, TCHAN, TMAP:
@@ -342,7 +342,7 @@ func Clearslim(n *Node) {
 TUINT32,
 TUINT64:
 z.SetVal(Val{new(Mpint)})
-Mpmovecfix(z.Val().U.(*Mpint), 0)
+z.Val().U.(*Mpint).SetInt64(0)
 default:
 Fatalf("clearslim called on type %v", n.Type)

@@ -433,12 +433,12 @@ func Naddr(a *obj.Addr, n *Node) {
 case CTFLT:
 a.Type = obj.TYPE_FCONST
-a.Val = mpgetflt(n.Val().U.(*Mpflt))
+a.Val = n.Val().U.(*Mpflt).Float64()
 case CTINT, CTRUNE:
 a.Sym = nil
 a.Type = obj.TYPE_CONST
-a.Offset = Mpgetfix(n.Val().U.(*Mpint))
+a.Offset = n.Val().U.(*Mpint).Int64()
 case CTSTR:
 datagostring(n.Val().U.(string), a)

@@ -687,11 +687,11 @@ func (l *lexer) number(c rune) {
 if c == 'i' {
 str = lexbuf.String()
 x := new(Mpcplx)
-Mpmovecflt(&x.Real, 0.0)
-mpatoflt(&x.Imag, str)
+x.Real.SetFloat64(0.0)
+x.Imag.SetString(str)
 if x.Imag.Val.IsInf() {
 Yyerror("overflow in imaginary constant")
-Mpmovecflt(&x.Imag, 0.0)
+x.Imag.SetFloat64(0.0)
 }
 l.val.U = x
@@ -711,10 +711,10 @@ func (l *lexer) number(c rune) {
 str = lexbuf.String()
 x := new(Mpint)
-mpatofix(x, str)
+x.SetString(str)
 if x.Ovf {
 Yyerror("overflow in constant")
-Mpmovecfix(x, 0)
+x.SetInt64(0)
 }
 l.val.U = x
@@ -726,10 +726,10 @@ func (l *lexer) number(c rune) {
 str = lexbuf.String()
 x := newMpflt()
-mpatoflt(x, str)
+x.SetString(str)
 if x.Val.IsInf() {
 Yyerror("overflow in float constant")
-Mpmovecflt(x, 0.0)
+x.SetFloat64(0.0)
 }
 l.val.U = x
@@ -820,7 +820,7 @@ func (l *lexer) rune() {
 x := new(Mpint)
 l.val.U = x
-Mpmovecfix(x, int64(r))
+x.SetInt64(int64(r))
 x.Rune = true
 if Debug['x'] != 0 {
 fmt.Printf("lex: codepoint literal\n")

@@ -37,7 +37,7 @@ func newMpflt() *Mpflt {
 return &a
 }
-func Mpmovefixflt(a *Mpflt, b *Mpint) {
+func (a *Mpflt) SetInt(b *Mpint) {
 if b.Ovf {
 // sign doesn't really matter but copy anyway
 a.Val.SetInf(b.Val.Sign() < 0)
@@ -46,11 +46,11 @@ func Mpmovefixflt(a *Mpflt, b *Mpint) {
 a.Val.SetInt(&b.Val)
 }
-func mpmovefltflt(a *Mpflt, b *Mpflt) {
+func (a *Mpflt) Set(b *Mpflt) {
 a.Val.Set(&b.Val)
 }
-func mpaddfltflt(a *Mpflt, b *Mpflt) {
+func (a *Mpflt) Add(b *Mpflt) {
 if Mpdebug {
 fmt.Printf("\n%v + %v", a, b)
 }
@@ -62,14 +62,14 @@ func mpaddfltflt(a *Mpflt, b *Mpflt) {
 }
 }
-func mpaddcflt(a *Mpflt, c float64) {
+func (a *Mpflt) AddFloat64(c float64) {
 var b Mpflt
-Mpmovecflt(&b, c)
-mpaddfltflt(a, &b)
+b.SetFloat64(c)
+a.Add(&b)
 }
-func mpsubfltflt(a *Mpflt, b *Mpflt) {
+func (a *Mpflt) Sub(b *Mpflt) {
 if Mpdebug {
 fmt.Printf("\n%v - %v", a, b)
 }
@@ -81,7 +81,7 @@ func mpsubfltflt(a *Mpflt, b *Mpflt) {
 }
 }
-func mpmulfltflt(a *Mpflt, b *Mpflt) {
+func (a *Mpflt) Mul(b *Mpflt) {
 if Mpdebug {
 fmt.Printf("%v\n * %v\n", a, b)
 }
@@ -93,14 +93,14 @@ func mpmulfltflt(a *Mpflt, b *Mpflt) {
 }
 }
-func mpmulcflt(a *Mpflt, c float64) {
+func (a *Mpflt) MulFloat64(c float64) {
 var b Mpflt
-Mpmovecflt(&b, c)
-mpmulfltflt(a, &b)
+b.SetFloat64(c)
+a.Mul(&b)
 }
-func mpdivfltflt(a *Mpflt, b *Mpflt) {
+func (a *Mpflt) Quo(b *Mpflt) {
 if Mpdebug {
 fmt.Printf("%v\n / %v\n", a, b)
 }
@@ -112,18 +112,18 @@ func mpdivfltflt(a *Mpflt, b *Mpflt) {
 }
 }
-func mpcmpfltflt(a *Mpflt, b *Mpflt) int {
+func (a *Mpflt) Cmp(b *Mpflt) int {
 return a.Val.Cmp(&b.Val)
 }
-func mpcmpfltc(b *Mpflt, c float64) int {
+func (b *Mpflt) CmpFloat64(c float64) int {
 var a Mpflt
-Mpmovecflt(&a, c)
-return mpcmpfltflt(b, &a)
+a.SetFloat64(c)
+return b.Cmp(&a)
 }
-func mpgetflt(a *Mpflt) float64 {
+func (a *Mpflt) Float64() float64 {
 x, _ := a.Val.Float64()
 // check for overflow
@@ -134,7 +134,7 @@ func mpgetflt(a *Mpflt) float64 {
 return x + 0 // avoid -0 (should not be needed, but be conservative)
 }
-func mpgetflt32(a *Mpflt) float64 {
+func (a *Mpflt) Float32() float64 {
 x32, _ := a.Val.Float32()
 x := float64(x32)
@@ -146,7 +146,7 @@ func mpgetflt32(a *Mpflt) float64 {
 return x + 0 // avoid -0 (should not be needed, but be conservative)
 }
-func Mpmovecflt(a *Mpflt, c float64) {
+func (a *Mpflt) SetFloat64(c float64) {
 if Mpdebug {
 fmt.Printf("\nconst %g", c)
 }
@@ -162,7 +162,7 @@ func Mpmovecflt(a *Mpflt, c float64) {
 }
 }
-func mpnegflt(a *Mpflt) {
+func (a *Mpflt) Neg() {
 // avoid -0
 if a.Val.Sign() != 0 {
 a.Val.Neg(&a.Val)
@@ -173,7 +173,7 @@ func mpnegflt(a *Mpflt) {
 // floating point input
 // required syntax is [+-]d*[.]d*[e[+-]d*] or [+-]0xH*[e[+-]d*]
 //
-func mpatoflt(a *Mpflt, as string) {
+func (a *Mpflt) SetString(as string) {
 for len(as) > 0 && (as[0] == ' ' || as[0] == '\t') {
 as = as[1:]
 }

@@ -18,25 +18,25 @@ type Mpint struct {
 Rune bool // set if syntax indicates default type rune
 }
-func mpsetovf(a *Mpint) {
+func (a *Mpint) SetOverflow() {
 a.Val.SetUint64(1) // avoid spurious div-zero errors
 a.Ovf = true
 }
-func mptestovf(a *Mpint, extra int) bool {
+func (a *Mpint) checkOverflow(extra int) bool {
 // We don't need to be precise here, any reasonable upper limit would do.
 // For now, use existing limit so we pass all the tests unchanged.
 if a.Val.BitLen()+extra > Mpprec {
-mpsetovf(a)
+a.SetOverflow()
 }
 return a.Ovf
 }
-func mpmovefixfix(a, b *Mpint) {
+func (a *Mpint) Set(b *Mpint) {
 a.Val.Set(&b.Val)
 }
-func mpmovefltfix(a *Mpint, b *Mpflt) int {
+func (a *Mpint) SetFloat(b *Mpflt) int {
 // avoid converting huge floating-point numbers to integers
 // (2*Mpprec is large enough to permit all tests to pass)
 if b.Val.MantExp(nil) > 2*Mpprec {
@@ -68,130 +68,130 @@ func mpmovefltfix(a *Mpint, b *Mpflt) int {
 return -1
 }
-func mpaddfixfix(a, b *Mpint, quiet int) {
+func (a *Mpint) Add(b *Mpint, quiet int) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mpaddfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
 a.Val.Add(&a.Val, &b.Val)
-if mptestovf(a, 0) && quiet == 0 {
+if a.checkOverflow(0) && quiet == 0 {
 Yyerror("constant addition overflow")
 }
 }
-func mpsubfixfix(a, b *Mpint) {
+func (a *Mpint) Sub(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mpsubfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
 a.Val.Sub(&a.Val, &b.Val)
-if mptestovf(a, 0) {
+if a.checkOverflow(0) {
 Yyerror("constant subtraction overflow")
 }
 }
-func mpmulfixfix(a, b *Mpint) {
+func (a *Mpint) Mul(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mpmulfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
 a.Val.Mul(&a.Val, &b.Val)
-if mptestovf(a, 0) {
+if a.checkOverflow(0) {
 Yyerror("constant multiplication overflow")
 }
 }
-func mpdivfixfix(a, b *Mpint) {
+func (a *Mpint) Quo(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mpdivfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
 a.Val.Quo(&a.Val, &b.Val)
-if mptestovf(a, 0) {
+if a.checkOverflow(0) {
 // can only happen for div-0 which should be checked elsewhere
 Yyerror("constant division overflow")
 }
 }
-func mpmodfixfix(a, b *Mpint) {
+func (a *Mpint) Rem(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mpmodfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
 a.Val.Rem(&a.Val, &b.Val)
-if mptestovf(a, 0) {
+if a.checkOverflow(0) {
 // should never happen
 Yyerror("constant modulo overflow")
 }
 }
-func mporfixfix(a, b *Mpint) {
+func (a *Mpint) Or(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mporfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
 a.Val.Or(&a.Val, &b.Val)
 }
-func mpandfixfix(a, b *Mpint) {
+func (a *Mpint) And(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mpandfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
 a.Val.And(&a.Val, &b.Val)
 }
-func mpandnotfixfix(a, b *Mpint) {
+func (a *Mpint) AndNot(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mpandnotfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
 a.Val.AndNot(&a.Val, &b.Val)
 }
-func mpxorfixfix(a, b *Mpint) {
+func (a *Mpint) Xor(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mpxorfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
@@ -199,10 +199,10 @@ func mpxorfixfix(a, b *Mpint) {
 }
 // shift left by s (or right by -s)
-func Mpshiftfix(a *Mpint, s int) {
+func (a *Mpint) shift(s int) {
 switch {
 case s > 0:
-if mptestovf(a, s) {
+if a.checkOverflow(s) {
 Yyerror("constant shift overflow")
 return
 }
@@ -212,65 +212,65 @@ func Mpshiftfix(a *Mpint, s int) {
 }
 }
-func mplshfixfix(a, b *Mpint) {
+func (a *Mpint) Lsh(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mplshfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
-s := Mpgetfix(b)
+s := b.Int64()
 if s < 0 || s >= Mpprec {
 msg := "shift count too large"
 if s < 0 {
 msg = "invalid negative shift count"
 }
 Yyerror("%s: %d", msg, s)
-Mpmovecfix(a, 0)
+a.SetInt64(0)
 return
 }
-Mpshiftfix(a, int(s))
+a.shift(int(s))
 }
-func mprshfixfix(a, b *Mpint) {
+func (a *Mpint) Rsh(b *Mpint) {
 if a.Ovf || b.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("ovf in mprshfixfix")
 }
-mpsetovf(a)
+a.SetOverflow()
 return
 }
-s := Mpgetfix(b)
+s := b.Int64()
 if s < 0 {
 Yyerror("invalid negative shift count: %d", s)
 if a.Val.Sign() < 0 {
-Mpmovecfix(a, -1)
+a.SetInt64(-1)
 } else {
-Mpmovecfix(a, 0)
+a.SetInt64(0)
 }
 return
 }
-Mpshiftfix(a, int(-s))
+a.shift(int(-s))
 }
-func Mpcmpfixfix(a, b *Mpint) int {
+func (a *Mpint) Cmp(b *Mpint) int {
 return a.Val.Cmp(&b.Val)
 }
-func mpcmpfixc(b *Mpint, c int64) int {
+func (b *Mpint) CmpInt64(c int64) int {
 return b.Val.Cmp(big.NewInt(c))
 }
-func mpnegfix(a *Mpint) {
+func (a *Mpint) Neg() {
 a.Val.Neg(&a.Val)
 }
-func Mpgetfix(a *Mpint) int64 {
+func (a *Mpint) Int64() int64 {
 if a.Ovf {
 if nsavederrors+nerrors == 0 {
 Yyerror("constant overflow")
@@ -281,11 +281,11 @@ func Mpgetfix(a *Mpint) int64 {
 return a.Val.Int64()
 }
-func Mpmovecfix(a *Mpint, c int64) {
+func (a *Mpint) SetInt64(c int64) {
 a.Val.SetInt64(c)
 }
-func mpatofix(a *Mpint, as string) {
+func (a *Mpint) SetString(as string) {
 _, ok := a.Val.SetString(as, 0)
 if !ok {
 // required syntax is [+-][0[x]]d*
@@ -299,7 +299,7 @@ func mpatofix(a *Mpint, as string) {
 a.Val.SetUint64(0)
 return
 }
-if mptestovf(a, 0) {
+if a.checkOverflow(0) {
 Yyerror("constant too large: %s", as)
 }
 }

@@ -343,7 +343,7 @@ func gdata(nam *Node, nr *Node, wid int) {
 case CTFLT:
 s := Linksym(nam.Sym)
-f := mpgetflt(nr.Val().U.(*Mpflt))
+f := nr.Val().U.(*Mpflt).Float64()
 switch nam.Type.Etype {
 case TFLOAT32:
 s.WriteFloat32(Ctxt, nam.Xoffset, float32(f))
@@ -375,8 +375,8 @@ func gdata(nam *Node, nr *Node, wid int) {
 func gdatacomplex(nam *Node, cval *Mpcplx) {
 t := Types[cplxsubtype(nam.Type.Etype)]
-r := mpgetflt(&cval.Real)
-i := mpgetflt(&cval.Imag)
+r := cval.Real.Float64()
+i := cval.Imag.Float64()
 s := Linksym(nam.Sym)
 switch t.Etype {

@@ -3269,14 +3269,14 @@ func (p *parser) hidden_literal() *Node {
 p.next()
 switch ss.Val().Ctype() {
 case CTINT, CTRUNE:
-mpnegfix(ss.Val().U.(*Mpint))
+ss.Val().U.(*Mpint).Neg()
 break
 case CTFLT:
-mpnegflt(ss.Val().U.(*Mpflt))
+ss.Val().U.(*Mpflt).Neg()
 break
 case CTCPLX:
-mpnegflt(&ss.Val().U.(*Mpcplx).Real)
-mpnegflt(&ss.Val().U.(*Mpcplx).Imag)
+ss.Val().U.(*Mpcplx).Real.Neg()
+ss.Val().U.(*Mpcplx).Imag.Neg()
 break
 default:
 Yyerror("bad negated constant")
@@ -3318,11 +3318,11 @@ func (p *parser) hidden_constant() *Node {
 if s2.Val().Ctype() == CTRUNE && s4.Val().Ctype() == CTINT {
 ss := s2
-mpaddfixfix(s2.Val().U.(*Mpint), s4.Val().U.(*Mpint), 0)
+s2.Val().U.(*Mpint).Add(s4.Val().U.(*Mpint), 0)
 return ss
 }
 s4.Val().U.(*Mpcplx).Real = s4.Val().U.(*Mpcplx).Imag
-Mpmovecflt(&s4.Val().U.(*Mpcplx).Imag, 0.0)
+s4.Val().U.(*Mpcplx).Imag.SetFloat64(0.0)
 return nodcplxlit(s2.Val(), s4.Val())
 }
 }

@@ -436,7 +436,7 @@ func staticassign(l *Node, r *Node, out *[]*Node) bool {
 ta := typ(TARRAY)
 ta.Type = r.Type.Type
-ta.Bound = Mpgetfix(r.Right.Val().U.(*Mpint))
+ta.Bound = r.Right.Val().U.(*Mpint).Int64()
 a := staticname(ta, 1)
 inittemps[r] = a
 n := *l
@@ -691,7 +691,7 @@ func arraylit(ctxt int, pass int, n *Node, var_ *Node, init *Nodes) {
 func slicelit(ctxt int, n *Node, var_ *Node, init *Nodes) {
 // make an array type
 t := n.Type.Copy()
-t.Bound = Mpgetfix(n.Right.Val().U.(*Mpint))
+t.Bound = n.Right.Val().U.(*Mpint).Int64()
 t.Width = 0
 t.Sym = nil
 t.Haspointers = 0
@@ -1178,7 +1178,7 @@ func oaslit(n *Node, init *Nodes) bool {
 func getlit(lit *Node) int {
 if Smallintconst(lit) {
-return int(Mpgetfix(lit.Val().U.(*Mpint)))
+return int(lit.Val().U.(*Mpint).Int64())
 }
 return -1
 }
@@ -1241,7 +1241,7 @@ func initplan(n *Node) {
 if a.Op != OKEY || !Smallintconst(a.Left) {
 Fatalf("initplan arraylit")
 }
-addvalue(p, n.Type.Type.Width*Mpgetfix(a.Left.Val().U.(*Mpint)), a.Right)
+addvalue(p, n.Type.Type.Width*a.Left.Val().U.(*Mpint).Int64(), a.Right)
 }
 case OSTRUCTLIT:
@@ -1302,13 +1302,13 @@ func iszero(n *Node) bool {
 return !n.Val().U.(bool)
 case CTINT, CTRUNE:
-return mpcmpfixc(n.Val().U.(*Mpint), 0) == 0
+return n.Val().U.(*Mpint).CmpInt64(0) == 0
 case CTFLT:
-return mpcmpfltc(n.Val().U.(*Mpflt), 0) == 0
+return n.Val().U.(*Mpflt).CmpFloat64(0) == 0
 case CTCPLX:
-return mpcmpfltc(&n.Val().U.(*Mpcplx).Real, 0) == 0 && mpcmpfltc(&n.Val().U.(*Mpcplx).Imag, 0) == 0
+return n.Val().U.(*Mpcplx).Real.CmpFloat64(0) == 0 && n.Val().U.(*Mpcplx).Imag.CmpFloat64(0) == 0
 }
 case OARRAYLIT:

@@ -1379,7 +1379,7 @@ func (s *state) expr(n *Node) *ssa.Value {
 case OLITERAL:
 switch n.Val().Ctype() {
 case CTINT:
-i := Mpgetfix(n.Val().U.(*Mpint))
+i := n.Val().U.(*Mpint).Int64()
 switch n.Type.Size() {
 case 1:
 return s.constInt8(n.Type, int8(i))
@@ -1421,9 +1421,9 @@ func (s *state) expr(n *Node) *ssa.Value {
 f := n.Val().U.(*Mpflt)
 switch n.Type.Size() {
 case 4:
-return s.constFloat32(n.Type, mpgetflt32(f))
+return s.constFloat32(n.Type, f.Float32())
 case 8:
-return s.constFloat64(n.Type, mpgetflt(f))
+return s.constFloat64(n.Type, f.Float64())
 default:
 s.Fatalf("bad float size %d", n.Type.Size())
 return nil
@@ -1437,15 +1437,15 @@ func (s *state) expr(n *Node) *ssa.Value {
 {
 pt := Types[TFLOAT32]
 return s.newValue2(ssa.OpComplexMake, n.Type,
-s.constFloat32(pt, mpgetflt32(r)),
-s.constFloat32(pt, mpgetflt32(i)))
+s.constFloat32(pt, r.Float32()),
+s.constFloat32(pt, i.Float32()))
 }
 case 16:
 {
 pt := Types[TFLOAT64]
 return s.newValue2(ssa.OpComplexMake, n.Type,
-s.constFloat64(pt, mpgetflt(r)),
-s.constFloat64(pt, mpgetflt(i)))
+s.constFloat64(pt, r.Float64()),
+s.constFloat64(pt, i.Float64()))
 }
 default:
 s.Fatalf("bad float size %d", n.Type.Size())

@@ -438,7 +438,7 @@ func Nodintconst(v int64) *Node {
 c := Nod(OLITERAL, nil, nil)
 c.Addable = true
 c.SetVal(Val{new(Mpint)})
-Mpmovecfix(c.Val().U.(*Mpint), v)
+c.Val().U.(*Mpint).SetInt64(v)
 c.Type = Types[TIDEAL]
 ullmancalc(c)
 return c
@@ -448,7 +448,7 @@ func nodfltconst(v *Mpflt) *Node {
 c := Nod(OLITERAL, nil, nil)
 c.Addable = true
 c.SetVal(Val{newMpflt()})
-mpmovefltflt(c.Val().U.(*Mpflt), v)
+c.Val().U.(*Mpflt).Set(v)
 c.Type = Types[TIDEAL]
 ullmancalc(c)
 return c
@@ -460,7 +460,7 @@ func Nodconst(n *Node, t *Type, v int64) {
 n.Addable = true
 ullmancalc(n)
 n.SetVal(Val{new(Mpint)})
-Mpmovecfix(n.Val().U.(*Mpint), v)
+n.Val().U.(*Mpint).SetInt64(v)
 n.Type = t
 if Isfloat[t.Etype] {
@@ -491,7 +491,7 @@ func aindex(b *Node, t *Type) *Type {
 Yyerror("array bound must be an integer expression")
 case CTINT, CTRUNE:
-bound = Mpgetfix(b.Val().U.(*Mpint))
+bound = b.Val().U.(*Mpint).Int64()
 if bound < 0 {
 Yyerror("array bound must be non negative")
 }
@@ -2198,7 +2198,7 @@ func powtwo(n *Node) int {
 return -1
 }
-v := uint64(Mpgetfix(n.Val().U.(*Mpint)))
+v := uint64(n.Val().U.(*Mpint).Int64())
 b := uint64(1)
 for i := 0; i < 64; i++ {
 if b == v {

@@ -795,9 +795,9 @@ func exprcmp(c1, c2 *caseClause) int {
 // sort by constant value to enable binary search
 switch ct {
 case CTFLT:
-return mpcmpfltflt(n1.Val().U.(*Mpflt), n2.Val().U.(*Mpflt))
+return n1.Val().U.(*Mpflt).Cmp(n2.Val().U.(*Mpflt))
 case CTINT, CTRUNE:
-return Mpcmpfixfix(n1.Val().U.(*Mpint), n2.Val().U.(*Mpint))
+return n1.Val().U.(*Mpint).Cmp(n2.Val().U.(*Mpint))
 case CTSTR:
 // Sort strings by length and then by value.
 // It is much cheaper to compare lengths than values,

@@ -367,7 +367,7 @@ OpSwitch:
 return
 }
-t.Bound = Mpgetfix(v.U.(*Mpint))
+t.Bound = v.U.(*Mpint).Int64()
 if doesoverflow(v, Types[TINT]) {
 Yyerror("array bound is too large")
 n.Type = nil
@@ -733,7 +733,7 @@ OpSwitch:
 }
 if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
-if mpcmpfixc(r.Val().U.(*Mpint), 0) == 0 {
+if r.Val().U.(*Mpint).CmpInt64(0) == 0 {
 Yyerror("division by zero")
 n.Type = nil
 return
@@ -1000,14 +1000,14 @@ OpSwitch:
 }
 if !n.Bounded && Isconst(n.Right, CTINT) {
-x := Mpgetfix(n.Right.Val().U.(*Mpint))
+x := n.Right.Val().U.(*Mpint).Int64()
 if x < 0 {
 Yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
 } else if Isfixedarray(t) && x >= t.Bound {
 Yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.Bound)
 } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val().U.(string))) {
 Yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.Val().U.(string)))
-} else if Mpcmpfixfix(n.Right.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
+} else if n.Right.Val().U.(*Mpint).Cmp(Maxintval[TINT]) > 0 {
 Yyerror("invalid %s index %v (index too large)", why, n.Right)
 }
 }
@@ -1808,7 +1808,7 @@ OpSwitch:
 n.Type = nil
 return
 }
-if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && Mpcmpfixfix(l.Val().U.(*Mpint), r.Val().U.(*Mpint)) > 0 {
+if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && l.Val().U.(*Mpint).Cmp(r.Val().U.(*Mpint)) > 0 {
 Yyerror("len larger than cap in make(%v)", t)
 n.Type = nil
 return
@@ -2204,16 +2204,16 @@ func checksliceindex(l *Node, r *Node, tp *Type) bool {
 }
 if r.Op == OLITERAL {
-if Mpgetfix(r.Val().U.(*Mpint)) < 0 {
+if r.Val().U.(*Mpint).Int64() < 0 {
 Yyerror("invalid slice index %v (index must be non-negative)", r)
 return false
-} else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val().U.(*Mpint)) > tp.Bound {
+} else if tp != nil && tp.Bound > 0 && r.Val().U.(*Mpint).Int64() > tp.Bound {
 Yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.Bound)
 return false
-} else if Isconst(l, CTSTR) && Mpgetfix(r.Val().U.(*Mpint)) > int64(len(l.Val().U.(string))) {
+} else if Isconst(l, CTSTR) && r.Val().U.(*Mpint).Int64() > int64(len(l.Val().U.(string))) {
 Yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.Val().U.(string)))
 return false
-} else if Mpcmpfixfix(r.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
+} else if r.Val().U.(*Mpint).Cmp(Maxintval[TINT]) > 0 {
 Yyerror("invalid slice index %v (index too large)", r)
 return false
 }
@@ -2223,7 +2223,7 @@ func checksliceindex(l *Node, r *Node, tp *Type) bool {
 }
 func checksliceconst(lo *Node, hi *Node) bool {
-if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && Mpcmpfixfix(lo.Val().U.(*Mpint), hi.Val().U.(*Mpint)) > 0 {
+if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && lo.Val().U.(*Mpint).Cmp(hi.Val().U.(*Mpint)) > 0 {
 Yyerror("invalid slice index: %v > %v", lo, hi)
 return false
 }
@@ -2779,10 +2779,10 @@ func keydup(n *Node, hash map[uint32][]*Node) {
 h = 23
 case CTINT, CTRUNE:
-h = uint32(Mpgetfix(n.Val().U.(*Mpint)))
+h = uint32(n.Val().U.(*Mpint).Int64())
 case CTFLT:
-d := mpgetflt(n.Val().U.(*Mpflt))
+d := n.Val().U.(*Mpflt).Float64()
 x := math.Float64bits(d)
 for i := 0; i < 8; i++ {
 h = h*PRIME1 + uint32(x&0xFF)
@@ -2834,7 +2834,7 @@ func indexdup(n *Node, hash map[int64]*Node) {
 Fatalf("indexdup: not OLITERAL")
 }
-v := Mpgetfix(n.Val().U.(*Mpint))
+v := n.Val().U.(*Mpint).Int64()
 if hash[v] != nil {
 Yyerror("duplicate index in array literal: %d", v)
 return
@@ -3794,12 +3794,12 @@ func checkmake(t *Type, arg string, n *Node) bool {
 switch n.Val().Ctype() {
 case CTINT, CTRUNE, CTFLT, CTCPLX:
 n.SetVal(toint(n.Val()))
-if mpcmpfixc(n.Val().U.(*Mpint), 0) < 0 {
+if n.Val().U.(*Mpint).CmpInt64(0) < 0 {
 Yyerror("negative %s argument in make(%v)", arg, t)
 return false
 }
-if Mpcmpfixfix(n.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
+if n.Val().U.(*Mpint).Cmp(Maxintval[TINT]) > 0 {
 Yyerror("%s argument too large in make(%v)", arg, t)
 return false
 }

@@ -309,25 +309,25 @@ func typeinit() {
 iscmp[OEQ] = true
 iscmp[ONE] = true
-mpatofix(Maxintval[TINT8], "0x7f")
-mpatofix(Minintval[TINT8], "-0x80")
-mpatofix(Maxintval[TINT16], "0x7fff")
-mpatofix(Minintval[TINT16], "-0x8000")
-mpatofix(Maxintval[TINT32], "0x7fffffff")
-mpatofix(Minintval[TINT32], "-0x80000000")
-mpatofix(Maxintval[TINT64], "0x7fffffffffffffff")
-mpatofix(Minintval[TINT64], "-0x8000000000000000")
-mpatofix(Maxintval[TUINT8], "0xff")
-mpatofix(Maxintval[TUINT16], "0xffff")
-mpatofix(Maxintval[TUINT32], "0xffffffff")
-mpatofix(Maxintval[TUINT64], "0xffffffffffffffff")
+Maxintval[TINT8].SetString("0x7f")
+Minintval[TINT8].SetString("-0x80")
+Maxintval[TINT16].SetString("0x7fff")
+Minintval[TINT16].SetString("-0x8000")
+Maxintval[TINT32].SetString("0x7fffffff")
+Minintval[TINT32].SetString("-0x80000000")
+Maxintval[TINT64].SetString("0x7fffffffffffffff")
+Minintval[TINT64].SetString("-0x8000000000000000")
+Maxintval[TUINT8].SetString("0xff")
+Maxintval[TUINT16].SetString("0xffff")
+Maxintval[TUINT32].SetString("0xffffffff")
+Maxintval[TUINT64].SetString("0xffffffffffffffff")
 // f is valid float if min < f < max. (min and max are not themselves valid.)
-mpatoflt(maxfltval[TFLOAT32], "33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
-mpatoflt(minfltval[TFLOAT32], "-33554431p103")
-mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
-mpatoflt(minfltval[TFLOAT64], "-18014398509481983p970")
+maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
+minfltval[TFLOAT32].SetString("-33554431p103")
+maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
+minfltval[TFLOAT64].SetString("-18014398509481983p970")
 maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
 minfltval[TCOMPLEX64] = minfltval[TFLOAT32]

@@ -103,7 +103,7 @@ ret:
 // any side effects disappear; ignore init
 var val Val
 val.U = new(Mpint)
-Mpmovecfix(val.U.(*Mpint), v)
+val.U.(*Mpint).SetInt64(v)
 n := Nod(OLITERAL, nil, nil)
 n.Orig = nn
 n.SetVal(val)

@@ -366,7 +366,7 @@ func isSmallMakeSlice(n *Node) bool {
 }
 t := n.Type
-return Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || Mpgetfix(r.Val().U.(*Mpint)) < (1<<16)/t.Type.Width)
+return Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || r.Val().U.(*Mpint).Int64() < (1<<16)/t.Type.Width)
 }
 // walk the whole tree of the body of an
@@ -1220,7 +1220,7 @@ opswitch:
 // replace "abc"[1] with 'b'.
 // delayed until now because "abc"[1] is not
 // an ideal constant.
-v := Mpgetfix(n.Right.Val().U.(*Mpint))
+v := n.Right.Val().U.(*Mpint).Int64()
 Nodconst(n, n.Type, int64(n.Left.Val().U.(string)[v]))
 n.Typecheck = 1
@@ -1229,7 +1229,7 @@ opswitch:
 }
 if Isconst(n.Right, CTINT) {
-if Mpcmpfixfix(n.Right.Val().U.(*Mpint), &mpzero) < 0 || Mpcmpfixfix(n.Right.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
+if n.Right.Val().U.(*Mpint).Cmp(&mpzero) < 0 || n.Right.Val().U.(*Mpint).Cmp(Maxintval[TINT]) > 0 {
 Yyerror("index out of bounds")
 }
 }
@@ -3304,7 +3304,7 @@ func samecheap(a *Node, b *Node) bool {
 case OINDEX:
 ar = a.Right
 br = b.Right
-if !Isconst(ar, CTINT) || !Isconst(br, CTINT) || Mpcmpfixfix(ar.Val().U.(*Mpint), br.Val().U.(*Mpint)) != 0 {
+if !Isconst(ar, CTINT) || !Isconst(br, CTINT) || ar.Val().U.(*Mpint).Cmp(br.Val().U.(*Mpint)) != 0 {
 return false
 }
 }
@@ -3340,9 +3340,9 @@ func walkrotate(np **Node) {
 w := int(l.Type.Width * 8)
 if Smallintconst(l.Right) && Smallintconst(r.Right) {
-sl := int(Mpgetfix(l.Right.Val().U.(*Mpint)))
+sl := int(l.Right.Val().U.(*Mpint).Int64())
 if sl >= 0 {
-sr := int(Mpgetfix(r.Right.Val().U.(*Mpint)))
+sr := int(r.Right.Val().U.(*Mpint).Int64())
 if sr >= 0 && sl+sr == w {
 // Rewrite left shift half to left rotate.
 if l.Op == OLSH {
@@ -3353,7 +3353,7 @@ func walkrotate(np **Node) {
 n.Op = OLROT
 // Remove rotate 0 and rotate w.
-s := int(Mpgetfix(n.Right.Val().U.(*Mpint)))
+s := int(n.Right.Val().U.(*Mpint).Int64())
 if s == 0 || s == w {
 n = n.Left
@@ -3394,7 +3394,7 @@ func walkmul(np **Node, init *Nodes) {
 // x*0 is 0 (and side effects of x).
 var pow int
 var w int
-if Mpgetfix(nr.Val().U.(*Mpint)) == 0 {
+if nr.Val().U.(*Mpint).Int64() == 0 {
 cheapexpr(nl, init)
 Nodconst(n, n.Type, 0)
 goto ret
@@ -3485,10 +3485,10 @@ func walkdiv(np **Node, init *Nodes) {
 m.W = w
 if Issigned[nl.Type.Etype] {
-m.Sd = Mpgetfix(nr.Val().U.(*Mpint))
+m.Sd = nr.Val().U.(*Mpint).Int64()
 Smagic(&m)
 } else {
-m.Ud = uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+m.Ud = uint64(nr.Val().U.(*Mpint).Int64())
 Umagic(&m)
 }
@@ -3680,7 +3680,7 @@ func walkdiv(np **Node, init *Nodes) {
 // n = nl & (nr-1)
 n.Op = OAND
-Nodconst(nc, nl.Type, Mpgetfix(nr.Val().U.(*Mpint))-1)
+Nodconst(nc, nl.Type, nr.Val().U.(*Mpint).Int64()-1)
 } else {
 // n = nl >> pow
 n.Op = ORSH
@@ -3710,7 +3710,7 @@ func bounded(n *Node, max int64) bool {
 bits := int32(8 * n.Type.Width)
 if Smallintconst(n) {
-v := Mpgetfix(n.Val().U.(*Mpint))
+v := n.Val().U.(*Mpint).Int64()
 return 0 <= v && v < max
 }
@@ -3718,9 +3718,9 @@ func bounded(n *Node, max int64) bool {
 case OAND:
 v := int64(-1)
 if Smallintconst(n.Left) {
-v = Mpgetfix(n.Left.Val().U.(*Mpint))
+v = n.Left.Val().U.(*Mpint).Int64()
 } else if Smallintconst(n.Right) {
-v = Mpgetfix(n.Right.Val().U.(*Mpint))
+v = n.Right.Val().U.(*Mpint).Int64()
 }
 if 0 <= v && v < max {
@@ -3729,7 +3729,7 @@ func bounded(n *Node, max int64) bool {
 case OMOD:
 if !sign && Smallintconst(n.Right) {
-v := Mpgetfix(n.Right.Val().U.(*Mpint))
+v := n.Right.Val().U.(*Mpint).Int64()
 if 0 <= v && v <= max {
 return true
 }
@@ -3737,7 +3737,7 @@ func bounded(n *Node, max int64) bool {
 case ODIV:
 if !sign && Smallintconst(n.Right) {
-v := Mpgetfix(n.Right.Val().U.(*Mpint))
+v := n.Right.Val().U.(*Mpint).Int64()
 for bits > 0 && v >= 2 {
 bits--
 v >>= 1
@@ -3746,7 +3746,7 @@ func bounded(n *Node, max int64) bool {
 case ORSH:
 if !sign && Smallintconst(n.Right) {
-v := Mpgetfix(n.Right.Val().U.(*Mpint))
+v := n.Right.Val().U.(*Mpint).Int64()
 if v > int64(bits) {
 return true
 }
@@ -3926,17 +3926,17 @@ func candiscard(n *Node) bool {
 // Discardable as long as we know it's not division by zero.
 case ODIV, OMOD:
-if Isconst(n.Right, CTINT) && mpcmpfixc(n.Right.Val().U.(*Mpint), 0) != 0 {
+if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
 break
 }
-if Isconst(n.Right, CTFLT) && mpcmpfltc(n.Right.Val().U.(*Mpflt), 0) != 0 {
+if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
 break
 }
 return false
 // Discardable as long as we know it won't fail because of a bad size.
 case OMAKECHAN, OMAKEMAP:
-if Isconst(n.Left, CTINT) && mpcmpfixc(n.Left.Val().U.(*Mpint), 0) == 0 {
+if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
 break
 }
 return false