cmd/internal/gc: use big.Int to represent Mpint bits

- renamed (existing) Mpint -> Mpfix
- defined (new) Mpint using big.Int
- modified the mpxxx funcs to operate on the new Mpint
- renamed mpxxx funcs -> _mpxxx where still needed for Mpfix
- left the old (possibly unused) code in place for comparison

Passes all.bash.

Change-Id: I1fc7bba7dc4b6386f2f0950d745cec17c1e67615

cmd/internal/gc: renamed Mpint -> Mpfix

Change-Id: Ia06aeae1081ef29d5ad9b711fb57e4c5579ce29b
Reviewed-on: https://go-review.googlesource.com/7830
Reviewed-by: Russ Cox <rsc@golang.org>
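
For orientation before the diff: the essence of the change is that an integer constant is now a big.Int plus a sticky overflow flag, with only the size cap checked by hand. Below is a minimal, self-contained sketch of that scheme; the type and function names are illustrative, not the compiler's, and bitLimit stands in for the Mpscale*Mpprec cap kept by the CL.

	package main

	import (
		"fmt"
		"math/big"
	)

	// capped mirrors the shape of the new Mpint: an arbitrary-precision
	// value plus a sticky overflow flag.
	type capped struct {
		Val big.Int
		Ovf bool // set once Val exceeds the cap; stays set
	}

	const bitLimit = 512 // stand-in for the compiler's Mpscale*Mpprec cap

	// checkOvf zeroes the value and sets the sticky flag when the cap is
	// exceeded, the way mptestovf/mpsetovf do in the CL.
	func (a *capped) checkOvf() bool {
		if a.Val.BitLen() > bitLimit {
			a.Val.SetUint64(0)
			a.Ovf = true
		}
		return a.Ovf
	}

	// add shows the common pattern of the rewritten mpxxx funcs: bail out
	// on a poisoned operand, let big.Int do the arithmetic, re-check the cap.
	func (a *capped) add(b *capped) {
		if a.Ovf || b.Ovf {
			a.Ovf = true
			return
		}
		a.Val.Add(&a.Val, &b.Val)
		a.checkOvf()
	}

	func main() {
		var x, y capped
		x.Val.SetString("123456789012345678901234567890", 10)
		y.Val.SetInt64(1)
		x.add(&y)
		fmt.Println(x.Val.String(), x.Ovf) // 123456789012345678901234567891 false
	}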
diff --git a/src/cmd/internal/gc/go.go b/src/cmd/internal/gc/go.go
index 67a226e..26f545a 100644
--- a/src/cmd/internal/gc/go.go
+++ b/src/cmd/internal/gc/go.go
@@ -7,6 +7,7 @@
 import (
 	"bytes"
 	"cmd/internal/obj"
+	"math/big"
 )
 
 // avoid <ctype.h>
@@ -65,14 +66,22 @@
 	Mpdebug = 0
 )
 
+// Mpint represents an integer constant.
 type Mpint struct {
+	Val big.Int
+	Ovf bool // set if Val overflowed compiler limit (sticky)
+}
+
+// Mpfix is the original (old) representation of an integer constant.
+// Still needed for Mpflt.
+type Mpfix struct {
 	A   [Mpprec]int
 	Neg uint8
 	Ovf uint8
 }
 
 type Mpflt struct {
-	Val Mpint
+	Val Mpfix
 	Exp int16
 }
 
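Mpflt keeps the old representation for its mantissa, which is why Mpfix stays around: a float constant is Val scaled by 2^Exp. A short sketch of that reading, using big.Float purely for illustration (the compiler does not do this):

	package main

	import (
		"fmt"
		"math/big"
	)

	// fltValue interprets an Mpflt-style pair: mantissa * 2^exp.
	func fltValue(mantissa *big.Int, exp int) *big.Float {
		f := new(big.Float).SetInt(mantissa)
		return f.SetMantExp(f, exp) // scale by 2^exp
	}

	func main() {
		fmt.Println(fltValue(big.NewInt(3), -1)) // 1.5
	}
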
diff --git a/src/cmd/internal/gc/lex.go b/src/cmd/internal/gc/lex.go
index 7470a1f..61e8281 100644
--- a/src/cmd/internal/gc/lex.go
+++ b/src/cmd/internal/gc/lex.go
@@ -1401,7 +1401,7 @@
 	str = lexbuf.String()
 	yylval.val.U.Xval = new(Mpint)
 	mpatofix(yylval.val.U.Xval, str)
-	if yylval.val.U.Xval.Ovf != 0 {
+	if yylval.val.U.Xval.Ovf {
 		Yyerror("overflow in constant")
 		Mpmovecfix(yylval.val.U.Xval, 0)
 	}
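
The lexer change above only adapts to the now-boolean Ovf field; the parsing itself moves into the new mpatofix (mparith1.go below), which hands the literal to big.Int.SetString with base 0 so decimal, octal, and hex forms are all accepted. A standalone sketch of that approach, with an assumed bit limit and plain errors in place of Yyerror:

	package main

	import (
		"fmt"
		"math/big"
	)

	// parseIntConst mimics the new mpatofix: SetString with base 0 accepts
	// the [+-][0[x]]d* forms, and the bit-length check replaces the old
	// word-array overflow detection. bitLimit stands in for Mpscale*Mpprec.
	func parseIntConst(s string, bitLimit int) (*big.Int, error) {
		v := new(big.Int)
		if _, ok := v.SetString(s, 0); !ok {
			return nil, fmt.Errorf("malformed integer constant: %s", s)
		}
		if v.BitLen() > bitLimit {
			return nil, fmt.Errorf("constant too large: %s", s)
		}
		return v, nil
	}

	func main() {
		for _, s := range []string{"42", "0x2a", "0755", "99999999999999999999999", "12x"} {
			v, err := parseIntConst(s, 64)
			fmt.Println(s, "->", v, err)
		}
	}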
diff --git a/src/cmd/internal/gc/mparith1.go b/src/cmd/internal/gc/mparith1.go
index 08f5544..7d90515 100644
--- a/src/cmd/internal/gc/mparith1.go
+++ b/src/cmd/internal/gc/mparith1.go
@@ -8,39 +8,48 @@
 	"cmd/internal/obj"
 	"fmt"
 	"math"
+	"math/big"
 )
 
 /// uses arithmetic
 
-func mpcmpfixflt(a *Mpint, b *Mpflt) int {
+func mpcmpfixflt(a *Mpfix, b *Mpflt) int {
 	var c Mpflt
 
-	buf := Bconv(a, 0)
+	buf := _Bconv(a, 0)
 	mpatoflt(&c, buf)
 	return mpcmpfltflt(&c, b)
 }
 
-func mpcmpfltfix(a *Mpflt, b *Mpint) int {
+func mpcmpfltfix(a *Mpflt, b *Mpfix) int {
 	var c Mpflt
 
-	buf := Bconv(b, 0)
+	buf := _Bconv(b, 0)
 	mpatoflt(&c, buf)
 	return mpcmpfltflt(a, &c)
 }
 
-func Mpcmpfixfix(a *Mpint, b *Mpint) int {
-	var c Mpint
+func Mpcmpfixfix(a, b *Mpint) int {
+	return a.Val.Cmp(&b.Val)
+}
 
-	mpmovefixfix(&c, a)
-	mpsubfixfix(&c, b)
+func _Mpcmpfixfix(a *Mpfix, b *Mpfix) int {
+	var c Mpfix
+
+	_mpmovefixfix(&c, a)
+	_mpsubfixfix(&c, b)
 	return mptestfix(&c)
 }
 
 func mpcmpfixc(b *Mpint, c int64) int {
-	var c1 Mpint
+	return b.Val.Cmp(big.NewInt(c))
+}
 
-	Mpmovecfix(&c1, c)
-	return Mpcmpfixfix(b, &c1)
+func _mpcmpfixc(b *Mpfix, c int64) int {
+	var c1 Mpfix
+
+	_Mpmovecfix(&c1, c)
+	return _Mpcmpfixfix(b, &c1)
 }
 
 func mpcmpfltflt(a *Mpflt, b *Mpflt) int {
@@ -58,10 +67,14 @@
 	return mpcmpfltflt(b, &a)
 }
 
-func mpsubfixfix(a *Mpint, b *Mpint) {
-	mpnegfix(a)
-	mpaddfixfix(a, b, 0)
-	mpnegfix(a)
+func mpsubfixfix(a, b *Mpint) {
+	a.Val.Sub(&a.Val, &b.Val)
+}
+
+func _mpsubfixfix(a *Mpfix, b *Mpfix) {
+	_mpnegfix(a)
+	_mpaddfixfix(a, b, 0)
+	_mpnegfix(a)
 }
 
 func mpsubfltflt(a *Mpflt, b *Mpflt) {
@@ -70,11 +83,11 @@
 	mpnegflt(a)
 }
 
-func mpaddcfix(a *Mpint, c int64) {
-	var b Mpint
+func mpaddcfix(a *Mpfix, c int64) {
+	var b Mpfix
 
-	Mpmovecfix(&b, c)
-	mpaddfixfix(a, &b, 0)
+	_Mpmovecfix(&b, c)
+	_mpaddfixfix(a, &b, 0)
 }
 
 func mpaddcflt(a *Mpflt, c float64) {
@@ -84,11 +97,11 @@
 	mpaddfltflt(a, &b)
 }
 
-func mpmulcfix(a *Mpint, c int64) {
-	var b Mpint
+func mpmulcfix(a *Mpfix, c int64) {
+	var b Mpfix
 
-	Mpmovecfix(&b, c)
-	mpmulfixfix(a, &b)
+	_Mpmovecfix(&b, c)
+	_mpmulfixfix(a, &b)
 }
 
 func mpmulcflt(a *Mpflt, c float64) {
@@ -98,41 +111,136 @@
 	mpmulfltflt(a, &b)
 }
 
-func mpdivfixfix(a *Mpint, b *Mpint) {
-	var q Mpint
-	var r Mpint
-
-	mpdivmodfixfix(&q, &r, a, b)
-	mpmovefixfix(a, &q)
+func mpdivfixfix(a, b *Mpint) {
+	a.Val.Quo(&a.Val, &b.Val)
 }
 
-func mpmodfixfix(a *Mpint, b *Mpint) {
-	var q Mpint
-	var r Mpint
+func _mpdivfixfix(a *Mpfix, b *Mpfix) {
+	var q Mpfix
+	var r Mpfix
 
 	mpdivmodfixfix(&q, &r, a, b)
-	mpmovefixfix(a, &r)
+	_mpmovefixfix(a, &q)
 }
 
-func mpcomfix(a *Mpint) {
-	var b Mpint
+func mpmodfixfix(a, b *Mpint) {
+	a.Val.Rem(&a.Val, &b.Val)
+}
 
-	Mpmovecfix(&b, 1)
-	mpnegfix(a)
-	mpsubfixfix(a, &b)
+func _mpmodfixfix(a *Mpfix, b *Mpfix) {
+	var q Mpfix
+	var r Mpfix
+
+	mpdivmodfixfix(&q, &r, a, b)
+	_mpmovefixfix(a, &r)
+}
+
+func mpcomfix(a *Mpfix) {
+	var b Mpfix
+
+	_Mpmovecfix(&b, 1)
+	_mpnegfix(a)
+	_mpsubfixfix(a, &b)
+}
+
+// *a = Mpfix(*b)
+func mpmoveintfix(a *Mpfix, b *Mpint) {
+	if b.Ovf {
+		_Mpmovecfix(a, 0)
+		a.Ovf = 1
+		return
+	}
+
+	var bb big.Int
+	bb.Abs(&b.Val)
+	i := 0
+	for ; i < Mpprec && bb.Sign() != 0; i++ {
+		// depends on (unspecified) behavior of Int.Uint64
+		a.A[i] = int(bb.Uint64() & Mpmask)
+		bb.Rsh(&bb, Mpscale)
+	}
+
+	if bb.Sign() != 0 {
+		// MPint overflows
+		_Mpmovecfix(a, 0)
+		a.Ovf = 1
+		return
+	}
+
+	for ; i < Mpprec; i++ {
+		a.A[i] = 0
+	}
+
+	a.Neg = 0
+	if b.Val.Sign() < 0 {
+		a.Neg = 1
+	}
+	a.Ovf = 0
+
+	// leave for debugging
+	// println("mpmoveintfix:", b.Val.String(), "->", _Bconv(a, 0))
+}
+
+// *a = big.Int(*b)
+func mpmovefixint(a *Mpint, b *Mpfix) {
+	if b.Ovf != 0 {
+		mpsetovf(a)
+		return
+	}
+
+	i := Mpprec - 1
+	for ; i >= 0 && b.A[i] == 0; i-- {
+	}
+
+	a.Val.SetUint64(0)
+	var x big.Int
+	for ; i >= 0; i-- {
+		a.Val.Lsh(&a.Val, Mpscale)
+		a.Val.Or(&a.Val, x.SetUint64(uint64(b.A[i]&Mpmask)))
+	}
+
+	if b.Neg != 0 {
+		a.Val.Neg(&a.Val)
+	}
+	a.Ovf = false
+
+	// leave for debugging
+	// println("mpmovefixint:", _Bconv(b, 0), "->", a.Val.String())
 }
 
 func Mpmovefixflt(a *Mpflt, b *Mpint) {
+	mpmoveintfix(&a.Val, b) // a.Val = *b
+	a.Exp = 0
+	mpnorm(a)
+}
+
+func _Mpmovefixflt(a *Mpflt, b *Mpfix) {
 	a.Val = *b
 	a.Exp = 0
 	mpnorm(a)
 }
 
+func mpexactfltfix(a *Mpint, b *Mpflt) int {
+	mpmovefixint(a, &b.Val) // *a = b.Val
+	Mpshiftfix(a, int(b.Exp))
+	if b.Exp < 0 {
+		var f Mpflt
+		mpmoveintfix(&f.Val, a) // f.Val = *a
+		f.Exp = 0
+		mpnorm(&f)
+		if mpcmpfltflt(b, &f) != 0 {
+			return -1
+		}
+	}
+
+	return 0
+}
+
 // convert (truncate) b to a.
 // return -1 (but still convert) if b was non-integer.
-func mpexactfltfix(a *Mpint, b *Mpflt) int {
+func _mpexactfltfix(a *Mpfix, b *Mpflt) int {
 	*a = b.Val
-	Mpshiftfix(a, int(b.Exp))
+	_Mpshiftfix(a, int(b.Exp))
 	if b.Exp < 0 {
 		var f Mpflt
 		f.Val = *a
@@ -176,7 +284,41 @@
 	return -1
 }
 
-func mpmovefixfix(a *Mpint, b *Mpint) {
+func _mpmovefltfix(a *Mpfix, b *Mpflt) int {
+	if _mpexactfltfix(a, b) == 0 {
+		return 0
+	}
+
+	// try rounding down a little
+	f := *b
+
+	f.Val.A[0] = 0
+	if _mpexactfltfix(a, &f) == 0 {
+		return 0
+	}
+
+	// try rounding up a little
+	for i := 1; i < Mpprec; i++ {
+		f.Val.A[i]++
+		if f.Val.A[i] != Mpbase {
+			break
+		}
+		f.Val.A[i] = 0
+	}
+
+	mpnorm(&f)
+	if _mpexactfltfix(a, &f) == 0 {
+		return 0
+	}
+
+	return -1
+}
+
+func mpmovefixfix(a, b *Mpint) {
+	a.Val.Set(&b.Val)
+}
+
+func _mpmovefixfix(a *Mpfix, b *Mpfix) {
 	*a = *b
 }
 
@@ -202,7 +344,7 @@
 	}
 }
 
-func mphextofix(a *Mpint, s string) {
+func mphextofix(a *Mpfix, s string) {
 	for s != "" && s[0] == '0' {
 		s = s[1:]
 	}
@@ -445,16 +587,35 @@
 	Mpmovecflt(a, 0.0)
 }
 
+func mpatofix(a *Mpint, as string) {
+	_, ok := a.Val.SetString(as, 0)
+	if !ok {
+		// required syntax is [+-][0[x]]d*
+		// At the moment we lose precise error cause;
+		// the old code distinguished between:
+		// - malformed hex constant
+		// - malformed octal constant
+		// - malformed decimal constant
+		// TODO(gri) use different conversion function
+		Yyerror("malformed integer constant: %s", as)
+		a.Val.SetUint64(0)
+		return
+	}
+	if mptestovf(a, 0) {
+		Yyerror("constant too large: %s", as)
+	}
+}
+
 //
 // fixed point input
 // required syntax is [+-][0[x]]d*
 //
-func mpatofix(a *Mpint, as string) {
+func _mpatofix(a *Mpfix, as string) {
 	var c int
 
 	s := as
 	f := 0
-	Mpmovecfix(a, 0)
+	_Mpmovecfix(a, 0)
 
 	c, s = intstarstringplusplus(s)
 	switch c {
@@ -525,36 +686,43 @@
 
 out:
 	if f != 0 {
-		mpnegfix(a)
+		_mpnegfix(a)
 	}
 	return
 
 bad:
-	Mpmovecfix(a, 0)
+	_Mpmovecfix(a, 0)
 }
 
 func Bconv(xval *Mpint, flag int) string {
-	var q Mpint
+	if flag&obj.FmtSharp != 0 {
+		return fmt.Sprintf("%#x", &xval.Val)
+	}
+	return xval.Val.String()
+}
 
-	mpmovefixfix(&q, xval)
+func _Bconv(xval *Mpfix, flag int) string {
+	var q Mpfix
+
+	_mpmovefixfix(&q, xval)
 	f := 0
 	if mptestfix(&q) < 0 {
 		f = 1
-		mpnegfix(&q)
+		_mpnegfix(&q)
 	}
 
 	var buf [500]byte
 	p := len(buf)
-	var r Mpint
+	var r Mpfix
 	if flag&obj.FmtSharp != 0 /*untyped*/ {
 		// Hexadecimal
-		var sixteen Mpint
-		Mpmovecfix(&sixteen, 16)
+		var sixteen Mpfix
+		_Mpmovecfix(&sixteen, 16)
 
 		var digit int
 		for {
 			mpdivmodfixfix(&q, &r, &q, &sixteen)
-			digit = int(Mpgetfix(&r))
+			digit = int(_Mpgetfix(&r))
 			if digit < 10 {
 				p--
 				buf[p] = byte(digit + '0')
@@ -573,13 +741,13 @@
 		buf[p] = '0'
 	} else {
 		// Decimal
-		var ten Mpint
-		Mpmovecfix(&ten, 10)
+		var ten Mpfix
+		_Mpmovecfix(&ten, 10)
 
 		for {
 			mpdivmodfixfix(&q, &r, &q, &ten)
 			p--
-			buf[p] = byte(Mpgetfix(&r) + '0')
+			buf[p] = byte(_Mpgetfix(&r) + '0')
 			if mptestfix(&q) <= 0 {
 				break
 			}
@@ -646,21 +814,21 @@
 	fv = *fvp
 
 	for fv.Val.A[0] == 0 {
-		Mpshiftfix(&fv.Val, -Mpscale)
+		_Mpshiftfix(&fv.Val, -Mpscale)
 		fv.Exp += Mpscale
 	}
 
 	for fv.Val.A[0]&1 == 0 {
-		Mpshiftfix(&fv.Val, -1)
+		_Mpshiftfix(&fv.Val, -1)
 		fv.Exp += 1
 	}
 
 	if fv.Exp >= 0 {
-		buf = fmt.Sprintf("%vp+%d", Bconv(&fv.Val, obj.FmtSharp), fv.Exp)
+		buf = fmt.Sprintf("%vp+%d", _Bconv(&fv.Val, obj.FmtSharp), fv.Exp)
 		goto out
 	}
 
-	buf = fmt.Sprintf("%vp-%d", Bconv(&fv.Val, obj.FmtSharp), -fv.Exp)
+	buf = fmt.Sprintf("%vp-%d", _Bconv(&fv.Val, obj.FmtSharp), -fv.Exp)
 
 out:
 	var fp string
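
The only substantive additions in mparith1.go are mpmoveintfix and mpmovefixint, which convert between the big.Int form and the word array that Mpflt's mantissa still uses. Here is a self-contained sketch of that round trip under assumed word-size constants; the CL's mpmoveintfix relies on Int.Uint64 and notes that its behavior is unspecified for large values, so the sketch masks explicitly instead.

	package main

	import (
		"fmt"
		"math/big"
	)

	// Assumed stand-ins for Mpscale, Mpprec and Mpmask.
	const (
		wordBits = 29
		numWords = 16
		wordMask = 1<<wordBits - 1
	)

	// split breaks |x| into little-endian words of wordBits bits each, the
	// way mpmoveintfix fills Mpfix.A; ok is false if x needs more than
	// numWords words (the case where the Mpfix overflows).
	func split(x *big.Int) (words [numWords]int, neg, ok bool) {
		var abs, lo big.Int
		abs.Abs(x)
		mask := big.NewInt(wordMask)
		for i := 0; i < numWords && abs.Sign() != 0; i++ {
			lo.And(&abs, mask)
			words[i] = int(lo.Int64())
			abs.Rsh(&abs, wordBits)
		}
		if abs.Sign() != 0 {
			return words, false, false
		}
		return words, x.Sign() < 0, true
	}

	// join folds the words back together from the most significant end,
	// which is the walk mpmovefixint performs.
	func join(words [numWords]int, neg bool) *big.Int {
		v := new(big.Int)
		var w big.Int
		for i := numWords - 1; i >= 0; i-- {
			v.Lsh(v, wordBits)
			v.Or(v, w.SetUint64(uint64(words[i]&wordMask)))
		}
		if neg {
			v.Neg(v)
		}
		return v
	}

	func main() {
		x, _ := new(big.Int).SetString("-123456789012345678901234567890", 10)
		words, neg, ok := split(x)
		fmt.Println(ok, join(words, neg).Cmp(x) == 0) // true true
	}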
diff --git a/src/cmd/internal/gc/mparith2.go b/src/cmd/internal/gc/mparith2.go
index 7e12913..f0a3742 100644
--- a/src/cmd/internal/gc/mparith2.go
+++ b/src/cmd/internal/gc/mparith2.go
@@ -8,7 +8,7 @@
 // return the significant
 // words of the argument
 //
-func mplen(a *Mpint) int {
+func mplen(a *Mpfix) int {
 	n := -1
 	for i := 0; i < Mpprec; i++ {
 		if a.A[i] != 0 {
@@ -23,7 +23,7 @@
 // left shift mpint by one
 // ignores sign
 //
-func mplsh(a *Mpint, quiet int) {
+func mplsh(a *Mpfix, quiet int) {
 	var x int
 
 	c := 0
@@ -48,7 +48,7 @@
 // left shift mpint by Mpscale
 // ignores sign
 //
-func mplshw(a *Mpint, quiet int) {
+func mplshw(a *Mpfix, quiet int) {
 	i := Mpprec - 1
 	if a.A[i] != 0 {
 		a.Ovf = 1
@@ -67,7 +67,7 @@
 // right shift mpint by one
 // ignores sign and overflow
 //
-func mprsh(a *Mpint) {
+func mprsh(a *Mpfix) {
 	var x int
 
 	c := 0
@@ -90,7 +90,7 @@
 // right shift mpint by Mpscale
 // ignores sign and overflow
 //
-func mprshw(a *Mpint) {
+func mprshw(a *Mpfix) {
 	var i int
 
 	lo := a.A[0]
@@ -107,7 +107,7 @@
 //
 // return the sign of (abs(a)-abs(b))
 //
-func mpcmp(a *Mpint, b *Mpint) int {
+func mpcmp(a *Mpfix, b *Mpfix) int {
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in cmp")
@@ -133,7 +133,7 @@
 // negate a
 // ignore sign and ovf
 //
-func mpneg(a *Mpint) {
+func mpneg(a *Mpfix) {
 	var x int
 
 	c := 0
@@ -149,8 +149,21 @@
 	}
 }
 
-// shift left by s (or right by -s)
 func Mpshiftfix(a *Mpint, s int) {
+	switch {
+	case s > 0:
+		if mptestovf(a, s) {
+			Yyerror("constant shift overflow")
+			return
+		}
+		a.Val.Lsh(&a.Val, uint(s))
+	case s < 0:
+		a.Val.Rsh(&a.Val, uint(-s))
+	}
+}
+
+// shift left by s (or right by -s)
+func _Mpshiftfix(a *Mpfix, s int) {
 	if s >= 0 {
 		for s >= Mpscale {
 			mplshw(a, 0)
@@ -175,9 +188,40 @@
 	}
 }
 
-/// implements fix arihmetic
+/// implements fix arithmetic
 
-func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
+func mpsetovf(a *Mpint) {
+	a.Val.SetUint64(0)
+	a.Ovf = true
+}
+
+func mptestovf(a *Mpint, extra int) bool {
+	// We don't need to be precise here, any reasonable upper limit would do.
+	// For now, use existing limit so we pass all the tests unchanged.
+	const limit = Mpscale * Mpprec
+	if a.Val.BitLen()+extra > limit {
+		mpsetovf(a)
+	}
+	return a.Ovf
+}
+
+func mpaddfixfix(a, b *Mpint, quiet int) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpaddxx")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Add(&a.Val, &b.Val)
+
+	if mptestovf(a, 0) && quiet == 0 {
+		Yyerror("constant addition overflow")
+	}
+}
+
+func _mpaddfixfix(a *Mpfix, b *Mpfix, quiet int) {
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpaddxx")
@@ -191,7 +235,7 @@
 		// perform a-b
 		switch mpcmp(a, b) {
 		case 0:
-			Mpmovecfix(a, 0)
+			_Mpmovecfix(a, 0)
 
 		case 1:
 			var x int
@@ -244,7 +288,23 @@
 	return
 }
 
-func mpmulfixfix(a *Mpint, b *Mpint) {
+func mpmulfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpmulfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Mul(&a.Val, &b.Val)
+
+	if mptestovf(a, 0) {
+		Yyerror("constant multiplication overflow")
+	}
+}
+
+func _mpmulfixfix(a *Mpfix, b *Mpfix) {
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpmulfixfix")
@@ -258,21 +318,21 @@
 	na := mplen(a)
 
 	nb := mplen(b)
-	var s Mpint
-	var c *Mpint
+	var s Mpfix
+	var c *Mpfix
 	if na > nb {
-		mpmovefixfix(&s, a)
+		_mpmovefixfix(&s, a)
 		c = b
 		na = nb
 	} else {
-		mpmovefixfix(&s, b)
+		_mpmovefixfix(&s, b)
 		c = a
 	}
 
 	s.Neg = 0
 
-	var q Mpint
-	Mpmovecfix(&q, 0)
+	var q Mpfix
+	_Mpmovecfix(&q, 0)
 	var j int
 	var x int
 	for i := 0; i < na; i++ {
@@ -284,7 +344,7 @@
 					goto out
 				}
 
-				mpaddfixfix(&q, &s, 1)
+				_mpaddfixfix(&q, &s, 1)
 				if q.Ovf != 0 {
 					goto out
 				}
@@ -297,13 +357,13 @@
 
 out:
 	q.Neg = a.Neg ^ b.Neg
-	mpmovefixfix(a, &q)
+	_mpmovefixfix(a, &q)
 	if a.Ovf != 0 {
 		Yyerror("constant multiplication overflow")
 	}
 }
 
-func mpmulfract(a *Mpint, b *Mpint) {
+func mpmulfract(a *Mpfix, b *Mpfix) {
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpmulflt")
@@ -312,11 +372,11 @@
 		return
 	}
 
-	var s Mpint
-	mpmovefixfix(&s, b)
+	var s Mpfix
+	_mpmovefixfix(&s, b)
 	s.Neg = 0
-	var q Mpint
-	Mpmovecfix(&q, 0)
+	var q Mpfix
+	_Mpmovecfix(&q, 0)
 
 	i := Mpprec - 1
 	x := a.A[i]
@@ -335,26 +395,38 @@
 		for j = 0; j < Mpscale; j++ {
 			x <<= 1
 			if x&Mpbase != 0 {
-				mpaddfixfix(&q, &s, 1)
+				_mpaddfixfix(&q, &s, 1)
 			}
 			mprsh(&s)
 		}
 	}
 
 	q.Neg = a.Neg ^ b.Neg
-	mpmovefixfix(a, &q)
+	_mpmovefixfix(a, &q)
 	if a.Ovf != 0 {
 		Yyerror("constant multiplication overflow")
 	}
 }
 
-func mporfixfix(a *Mpint, b *Mpint) {
+func mporfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mporfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Or(&a.Val, &b.Val)
+}
+
+func _mporfixfix(a *Mpfix, b *Mpfix) {
 	x := 0
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mporfixfix")
 		}
-		Mpmovecfix(a, 0)
+		_Mpmovecfix(a, 0)
 		a.Ovf = 1
 		return
 	}
@@ -382,13 +454,25 @@
 	}
 }
 
-func mpandfixfix(a *Mpint, b *Mpint) {
+func mpandfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpandfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.And(&a.Val, &b.Val)
+}
+
+func _mpandfixfix(a *Mpfix, b *Mpfix) {
 	x := 0
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpandfixfix")
 		}
-		Mpmovecfix(a, 0)
+		_Mpmovecfix(a, 0)
 		a.Ovf = 1
 		return
 	}
@@ -416,13 +500,25 @@
 	}
 }
 
-func mpandnotfixfix(a *Mpint, b *Mpint) {
+func mpandnotfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpandnotfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.AndNot(&a.Val, &b.Val)
+}
+
+func _mpandnotfixfix(a *Mpfix, b *Mpfix) {
 	x := 0
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mpandnotfixfix")
 		}
-		Mpmovecfix(a, 0)
+		_Mpmovecfix(a, 0)
 		a.Ovf = 1
 		return
 	}
@@ -450,13 +546,25 @@
 	}
 }
 
-func mpxorfixfix(a *Mpint, b *Mpint) {
+func mpxorfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mpxorfixfix")
+		}
+		mpsetovf(a)
+		return
+	}
+
+	a.Val.Xor(&a.Val, &b.Val)
+}
+
+func _mpxorfixfix(a *Mpfix, b *Mpfix) {
 	x := 0
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mporfixfix")
 		}
-		Mpmovecfix(a, 0)
+		_Mpmovecfix(a, 0)
 		a.Ovf = 1
 		return
 	}
@@ -484,13 +592,12 @@
 	}
 }
 
-func mplshfixfix(a *Mpint, b *Mpint) {
-	if a.Ovf != 0 || b.Ovf != 0 {
+func mplshfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
 		if nsavederrors+nerrors == 0 {
-			Yyerror("ovf in mporfixfix")
+			Yyerror("ovf in mplshfixfix")
 		}
-		Mpmovecfix(a, 0)
-		a.Ovf = 1
+		mpsetovf(a)
 		return
 	}
 
@@ -504,20 +611,39 @@
 	Mpshiftfix(a, int(s))
 }
 
-func mprshfixfix(a *Mpint, b *Mpint) {
+func _mplshfixfix(a *Mpfix, b *Mpfix) {
 	if a.Ovf != 0 || b.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mplshfixfix")
+		}
+		_Mpmovecfix(a, 0)
+		a.Ovf = 1
+		return
+	}
+
+	s := _Mpgetfix(b)
+	if s < 0 || s >= Mpprec*Mpscale {
+		Yyerror("stupid shift: %d", s)
+		_Mpmovecfix(a, 0)
+		return
+	}
+
+	_Mpshiftfix(a, int(s))
+}
+
+func mprshfixfix(a, b *Mpint) {
+	if a.Ovf || b.Ovf {
+		if nsavederrors+nerrors == 0 {
 			Yyerror("ovf in mprshfixfix")
 		}
-		Mpmovecfix(a, 0)
-		a.Ovf = 1
+		mpsetovf(a)
 		return
 	}
 
 	s := Mpgetfix(b)
 	if s < 0 || s >= Mpprec*Mpscale {
 		Yyerror("stupid shift: %d", s)
-		if a.Neg != 0 {
+		if a.Val.Sign() < 0 {
 			Mpmovecfix(a, -1)
 		} else {
 			Mpmovecfix(a, 0)
@@ -528,11 +654,50 @@
 	Mpshiftfix(a, int(-s))
 }
 
+func _mprshfixfix(a *Mpfix, b *Mpfix) {
+	if a.Ovf != 0 || b.Ovf != 0 {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in mprshfixfix")
+		}
+		_Mpmovecfix(a, 0)
+		a.Ovf = 1
+		return
+	}
+
+	s := _Mpgetfix(b)
+	if s < 0 || s >= Mpprec*Mpscale {
+		Yyerror("stupid shift: %d", s)
+		if a.Neg != 0 {
+			_Mpmovecfix(a, -1)
+		} else {
+			_Mpmovecfix(a, 0)
+		}
+		return
+	}
+
+	_Mpshiftfix(a, int(-s))
+}
+
 func mpnegfix(a *Mpint) {
+	a.Val.Neg(&a.Val)
+}
+
+func _mpnegfix(a *Mpfix) {
 	a.Neg ^= 1
 }
 
 func Mpgetfix(a *Mpint) int64 {
+	if a.Ovf {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("constant overflow")
+		}
+		return 0
+	}
+
+	return a.Val.Int64()
+}
+
+func _Mpgetfix(a *Mpfix) int64 {
 	if a.Ovf != 0 {
 		if nsavederrors+nerrors == 0 {
 			Yyerror("constant overflow")
@@ -550,6 +715,10 @@
 }
 
 func Mpmovecfix(a *Mpint, c int64) {
+	a.Val.SetInt64(c)
+}
+
+func _Mpmovecfix(a *Mpfix, c int64) {
 	a.Neg = 0
 	a.Ovf = 0
 
@@ -565,7 +734,7 @@
 	}
 }
 
-func mpdivmodfixfix(q *Mpint, r *Mpint, n *Mpint, d *Mpint) {
+func mpdivmodfixfix(q *Mpfix, r *Mpfix, n *Mpfix, d *Mpfix) {
 	var i int
 
 	ns := int(n.Neg)
@@ -573,8 +742,8 @@
 	n.Neg = 0
 	d.Neg = 0
 
-	mpmovefixfix(r, n)
-	Mpmovecfix(q, 0)
+	_mpmovefixfix(r, n)
+	_Mpmovecfix(q, 0)
 
 	// shift denominator until it
 	// is larger than numerator
@@ -605,7 +774,7 @@
 		mprsh(d)
 		if mpcmp(d, r) <= 0 {
 			mpaddcfix(q, 1)
-			mpsubfixfix(r, d)
+			_mpsubfixfix(r, d)
 		}
 	}
 
@@ -615,7 +784,7 @@
 	q.Neg = uint8(ns ^ ds)
 }
 
-func mpiszero(a *Mpint) bool {
+func mpiszero(a *Mpfix) bool {
 	for i := Mpprec - 1; i >= 0; i-- {
 		if a.A[i] != 0 {
 			return false
@@ -624,14 +793,14 @@
 	return true
 }
 
-func mpdivfract(a *Mpint, b *Mpint) {
-	var n Mpint
-	var d Mpint
+func mpdivfract(a *Mpfix, b *Mpfix) {
+	var n Mpfix
+	var d Mpfix
 	var j int
 	var x int
 
-	mpmovefixfix(&n, a) // numerator
-	mpmovefixfix(&d, b) // denominator
+	_mpmovefixfix(&n, a) // numerator
+	_mpmovefixfix(&d, b) // denominator
 
 	neg := int(n.Neg) ^ int(d.Neg)
 
@@ -645,7 +814,7 @@
 				if !mpiszero(&d) {
 					x |= 1
 				}
-				mpsubfixfix(&n, &d)
+				_mpsubfixfix(&n, &d)
 			}
 
 			mprsh(&d)
@@ -657,10 +826,10 @@
 	a.Neg = uint8(neg)
 }
 
-func mptestfix(a *Mpint) int {
-	var b Mpint
+func mptestfix(a *Mpfix) int {
+	var b Mpfix
 
-	Mpmovecfix(&b, 0)
+	_Mpmovecfix(&b, 0)
 	r := mpcmp(a, &b)
 	if a.Neg != 0 {
 		if r > 0 {
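
Most of mparith2.go above is the old word-array code kept under _ names; the new pieces are the cap check (mpsetovf/mptestovf) and the shift that now goes through Lsh/Rsh. A simplified sketch of that shift logic follows; it returns an error instead of setting the sticky flag, and the cap value is an assumption.

	package main

	import (
		"fmt"
		"math/big"
	)

	const bitLimit = 29 * 16 // stand-in for the Mpscale*Mpprec cap kept by the CL

	// shiftFix follows the new Mpshiftfix: shift left by s or right by -s,
	// refusing a left shift that would push the value past the cap.
	func shiftFix(v *big.Int, s int) error {
		switch {
		case s > 0:
			if v.BitLen()+s > bitLimit {
				return fmt.Errorf("constant shift overflow")
			}
			v.Lsh(v, uint(s))
		case s < 0:
			v.Rsh(v, uint(-s))
		}
		return nil
	}

	func main() {
		v := big.NewInt(1)
		fmt.Println(shiftFix(v, 100), v.BitLen()) // <nil> 101
		fmt.Println(shiftFix(v, -100), v)         // <nil> 1
		fmt.Println(shiftFix(v, bitLimit), v)     // constant shift overflow 1
	}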
diff --git a/src/cmd/internal/gc/mparith3.go b/src/cmd/internal/gc/mparith3.go
index 103c53d..57263a0 100644
--- a/src/cmd/internal/gc/mparith3.go
+++ b/src/cmd/internal/gc/mparith3.go
@@ -80,7 +80,7 @@
 		}
 	}
 
-	Mpshiftfix(&a.Val, s)
+	_Mpshiftfix(&a.Val, s)
 	mpsetexp(a, int(a.Exp)-s)
 }
 
@@ -110,21 +110,21 @@
 		var c Mpflt
 		mpmovefltflt(&c, b)
 
-		Mpshiftfix(&c.Val, -s)
-		mpaddfixfix(&a.Val, &c.Val, 0)
+		_Mpshiftfix(&c.Val, -s)
+		_mpaddfixfix(&a.Val, &c.Val, 0)
 		goto out
 	}
 
 	if s < 0 {
 		// b is larger, shift a right
-		Mpshiftfix(&a.Val, s)
+		_Mpshiftfix(&a.Val, s)
 
 		mpsetexp(a, int(a.Exp)-s)
-		mpaddfixfix(&a.Val, &b.Val, 0)
+		_mpaddfixfix(&a.Val, &b.Val, 0)
 		goto out
 	}
 
-	mpaddfixfix(&a.Val, &b.Val, 0)
+	_mpaddfixfix(&a.Val, &b.Val, 0)
 
 out:
 	mpnorm(a)
@@ -193,7 +193,7 @@
 	var c Mpflt
 	mpmovefltflt(&c, b)
 
-	Mpshiftfix(&c.Val, Mpscale)
+	_Mpshiftfix(&c.Val, Mpscale)
 
 	// divide
 	mpdivfract(&a.Val, &c.Val)
@@ -222,7 +222,7 @@
 	}
 
 	for a.Val.A[Mpnorm-1]&Mpsign == 0 {
-		Mpshiftfix(&a.Val, 1)
+		_Mpshiftfix(&a.Val, 1)
 		mpsetexp(a, int(a.Exp)-1) // can set 'a' to zero
 		s = sigfig(a)
 		if s == 0 {
@@ -298,7 +298,7 @@
 	if Mpdebug != 0 /*TypeKind(100016)*/ {
 		fmt.Printf("\nconst %g", c)
 	}
-	Mpmovecfix(&a.Val, 0)
+	_Mpmovecfix(&a.Val, 0)
 	a.Exp = 0
 	var f float64
 	var l int
@@ -323,7 +323,7 @@
 		if f == 0 {
 			break
 		}
-		Mpshiftfix(&a.Val, Mpscale)
+		_Mpshiftfix(&a.Val, Mpscale)
 	}
 
 out:
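
Finally, a note on the new Bconv in mparith1.go above: the hand-rolled division loop survives only for Mpfix (_Bconv), while the big.Int-backed Mpint delegates formatting to the verbs big.Int already implements. A tiny sketch of that equivalence:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		x := big.NewInt(-255)
		fmt.Println(x.String())            // -255  (decimal, like Bconv with flag 0)
		fmt.Println(fmt.Sprintf("%#x", x)) // -0xff (hex, like Bconv with obj.FmtSharp)
	}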