[dev.cc] cmd/internal/obj, cmd/internal/gc, new6g: reconvert

Reconvert using rsc.io/c2go rev 27b3f59.

Changes to converter:
 - fatal does not return, so no fallthrough after fatal in switch
 - many more function results and variables identified as bool
 - simplification of negated boolean expressions

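A minimal sketch of the second and third rules (hand-written for illustration;
the Bits/bany declarations mirror ones rewritten in the diff below, with BITS
fixed at 5 here for self-containment):

	// Predicates now return bool instead of int, and conditions are
	// written directly rather than as !(x != 0).
	type Bits struct{ b [5]uint64 }

	func bany(a *Bits) bool {
		for i := 0; i < len(a.b); i++ {
			if a.b[i] != 0 {
				return true
			}
		}
		return false
	}
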
Change-Id: I3bc67da5e46cb7ee613e230cf7e9533036cc870b
Reviewed-on: https://go-review.googlesource.com/5171
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
diff --git a/src/cmd/internal/gc/align.go b/src/cmd/internal/gc/align.go
index 062d35a..994b7a2 100644
--- a/src/cmd/internal/gc/align.go
+++ b/src/cmd/internal/gc/align.go
@@ -138,7 +138,7 @@
 	if t.Width == -2 {
 		lno = int(lineno)
 		lineno = int32(t.Lineno)
-		if !(t.Broke != 0) {
+		if t.Broke == 0 {
 			t.Broke = 1
 			Yyerror("invalid recursive type %v", Tconv(t, 0))
 		}
@@ -253,14 +253,14 @@
 		checkwidth(t.Down)
 
 	case TFORW: // should have been filled in
-		if !(t.Broke != 0) {
+		if t.Broke == 0 {
 			Yyerror("invalid recursive type %v", Tconv(t, 0))
 		}
 		w = 1 // anything will do
 
 		// dummy type; should be replaced before use.
 	case TANY:
-		if !(Debug['A'] != 0) {
+		if Debug['A'] == 0 {
 			Fatal("dowidth any")
 		}
 		w = 1 // anything will do
@@ -294,7 +294,7 @@
 			checkwidth(t.Type)
 			t.Align = uint8(Widthptr)
 		} else if t.Bound == -100 {
-			if !(t.Broke != 0) {
+			if t.Broke == 0 {
 				Yyerror("use of [...] array outside of array literal")
 				t.Broke = 1
 			}
@@ -394,7 +394,7 @@
 		Fatal("checkwidth %v", Tconv(t, 0))
 	}
 
-	if !(defercalc != 0) {
+	if defercalc == 0 {
 		dowidth(t)
 		return
 	}
@@ -427,7 +427,7 @@
 func resumecheckwidth() {
 	var l *TypeList
 
-	if !(defercalc != 0) {
+	if defercalc == 0 {
 		Fatal("resumecheckwidth")
 	}
 	for l = tlq; l != nil; l = tlq {
diff --git a/src/cmd/internal/gc/bits.go b/src/cmd/internal/gc/bits.go
index 613e15d..23da356 100644
--- a/src/cmd/internal/gc/bits.go
+++ b/src/cmd/internal/gc/bits.go
@@ -66,15 +66,15 @@
 	return c;
 }
 */
-func bany(a *Bits) int {
+func bany(a *Bits) bool {
 	var i int
 
 	for i = 0; i < BITS; i++ {
 		if a.b[i] != 0 {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 /*
@@ -112,8 +112,8 @@
 	return c
 }
 
-func btest(a *Bits, n uint) int {
-	return bool2int(a.b[n/64]&(1<<(n%64)) != 0)
+func btest(a *Bits, n uint) bool {
+	return a.b[n/64]&(1<<(n%64)) != 0
 }
 
 func biset(a *Bits, n uint) {
@@ -144,7 +144,7 @@
 
 	first = 1
 
-	for bany(&bits) != 0 {
+	for bany(&bits) {
 		i = bnum(bits)
 		if first != 0 {
 			first = 0
diff --git a/src/cmd/internal/gc/bv.go b/src/cmd/internal/gc/bv.go
index 998a1f5..002b5a4 100644
--- a/src/cmd/internal/gc/bv.go
+++ b/src/cmd/internal/gc/bv.go
@@ -120,15 +120,15 @@
 	return int(i)
 }
 
-func bvisempty(bv *Bvec) int {
+func bvisempty(bv *Bvec) bool {
 	var i int32
 
 	for i = 0; i < bv.n; i += WORDBITS {
 		if bv.b[i>>WORDSHIFT] != 0 {
-			return 0
+			return false
 		}
 	}
-	return 1
+	return true
 }
 
 func bvnot(bv *Bvec) {
diff --git a/src/cmd/internal/gc/closure.go b/src/cmd/internal/gc/closure.go
index 5a1ae65..4c9b0af 100644
--- a/src/cmd/internal/gc/closure.go
+++ b/src/cmd/internal/gc/closure.go
@@ -91,7 +91,7 @@
 
 	for l = func_.Cvars; l != nil; l = l.Next {
 		n = l.N.Closure
-		if !(n.Captured != 0) {
+		if n.Captured == 0 {
 			n.Captured = 1
 			if n.Decldepth == 0 {
 				Fatal("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
@@ -218,7 +218,7 @@
 		v.Outerexpr = nil
 
 		// out parameters will be assigned to implicitly upon return.
-		if outer.Class != PPARAMOUT && !(v.Closure.Addrtaken != 0) && !(v.Closure.Assigned != 0) && v.Type.Width <= 128 {
+		if outer.Class != PPARAMOUT && v.Closure.Addrtaken == 0 && v.Closure.Assigned == 0 && v.Type.Width <= 128 {
 			v.Byval = 1
 		} else {
 			v.Closure.Addrtaken = 1
@@ -351,7 +351,7 @@
 			cv = Nod(OCLOSUREVAR, nil, nil)
 
 			cv.Type = v.Type
-			if !(v.Byval != 0) {
+			if v.Byval == 0 {
 				cv.Type = Ptrto(v.Type)
 			}
 			offset = Rnd(offset, int64(cv.Type.Align))
@@ -389,7 +389,7 @@
 		typechecklist(body, Etop)
 		walkstmtlist(body)
 		xfunc.Enter = body
-		xfunc.Needctxt = uint8(bool2int(nvar > 0))
+		xfunc.Needctxt = nvar > 0
 	}
 
 	lineno = int32(lno)
@@ -430,7 +430,7 @@
 			continue
 		}
 		typ1 = typenod(v.Type)
-		if !(v.Byval != 0) {
+		if v.Byval == 0 {
 			typ1 = Nod(OIND, typ1, nil)
 		}
 		typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
@@ -594,7 +594,7 @@
 	// Declare and initialize variable holding receiver.
 	body = nil
 
-	xfunc.Needctxt = 1
+	xfunc.Needctxt = true
 	cv = Nod(OCLOSUREVAR, nil, nil)
 	cv.Xoffset = int64(Widthptr)
 	cv.Type = rcvrtype
@@ -609,7 +609,7 @@
 	ptr.Used = 1
 	ptr.Curfn = xfunc
 	xfunc.Dcl = list(xfunc.Dcl, ptr)
-	if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) != 0 {
+	if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) {
 		ptr.Ntype = typenod(rcvrtype)
 		body = list(body, Nod(OAS, ptr, cv))
 	} else {
@@ -652,7 +652,7 @@
 	//
 	// Like walkclosure above.
 
-	if Isinter(n.Left.Type) != 0 {
+	if Isinter(n.Left.Type) {
 		// Trigger panic for method on nil interface now.
 		// Otherwise it happens in the wrapper and is confusing.
 		n.Left = cheapexpr(n.Left, init)
diff --git a/src/cmd/internal/gc/const.go b/src/cmd/internal/gc/const.go
index c8c244b..f9bd557 100644
--- a/src/cmd/internal/gc/const.go
+++ b/src/cmd/internal/gc/const.go
@@ -47,7 +47,7 @@
  * implicit conversion.
  */
 func Convlit(np **Node, t *Type) {
-	convlit1(np, t, 0)
+	convlit1(np, t, false)
 }
 
 /*
@@ -55,17 +55,17 @@
  * return a new node if necessary
  * (if n is a named constant, can't edit n->type directly).
  */
-func convlit1(np **Node, t *Type, explicit int) {
+func convlit1(np **Node, t *Type, explicit bool) {
 	var ct int
 	var et int
 	var n *Node
 	var nn *Node
 
 	n = *np
-	if n == nil || t == nil || n.Type == nil || isideal(t) != 0 || n.Type == t {
+	if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
 		return
 	}
-	if !(explicit != 0) && !(isideal(n.Type) != 0) {
+	if !explicit && !isideal(n.Type) {
 		return
 	}
 
@@ -96,7 +96,7 @@
 
 		// target is invalid type for a constant?  leave alone.
 	case OLITERAL:
-		if !(okforconst[t.Etype] != 0) && n.Type.Etype != TNIL {
+		if okforconst[t.Etype] == 0 && n.Type.Etype != TNIL {
 			defaultlit(&n, nil)
 			*np = n
 			return
@@ -104,12 +104,12 @@
 
 	case OLSH,
 		ORSH:
-		convlit1(&n.Left, t, bool2int(explicit != 0 && isideal(n.Left.Type) != 0))
+		convlit1(&n.Left, t, explicit && isideal(n.Left.Type))
 		t = n.Left.Type
 		if t != nil && t.Etype == TIDEAL && n.Val.Ctype != CTINT {
 			n.Val = toint(n.Val)
 		}
-		if t != nil && !(Isint[t.Etype] != 0) {
+		if t != nil && Isint[t.Etype] == 0 {
 			Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
 			t = nil
 		}
@@ -179,7 +179,7 @@
 			return
 
 		case TARRAY:
-			if !(Isslice(t) != 0) {
+			if !Isslice(t) {
 				goto bad
 			}
 
@@ -258,7 +258,7 @@
 			case CTCPLX:
 				overflow(n.Val, t)
 			}
-		} else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit != 0 {
+		} else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit {
 			n.Val = tostr(n.Val)
 		} else {
 			goto bad
@@ -269,14 +269,14 @@
 	return
 
 bad:
-	if !(n.Diag != 0) {
-		if !(t.Broke != 0) {
+	if n.Diag == 0 {
+		if t.Broke == 0 {
 			Yyerror("cannot convert %v to type %v", Nconv(n, 0), Tconv(t, 0))
 		}
 		n.Diag = 1
 	}
 
-	if isideal(n.Type) != 0 {
+	if isideal(n.Type) {
 		defaultlit(&n, nil)
 		*np = n
 	}
@@ -388,35 +388,35 @@
 	return v
 }
 
-func doesoverflow(v Val, t *Type) int {
+func doesoverflow(v Val, t *Type) bool {
 	switch v.Ctype {
 	case CTINT,
 		CTRUNE:
-		if !(Isint[t.Etype] != 0) {
+		if Isint[t.Etype] == 0 {
 			Fatal("overflow: %v integer constant", Tconv(t, 0))
 		}
 		if Mpcmpfixfix(v.U.Xval, Minintval[t.Etype]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[t.Etype]) > 0 {
-			return 1
+			return true
 		}
 
 	case CTFLT:
-		if !(Isfloat[t.Etype] != 0) {
+		if Isfloat[t.Etype] == 0 {
 			Fatal("overflow: %v floating-point constant", Tconv(t, 0))
 		}
 		if mpcmpfltflt(v.U.Fval, minfltval[t.Etype]) <= 0 || mpcmpfltflt(v.U.Fval, maxfltval[t.Etype]) >= 0 {
-			return 1
+			return true
 		}
 
 	case CTCPLX:
-		if !(Iscomplex[t.Etype] != 0) {
+		if Iscomplex[t.Etype] == 0 {
 			Fatal("overflow: %v complex constant", Tconv(t, 0))
 		}
 		if mpcmpfltflt(&v.U.Cval.Real, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Real, maxfltval[t.Etype]) >= 0 || mpcmpfltflt(&v.U.Cval.Imag, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Imag, maxfltval[t.Etype]) >= 0 {
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 func overflow(v Val, t *Type) {
@@ -426,7 +426,7 @@
 		return
 	}
 
-	if !(doesoverflow(v, t) != 0) {
+	if !doesoverflow(v, t) {
 		return
 	}
 
@@ -479,14 +479,14 @@
 	return int(n.Val.Ctype)
 }
 
-func Isconst(n *Node, ct int) int {
+func Isconst(n *Node, ct int) bool {
 	var t int
 
 	t = consttype(n)
 
 	// If the caller is asking for CTINT, allow CTRUNE too.
 	// Makes life easier for back ends.
-	return bool2int(t == ct || (ct == CTINT && t == CTRUNE))
+	return t == ct || (ct == CTINT && t == CTRUNE)
 }
 
 func saveorig(n *Node) *Node {
@@ -557,18 +557,18 @@
 		if n.Type == nil {
 			return
 		}
-		if !(okforconst[n.Type.Etype] != 0) && n.Type.Etype != TNIL {
+		if okforconst[n.Type.Etype] == 0 && n.Type.Etype != TNIL {
 			return
 		}
 
 		// merge adjacent constants in the argument list.
 	case OADDSTR:
 		for l1 = n.List; l1 != nil; l1 = l1.Next {
-			if Isconst(l1.N, CTSTR) != 0 && l1.Next != nil && Isconst(l1.Next.N, CTSTR) != 0 {
+			if Isconst(l1.N, CTSTR) && l1.Next != nil && Isconst(l1.Next.N, CTSTR) {
 				// merge from l1 up to but not including l2
 				str = new(Strlit)
 				l2 = l1
-				for l2 != nil && Isconst(l2.N, CTSTR) != 0 {
+				for l2 != nil && Isconst(l2.N, CTSTR) {
 					nr = l2.N
 					str.S += nr.Val.U.Sval.S
 					l2 = l2.Next
@@ -590,7 +590,7 @@
 		}
 
 		// collapse single-constant list to single constant.
-		if count(n.List) == 1 && Isconst(n.List.N, CTSTR) != 0 {
+		if count(n.List) == 1 && Isconst(n.List.N, CTSTR) {
 			n.Op = OLITERAL
 			n.Val = n.List.N.Val
 		}
@@ -655,7 +655,7 @@
 		defaultlit(&nr, Types[TUINT])
 
 		n.Right = nr
-		if nr.Type != nil && (Issigned[nr.Type.Etype] != 0 || !(Isint[nr.Type.Etype] != 0)) {
+		if nr.Type != nil && (Issigned[nr.Type.Etype] != 0 || Isint[nr.Type.Etype] == 0) {
 			goto illegal
 		}
 		if nl.Val.Ctype != CTRUNE {
@@ -787,7 +787,7 @@
 		// The default case above would print 'ideal % ideal',
 	// which is not quite an ideal error.
 	case OMOD<<16 | CTFLT:
-		if !(n.Diag != 0) {
+		if n.Diag == 0 {
 			Yyerror("illegal constant expression: floating-point % operation")
 			n.Diag = 1
 		}
@@ -985,7 +985,7 @@
 
 	switch uint32(n.Op)<<16 | uint32(v.Ctype) {
 	default:
-		if !(n.Diag != 0) {
+		if n.Diag == 0 {
 			Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
 			n.Diag = 1
 		}
@@ -1006,7 +1006,7 @@
 		OCONV<<16 | CTRUNE,
 		OCONV<<16 | CTFLT,
 		OCONV<<16 | CTSTR:
-		convlit1(&nl, n.Type, 1)
+		convlit1(&nl, n.Type, true)
 
 		v = nl.Val
 
@@ -1058,7 +1058,7 @@
 		mpnegflt(&v.U.Cval.Imag)
 
 	case ONOT<<16 | CTBOOL:
-		if !(v.U.Bval != 0) {
+		if v.U.Bval == 0 {
 			goto settrue
 		}
 		goto setfalse
@@ -1087,18 +1087,18 @@
 
 settrue:
 	norig = saveorig(n)
-	*n = *Nodbool(1)
+	*n = *Nodbool(true)
 	n.Orig = norig
 	return
 
 setfalse:
 	norig = saveorig(n)
-	*n = *Nodbool(0)
+	*n = *Nodbool(false)
 	n.Orig = norig
 	return
 
 illegal:
-	if !(n.Diag != 0) {
+	if n.Diag == 0 {
 		Yyerror("illegal constant expression: %v %v %v", Tconv(nl.Type, 0), Oconv(int(n.Op), 0), Tconv(nr.Type, 0))
 		n.Diag = 1
 	}
@@ -1114,7 +1114,6 @@
 	switch v.Ctype {
 	default:
 		Fatal("nodlit ctype %d", v.Ctype)
-		fallthrough
 
 	case CTSTR:
 		n.Type = idealstring
@@ -1163,7 +1162,7 @@
 	var k1 int
 	var k2 int
 
-	if n == nil || !(isideal(n.Type) != 0) {
+	if n == nil || !isideal(n.Type) {
 		return CTxxx
 	}
 
@@ -1235,7 +1234,7 @@
 	var t1 *Type
 
 	n = *np
-	if n == nil || !(isideal(n.Type) != 0) {
+	if n == nil || !isideal(n.Type) {
 		return
 	}
 
@@ -1257,7 +1256,7 @@
 
 		if n.Val.Ctype == CTNIL {
 			lineno = int32(lno)
-			if !(n.Diag != 0) {
+			if n.Diag == 0 {
 				Yyerror("use of untyped nil")
 				n.Diag = 1
 			}
@@ -1341,17 +1340,17 @@
 	if l.Type == nil || r.Type == nil {
 		return
 	}
-	if !(isideal(l.Type) != 0) {
+	if !isideal(l.Type) {
 		Convlit(rp, l.Type)
 		return
 	}
 
-	if !(isideal(r.Type) != 0) {
+	if !isideal(r.Type) {
 		Convlit(lp, r.Type)
 		return
 	}
 
-	if !(force != 0) {
+	if force == 0 {
 		return
 	}
 	if l.Type.Etype == TBOOL {
@@ -1387,8 +1386,8 @@
 	return stringsCompare(l.Val.U.Sval.S, r.Val.U.Sval.S)
 }
 
-func Smallintconst(n *Node) int {
-	if n.Op == OLITERAL && Isconst(n, CTINT) != 0 && n.Type != nil {
+func Smallintconst(n *Node) bool {
+	if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
 		switch Simtype[n.Type.Etype] {
 		case TINT8,
 			TUINT8,
@@ -1398,7 +1397,7 @@
 			TUINT32,
 			TBOOL,
 			TPTR32:
-			return 1
+			return true
 
 		case TIDEAL,
 			TINT64,
@@ -1407,11 +1406,11 @@
 			if Mpcmpfixfix(n.Val.U.Xval, Minintval[TINT32]) < 0 || Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT32]) > 0 {
 				break
 			}
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 func nonnegconst(n *Node) int {
@@ -1491,7 +1490,6 @@
 		switch val.Ctype {
 		default:
 			Fatal("convconst ctype=%d %v", val.Ctype, Tconv(t, obj.FmtLong))
-			fallthrough
 
 		case CTINT,
 			CTRUNE:
@@ -1615,7 +1613,7 @@
 // may be known at compile time, are not Go language constants.
 // Only called for expressions known to evaluated to compile-time
 // constants.
-func isgoconst(n *Node) int {
+func isgoconst(n *Node) bool {
 	var l *Node
 	var t *Type
 
@@ -1652,20 +1650,20 @@
 		OCOMPLEX,
 		OREAL,
 		OIMAG:
-		if isgoconst(n.Left) != 0 && (n.Right == nil || isgoconst(n.Right) != 0) {
-			return 1
+		if isgoconst(n.Left) && (n.Right == nil || isgoconst(n.Right)) {
+			return true
 		}
 
 	case OCONV:
-		if okforconst[n.Type.Etype] != 0 && isgoconst(n.Left) != 0 {
-			return 1
+		if okforconst[n.Type.Etype] != 0 && isgoconst(n.Left) {
+			return true
 		}
 
 	case OLEN,
 		OCAP:
 		l = n.Left
-		if isgoconst(l) != 0 {
-			return 1
+		if isgoconst(l) {
+			return true
 		}
 
 		// Special case: len/cap is constant when applied to array or
@@ -1676,24 +1674,24 @@
 		if t != nil && Isptr[t.Etype] != 0 {
 			t = t.Type
 		}
-		if Isfixedarray(t) != 0 && !(hascallchan(l) != 0) {
-			return 1
+		if Isfixedarray(t) && !hascallchan(l) {
+			return true
 		}
 
 	case OLITERAL:
 		if n.Val.Ctype != CTNIL {
-			return 1
+			return true
 		}
 
 	case ONAME:
 		l = n.Sym.Def
 		if l != nil && l.Op == OLITERAL && n.Val.Ctype != CTNIL {
-			return 1
+			return true
 		}
 
 	case ONONAME:
 		if n.Sym.Def != nil && n.Sym.Def.Op == OIOTA {
-			return 1
+			return true
 		}
 
 		// Only constant calls are unsafe.Alignof, Offsetof, and Sizeof.
@@ -1707,19 +1705,19 @@
 			break
 		}
 		if l.Sym.Name == "Alignof" || l.Sym.Name == "Offsetof" || l.Sym.Name == "Sizeof" {
-			return 1
+			return true
 		}
 	}
 
 	//dump("nonconst", n);
-	return 0
+	return false
 }
 
-func hascallchan(n *Node) int {
+func hascallchan(n *Node) bool {
 	var l *NodeList
 
 	if n == nil {
-		return 0
+		return false
 	}
 	switch n.Op {
 	case OAPPEND,
@@ -1742,23 +1740,23 @@
 		OREAL,
 		ORECOVER,
 		ORECV:
-		return 1
+		return true
 	}
 
-	if hascallchan(n.Left) != 0 || hascallchan(n.Right) != 0 {
-		return 1
+	if hascallchan(n.Left) || hascallchan(n.Right) {
+		return true
 	}
 
 	for l = n.List; l != nil; l = l.Next {
-		if hascallchan(l.N) != 0 {
-			return 1
+		if hascallchan(l.N) {
+			return true
 		}
 	}
 	for l = n.Rlist; l != nil; l = l.Next {
-		if hascallchan(l.N) != 0 {
-			return 1
+		if hascallchan(l.N) {
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
diff --git a/src/cmd/internal/gc/cplx.go b/src/cmd/internal/gc/cplx.go
index bc5b547..34decd1 100644
--- a/src/cmd/internal/gc/cplx.go
+++ b/src/cmd/internal/gc/cplx.go
@@ -10,12 +10,12 @@
 	return a<<16 | b
 }
 
-func overlap_cplx(f *Node, t *Node) int {
+func overlap_cplx(f *Node, t *Node) bool {
 	// check whether f and t could be overlapping stack references.
 	// not exact, because it's hard to check for the stack register
 	// in portable code.  close enough: worst case we will allocate
 	// an extra temporary and the registerizer will clean it up.
-	return bool2int(f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset)
+	return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
 }
 
 func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Prog) {
@@ -31,20 +31,20 @@
 
 	// make both sides addable in ullman order
 	if nr != nil {
-		if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
+		if nl.Ullman > nr.Ullman && nl.Addable == 0 {
 			Tempname(&tnl, nl.Type)
 			Thearch.Cgen(nl, &tnl)
 			nl = &tnl
 		}
 
-		if !(nr.Addable != 0) {
+		if nr.Addable == 0 {
 			Tempname(&tnr, nr.Type)
 			Thearch.Cgen(nr, &tnr)
 			nr = &tnr
 		}
 	}
 
-	if !(nl.Addable != 0) {
+	if nl.Addable == 0 {
 		Tempname(&tnl, nl.Type)
 		Thearch.Cgen(nl, &tnl)
 		nl = &tnl
@@ -87,7 +87,7 @@
 	var tc int
 	var t *Type
 
-	if !(nc.Addable != 0) {
+	if nc.Addable == 0 {
 		Fatal("subnode not addable")
 	}
 
@@ -243,7 +243,7 @@
 	n.Val.Ctype = CTFLT
 	n.Type = t
 
-	if !(Isfloat[t.Etype] != 0) {
+	if Isfloat[t.Etype] == 0 {
 		Fatal("nodfconst: bad type %v", Tconv(t, 0))
 	}
 }
@@ -251,7 +251,7 @@
 /*
  * cplx.c
  */
-func Complexop(n *Node, res *Node) int {
+func Complexop(n *Node, res *Node) bool {
 	if n != nil && n.Type != nil {
 		if Iscomplex[n.Type.Etype] != 0 {
 			goto maybe
@@ -292,11 +292,11 @@
 
 	//dump("\ncomplex-no", n);
 no:
-	return 0
+	return false
 
 	//dump("\ncomplex-yes", n);
 yes:
-	return 1
+	return true
 }
 
 func Complexmove(f *Node, t *Node) {
@@ -313,7 +313,7 @@
 		Dump("complexmove-t", t)
 	}
 
-	if !(t.Addable != 0) {
+	if t.Addable == 0 {
 		Fatal("complexmove: to not addable")
 	}
 
@@ -322,7 +322,6 @@
 	switch uint32(ft)<<16 | uint32(tt) {
 	default:
 		Fatal("complexmove: unknown conversion: %v -> %v\n", Tconv(f.Type, 0), Tconv(t.Type, 0))
-		fallthrough
 
 		// complex to complex move/convert.
 	// make f addable.
@@ -331,7 +330,7 @@
 		TCOMPLEX64<<16 | TCOMPLEX128,
 		TCOMPLEX128<<16 | TCOMPLEX64,
 		TCOMPLEX128<<16 | TCOMPLEX128:
-		if !(f.Addable != 0) || overlap_cplx(f, t) != 0 {
+		if f.Addable == 0 || overlap_cplx(f, t) {
 			Tempname(&tmp, f.Type)
 			Complexmove(f, &tmp)
 			f = &tmp
@@ -380,7 +379,7 @@
 	case OREAL,
 		OIMAG:
 		nl = n.Left
-		if !(nl.Addable != 0) {
+		if nl.Addable == 0 {
 			Tempname(&tmp, nl.Type)
 			Complexgen(nl, &tmp)
 			nl = &tmp
@@ -403,7 +402,7 @@
 	tr = Simsimtype(n.Type)
 	tr = cplxsubtype(tr)
 	if tl != tr {
-		if !(n.Addable != 0) {
+		if n.Addable == 0 {
 			Tempname(&n1, n.Type)
 			Complexmove(n, &n1)
 			n = &n1
@@ -413,7 +412,7 @@
 		return
 	}
 
-	if !(res.Addable != 0) {
+	if res.Addable == 0 {
 		Thearch.Igen(res, &n1, nil)
 		Thearch.Cgen(n, &n1)
 		Thearch.Regfree(&n1)
@@ -429,7 +428,6 @@
 	default:
 		Dump("complexgen: unknown op", n)
 		Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
-		fallthrough
 
 	case ODOT,
 		ODOTPTR,
@@ -464,20 +462,20 @@
 
 	// make both sides addable in ullman order
 	if nr != nil {
-		if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
+		if nl.Ullman > nr.Ullman && nl.Addable == 0 {
 			Tempname(&tnl, nl.Type)
 			Thearch.Cgen(nl, &tnl)
 			nl = &tnl
 		}
 
-		if !(nr.Addable != 0) {
+		if nr.Addable == 0 {
 			Tempname(&tnr, nr.Type)
 			Thearch.Cgen(nr, &tnr)
 			nr = &tnr
 		}
 	}
 
-	if !(nl.Addable != 0) {
+	if nl.Addable == 0 {
 		Tempname(&tnl, nl.Type)
 		Thearch.Cgen(nl, &tnl)
 		nl = &tnl
diff --git a/src/cmd/internal/gc/dcl.go b/src/cmd/internal/gc/dcl.go
index 0aeb587..577f7ec 100644
--- a/src/cmd/internal/gc/dcl.go
+++ b/src/cmd/internal/gc/dcl.go
@@ -10,17 +10,17 @@
 	"strings"
 )
 
-func dflag() int {
-	if !(Debug['d'] != 0) {
-		return 0
+func dflag() bool {
+	if Debug['d'] == 0 {
+		return false
 	}
 	if Debug['y'] != 0 {
-		return 1
+		return true
 	}
 	if incannedimport != 0 {
-		return 0
+		return false
 	}
-	return 1
+	return true
 }
 
 /*
@@ -49,7 +49,7 @@
 
 	d = push()
 	dcopy(d, s)
-	if dflag() != 0 {
+	if dflag() {
 		fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
 	}
 	return d
@@ -71,7 +71,7 @@
 		lno = int(s.Lastlineno)
 		dcopy(s, d)
 		d.Lastlineno = int32(lno)
-		if dflag() != 0 {
+		if dflag() {
 			fmt.Printf("\t%v pop %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
 		}
 	}
@@ -195,7 +195,7 @@
 	s = n.Sym
 
 	// kludgy: typecheckok means we're past parsing.  Eg genwrapper may declare out of package names later.
-	if importpkg == nil && !(typecheckok != 0) && s.Pkg != localpkg {
+	if importpkg == nil && typecheckok == 0 && s.Pkg != localpkg {
 		Yyerror("cannot declare name %v", Sconv(s, 0))
 	}
 
@@ -206,7 +206,7 @@
 	gen = 0
 	if ctxt == PEXTERN {
 		externdcl = list(externdcl, n)
-		if dflag() != 0 {
+		if dflag() {
 			fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), n)
 		}
 	} else {
@@ -264,14 +264,14 @@
  * new_name_list (type | [type] = expr_list)
  */
 func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
-	var doexpr int
+	var doexpr bool
 	var v *Node
 	var e *Node
 	var as2 *Node
 	var init *NodeList
 
 	init = nil
-	doexpr = bool2int(el != nil)
+	doexpr = el != nil
 
 	if count(el) == 1 && count(vl) > 1 {
 		e = el.N
@@ -293,7 +293,7 @@
 	}
 
 	for ; vl != nil; vl = vl.Next {
-		if doexpr != 0 {
+		if doexpr {
 			if el == nil {
 				Yyerror("missing expression in var declaration")
 				break
@@ -479,17 +479,17 @@
 /*
  * := declarations
  */
-func colasname(n *Node) int {
+func colasname(n *Node) bool {
 	switch n.Op {
 	case ONAME,
 		ONONAME,
 		OPACK,
 		OTYPE,
 		OLITERAL:
-		return bool2int(n.Sym != nil)
+		return n.Sym != nil
 	}
 
-	return 0
+	return false
 }
 
 func colasdefn(left *NodeList, defn *Node) {
@@ -511,7 +511,7 @@
 		if isblank(n) {
 			continue
 		}
-		if !(colasname(n) != 0) {
+		if !colasname(n) {
 			yyerrorl(int(defn.Lineno), "non-name %v on left side of :=", Nconv(n, 0))
 			nerr++
 			continue
@@ -735,7 +735,7 @@
 
 	if t.Thistuple != 0 {
 		for ft = getthisx(t).Type; ft != nil; ft = ft.Down {
-			if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
 				continue
 			}
 			n = ft.Nname // no need for newname(ft->nname->sym)
@@ -746,7 +746,7 @@
 
 	if t.Intuple != 0 {
 		for ft = getinargx(t).Type; ft != nil; ft = ft.Down {
-			if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
 				continue
 			}
 			n = ft.Nname
@@ -757,7 +757,7 @@
 
 	if t.Outtuple != 0 {
 		for ft = getoutargx(t).Type; ft != nil; ft = ft.Down {
-			if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
 				continue
 			}
 			n = ft.Nname
@@ -925,7 +925,7 @@
 		tp = &f.Down
 	}
 
-	for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
+	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
 		if f.Broke != 0 {
 			t.Broke = 1
 		}
@@ -934,7 +934,7 @@
 	uniqgen++
 	checkdupfields(t.Type, "field")
 
-	if !(t.Broke != 0) {
+	if t.Broke == 0 {
 		checkwidth(t)
 	}
 
@@ -962,7 +962,7 @@
 		tp = &f.Down
 	}
 
-	for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
+	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
 		if f.Broke != 0 {
 			t.Broke = 1
 		}
@@ -1072,7 +1072,7 @@
 		}
 	}
 
-	for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
+	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
 		if f.Broke != 0 {
 			t.Broke = 1
 		}
@@ -1199,7 +1199,7 @@
 		}
 		n = Nod(ODCLFIELD, n, t)
 		if n.Right != nil && n.Right.Op == ODDD {
-			if !(input != 0) {
+			if input == 0 {
 				Yyerror("cannot use ... in output argument list")
 			} else if l.Next != nil {
 				Yyerror("can only use ... as final argument in list")
@@ -1232,23 +1232,23 @@
  * *struct{} as the receiver.
  * (See fakethis above.)
  */
-func isifacemethod(f *Type) int {
+func isifacemethod(f *Type) bool {
 	var rcvr *Type
 	var t *Type
 
 	rcvr = getthisx(f).Type
 	if rcvr.Sym != nil {
-		return 0
+		return false
 	}
 	t = rcvr.Type
-	if !(Isptr[t.Etype] != 0) {
-		return 0
+	if Isptr[t.Etype] == 0 {
+		return false
 	}
 	t = t.Type
 	if t.Sym != nil || t.Etype != TSTRUCT || t.Type != nil {
-		return 0
+		return false
 	}
-	return 1
+	return true
 }
 
 /*
@@ -1480,7 +1480,7 @@
 		}
 	}
 
-	if local && !(pa.Local != 0) {
+	if local && pa.Local == 0 {
 		// defining method on non-local type.
 		Yyerror("cannot define new methods on non-local type %v", Tconv(pa, 0))
 
@@ -1506,7 +1506,7 @@
 	}
 
 	f = structfield(n)
-	f.Nointerface = uint8(bool2int(nointerface))
+	f.Nointerface = nointerface
 
 	// during import unexported method names should be in the type's package
 	if importpkg != nil && f.Sym != nil && !exportname(f.Sym.Name) && f.Sym.Pkg != structpkg {
diff --git a/src/cmd/internal/gc/esc.go b/src/cmd/internal/gc/esc.go
index 35543e1..f4d5b43 100644
--- a/src/cmd/internal/gc/esc.go
+++ b/src/cmd/internal/gc/esc.go
@@ -69,7 +69,7 @@
 
 func visit(n *Node) uint32 {
 	var min uint32
-	var recursive uint32
+	var recursive bool
 	var l *NodeList
 	var block *NodeList
 
@@ -95,7 +95,7 @@
 		// If visitcodelist found its way back to n->walkgen, then this
 		// block is a set of mutually recursive functions.
 		// Otherwise it's just a lone function that does not recurse.
-		recursive = uint32(bool2int(min == n.Walkgen))
+		recursive = min == n.Walkgen
 
 		// Remove connected component from stack.
 		// Mark walkgen so that future visits return a large number
@@ -110,7 +110,7 @@
 		l.Next = nil
 
 		// Run escape analysis on this set of functions.
-		analyze(block, int(recursive))
+		analyze(block, recursive)
 	}
 
 	return min
@@ -199,7 +199,7 @@
 	dstcount  int
 	edgecount int
 	noesc     *NodeList
-	recursive int
+	recursive bool
 }
 
 var tags [16]*Strlit
@@ -247,7 +247,7 @@
 	return EscReturn | em<<EscBits
 }
 
-func analyze(all *NodeList, recursive int) {
+func analyze(all *NodeList, recursive bool) {
 	var l *NodeList
 	var es EscState
 	var e *EscState
@@ -351,7 +351,7 @@
 	}
 
 	// in a mutually recursive group we lose track of the return values
-	if e.recursive != 0 {
+	if e.recursive {
 		for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
 			if ll.N.Op == ONAME && ll.N.Class == PPARAMOUT {
 				escflows(e, &e.theSink, ll.N)
@@ -387,7 +387,7 @@
 
 	switch n.Op {
 	case OLABEL:
-		if !(n.Left != nil) || !(n.Left.Sym != nil) {
+		if n.Left == nil || n.Left.Sym == nil {
 			Fatal("esc:label without label: %v", Nconv(n, obj.FmtSign))
 		}
 
@@ -398,7 +398,7 @@
 		n.Left.Sym.Label = &nonlooping
 
 	case OGOTO:
-		if !(n.Left != nil) || !(n.Left.Sym != nil) {
+		if n.Left == nil || n.Left.Sym == nil {
 			Fatal("esc:goto without label: %v", Nconv(n, obj.FmtSign))
 		}
 
@@ -509,7 +509,7 @@
 
 		// Everything but fixed array is a dereference.
 	case ORANGE:
-		if Isfixedarray(n.Type) != 0 && n.List != nil && n.List.Next != nil {
+		if Isfixedarray(n.Type) && n.List != nil && n.List.Next != nil {
 			escassign(e, n.List.Next.N, n.Right)
 		}
 
@@ -639,7 +639,7 @@
 		escassign(e, &e.theSink, n.Left)
 
 	case OAPPEND:
-		if !(n.Isddd != 0) {
+		if n.Isddd == 0 {
 			for ll = n.List.Next; ll != nil; ll = ll.Next {
 				escassign(e, &e.theSink, ll.N) // lose track of assign to dereference
 			}
@@ -651,7 +651,7 @@
 		escassign(e, n, n.Left)
 
 	case OARRAYLIT:
-		if Isslice(n.Type) != 0 {
+		if Isslice(n.Type) {
 			n.Esc = EscNone // until proven otherwise
 			e.noesc = list(e.noesc, n)
 			n.Escloopdepth = e.loopdepth
@@ -708,7 +708,7 @@
 				continue
 			}
 			a = v.Closure
-			if !(v.Byval != 0) {
+			if v.Byval == 0 {
 				a = Nod(OADDR, a, nil)
 				a.Lineno = v.Lineno
 				a.Escloopdepth = e.loopdepth
@@ -805,7 +805,6 @@
 	default:
 		Dump("dst", dst)
 		Fatal("escassign: unexpected dst")
-		fallthrough
 
 	case OARRAYLIT,
 		OCLOSURE,
@@ -829,7 +828,7 @@
 		return
 
 	case OINDEX:
-		if Isfixedarray(dst.Left.Type) != 0 {
+		if Isfixedarray(dst.Left.Type) {
 			escassign(e, dst.Left, src)
 			return
 		}
@@ -914,7 +913,7 @@
 
 		// Index of array preserves input value.
 	case OINDEX:
-		if Isfixedarray(src.Left.Type) != 0 {
+		if Isfixedarray(src.Left.Type) {
 			escassign(e, dst, src.Left)
 		}
 
@@ -999,7 +998,6 @@
 	switch n.Op {
 	default:
 		Fatal("esccall")
-		fallthrough
 
 	case OCALLFUNC:
 		fn = n.Left
@@ -1044,7 +1042,7 @@
 
 		for lr = fn.Ntype.List; ll != nil && lr != nil; (func() { ll = ll.Next; lr = lr.Next })() {
 			src = ll.N
-			if lr.N.Isddd != 0 && !(n.Isddd != 0) {
+			if lr.N.Isddd != 0 && n.Isddd == 0 {
 				// Introduce ODDDARG node to represent ... allocation.
 				src = Nod(ODDDARG, nil, nil)
 
@@ -1110,7 +1108,7 @@
 
 	for t = getinargx(fntype).Type; ll != nil; ll = ll.Next {
 		src = ll.N
-		if t.Isddd != 0 && !(n.Isddd != 0) {
+		if t.Isddd != 0 && n.Isddd == 0 {
 			// Introduce ODDDARG node to represent ... allocation.
 			src = Nod(ODDDARG, nil, nil)
 
@@ -1243,7 +1241,7 @@
 
 func escwalk(e *EscState, level int, dst *Node, src *Node) {
 	var ll *NodeList
-	var leaks int
+	var leaks bool
 	var newlevel int
 
 	if src.Walkgen == walkgen && src.Esclevel <= int32(level) {
@@ -1292,11 +1290,11 @@
 	// The second clause is for values pointed at by an object passed to a call
 	// that returns something reached via indirect from the object.
 	// We don't know which result it is or how many indirects, so we treat it as leaking.
-	leaks = bool2int(level <= 0 && dst.Escloopdepth < src.Escloopdepth || level < 0 && dst == &e.funcParam && haspointers(src.Type))
+	leaks = level <= 0 && dst.Escloopdepth < src.Escloopdepth || level < 0 && dst == &e.funcParam && haspointers(src.Type)
 
 	switch src.Op {
 	case ONAME:
-		if src.Class == PPARAM && (leaks != 0 || dst.Escloopdepth < 0) && src.Esc != EscHeap {
+		if src.Class == PPARAM && (leaks || dst.Escloopdepth < 0) && src.Esc != EscHeap {
 			src.Esc = EscScope
 			if Debug['m'] != 0 {
 				Warnl(int(src.Lineno), "leaking param: %v", Nconv(src, obj.FmtShort))
@@ -1306,7 +1304,7 @@
 		// Treat a PPARAMREF closure variable as equivalent to the
 		// original variable.
 		if src.Class == PPARAMREF {
-			if leaks != 0 && Debug['m'] != 0 {
+			if leaks && Debug['m'] != 0 {
 				Warnl(int(src.Lineno), "leaking closure reference %v", Nconv(src, obj.FmtShort))
 			}
 			escwalk(e, level, dst, src.Closure)
@@ -1314,7 +1312,7 @@
 
 	case OPTRLIT,
 		OADDR:
-		if leaks != 0 {
+		if leaks {
 			src.Esc = EscHeap
 			addrescapes(src.Left)
 			if Debug['m'] != 0 {
@@ -1329,7 +1327,7 @@
 		escwalk(e, newlevel, dst, src.Left)
 
 	case OARRAYLIT:
-		if Isfixedarray(src.Type) != 0 {
+		if Isfixedarray(src.Type) {
 			break
 		}
 		fallthrough
@@ -1349,7 +1347,7 @@
 		OCLOSURE,
 		OCALLPART,
 		ORUNESTR:
-		if leaks != 0 {
+		if leaks {
 			src.Esc = EscHeap
 			if Debug['m'] != 0 {
 				Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
@@ -1365,7 +1363,7 @@
 		escwalk(e, level, dst, src.Left)
 
 	case OINDEX:
-		if Isfixedarray(src.Left.Type) != 0 {
+		if Isfixedarray(src.Left.Type) {
 			escwalk(e, level, dst, src.Left)
 			break
 		}
diff --git a/src/cmd/internal/gc/export.go b/src/cmd/internal/gc/export.go
index 5b34fe2..b0c5931 100644
--- a/src/cmd/internal/gc/export.go
+++ b/src/cmd/internal/gc/export.go
@@ -42,19 +42,19 @@
 	return unicode.IsUpper(r)
 }
 
-func initname(s string) int {
-	return bool2int(s == "init")
+func initname(s string) bool {
+	return s == "init"
 }
 
 // exportedsym reports whether a symbol will be visible
 // to files that import our package.
-func exportedsym(sym *Sym) int {
+func exportedsym(sym *Sym) bool {
 	// Builtins are visible everywhere.
 	if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
-		return 1
+		return true
 	}
 
-	return bool2int(sym.Pkg == localpkg && exportname(sym.Name))
+	return sym.Pkg == localpkg && exportname(sym.Name)
 }
 
 func autoexport(n *Node, ctxt int) {
@@ -69,10 +69,10 @@
 	}
 
 	// -A is for cmd/gc/mkbuiltin script, so export everything
-	if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) != 0 {
+	if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) {
 		exportsym(n)
 	}
-	if asmhdr != "" && n.Sym.Pkg == localpkg && !(n.Sym.Flags&SymAsm != 0) {
+	if asmhdr != "" && n.Sym.Pkg == localpkg && n.Sym.Flags&SymAsm == 0 {
 		n.Sym.Flags |= SymAsm
 		asmlist = list(asmlist, n)
 	}
@@ -86,7 +86,7 @@
 	}
 	p.Exported = 1
 	suffix = ""
-	if !(p.Direct != 0) {
+	if p.Direct == 0 {
 		suffix = " // indirect"
 	}
 	fmt.Fprintf(bout, "\timport %s \"%v\"%s\n", p.Name, Zconv(p.Path, 0), suffix)
@@ -102,7 +102,7 @@
 func reexportdep(n *Node) {
 	var t *Type
 
-	if !(n != nil) {
+	if n == nil {
 		return
 	}
 
@@ -118,14 +118,14 @@
 			}
 
 			// nodes for method calls.
-			if !(n.Type != nil) || n.Type.Thistuple > 0 {
+			if n.Type == nil || n.Type.Thistuple > 0 {
 				break
 			}
 			fallthrough
 
 			// fallthrough
 		case PEXTERN:
-			if n.Sym != nil && !(exportedsym(n.Sym) != 0) {
+			if n.Sym != nil && !exportedsym(n.Sym) {
 				if Debug['E'] != 0 {
 					fmt.Printf("reexport name %v\n", Sconv(n.Sym, 0))
 				}
@@ -141,7 +141,7 @@
 			if Isptr[t.Etype] != 0 {
 				t = t.Type
 			}
-			if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
+			if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
 				if Debug['E'] != 0 {
 					fmt.Printf("reexport type %v from declaration\n", Sconv(t.Sym, 0))
 				}
@@ -155,7 +155,7 @@
 			if Isptr[t.Etype] != 0 {
 				t = t.Type
 			}
-			if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
+			if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
 				if Debug['E'] != 0 {
 					fmt.Printf("reexport literal type %v\n", Sconv(t.Sym, 0))
 				}
@@ -166,7 +166,7 @@
 
 		// fallthrough
 	case OTYPE:
-		if n.Sym != nil && !(exportedsym(n.Sym) != 0) {
+		if n.Sym != nil && !exportedsym(n.Sym) {
 			if Debug['E'] != 0 {
 				fmt.Printf("reexport literal/type %v\n", Sconv(n.Sym, 0))
 			}
@@ -192,10 +192,10 @@
 		OMAKECHAN:
 		t = n.Type
 
-		if !(t.Sym != nil) && t.Type != nil {
+		if t.Sym == nil && t.Type != nil {
 			t = t.Type
 		}
-		if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
+		if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
 			if Debug['E'] != 0 {
 				fmt.Printf("reexport type for expression %v\n", Sconv(t.Sym, 0))
 			}
@@ -227,7 +227,7 @@
 	t = n.Type // may or may not be specified
 	dumpexporttype(t)
 
-	if t != nil && !(isideal(t) != 0) {
+	if t != nil && !isideal(t) {
 		fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
 	} else {
 		fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
@@ -329,7 +329,7 @@
 	fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
 	for i = 0; i < n; i++ {
 		f = m[i]
-		if f.Nointerface != 0 {
+		if f.Nointerface {
 			fmt.Fprintf(bout, "\t//go:nointerface\n")
 		}
 		if f.Type.Nname != nil && f.Type.Nname.Inl != nil { // nname was set by caninl
@@ -428,7 +428,7 @@
 
 	// mark the symbol so it is not reexported
 	if s.Def == nil {
-		if exportname(s.Name) || initname(s.Name) != 0 {
+		if exportname(s.Name) || initname(s.Name) {
 			s.Flags |= SymExport
 		} else {
 			s.Flags |= SymPackage // package scope
@@ -474,7 +474,7 @@
 		Yyerror("conflicting names %s and %s for package \"%v\"", p.Name, s.Name, Zconv(p.Path, 0))
 	}
 
-	if !(incannedimport != 0) && myimportpath != "" && z.S == myimportpath {
+	if incannedimport == 0 && myimportpath != "" && z.S == myimportpath {
 		Yyerror("import \"%v\": package depends on \"%v\" (import cycle)", Zconv(importpkg.Path, 0), Zconv(z, 0))
 		errorexit()
 	}
diff --git a/src/cmd/internal/gc/fmt.go b/src/cmd/internal/gc/fmt.go
index 08c08a4..3df4bc3 100644
--- a/src/cmd/internal/gc/fmt.go
+++ b/src/cmd/internal/gc/fmt.go
@@ -207,15 +207,15 @@
 
 	c = flag & obj.FmtShort
 
-	if !(c != 0) && n.Ullman != 0 {
+	if c == 0 && n.Ullman != 0 {
 		fp += fmt.Sprintf(" u(%d)", n.Ullman)
 	}
 
-	if !(c != 0) && n.Addable != 0 {
+	if c == 0 && n.Addable != 0 {
 		fp += fmt.Sprintf(" a(%d)", n.Addable)
 	}
 
-	if !(c != 0) && n.Vargen != 0 {
+	if c == 0 && n.Vargen != 0 {
 		fp += fmt.Sprintf(" g(%d)", n.Vargen)
 	}
 
@@ -223,7 +223,7 @@
 		fp += fmt.Sprintf(" l(%d)", n.Lineno)
 	}
 
-	if !(c != 0) && n.Xoffset != BADWIDTH {
+	if c == 0 && n.Xoffset != BADWIDTH {
 		fp += fmt.Sprintf(" x(%d%+d)", n.Xoffset, n.Stkdelta)
 	}
 
@@ -261,7 +261,7 @@
 		fp += fmt.Sprintf(" esc(no)")
 
 	case EscNever:
-		if !(c != 0) {
+		if c == 0 {
 			fp += fmt.Sprintf(" esc(N)")
 		}
 
@@ -273,11 +273,11 @@
 		fp += fmt.Sprintf(" ld(%d)", n.Escloopdepth)
 	}
 
-	if !(c != 0) && n.Typecheck != 0 {
+	if c == 0 && n.Typecheck != 0 {
 		fp += fmt.Sprintf(" tc(%d)", n.Typecheck)
 	}
 
-	if !(c != 0) && n.Dodata != 0 {
+	if c == 0 && n.Dodata != 0 {
 		fp += fmt.Sprintf(" dd(%d)", n.Dodata)
 	}
 
@@ -301,7 +301,7 @@
 		fp += fmt.Sprintf(" assigned")
 	}
 
-	if !(c != 0) && n.Used != 0 {
+	if c == 0 && n.Used != 0 {
 		fp += fmt.Sprintf(" used(%d)", n.Used)
 	}
 	return fp
@@ -497,7 +497,7 @@
 
 	var p string
 
-	if s.Pkg != nil && !(flag&obj.FmtShort != 0 /*untyped*/) {
+	if s.Pkg != nil && flag&obj.FmtShort == 0 /*untyped*/ {
 		switch fmtmode {
 		case FErr: // This is for the user
 			if s.Pkg == localpkg {
@@ -608,7 +608,7 @@
 	}
 
 	// Unless the 'l' flag was specified, if the type has a name, just print that name.
-	if !(flag&obj.FmtLong != 0 /*untyped*/) && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
+	if flag&obj.FmtLong == 0 /*untyped*/ && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
 		switch fmtmode {
 		case FTypeId:
 			if flag&obj.FmtShort != 0 /*untyped*/ {
@@ -802,7 +802,7 @@
 		return fp
 
 	case TFIELD:
-		if !(flag&obj.FmtShort != 0 /*untyped*/) {
+		if flag&obj.FmtShort == 0 /*untyped*/ {
 			s = t.Sym
 
 			// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
@@ -822,7 +822,7 @@
 				}
 			}
 
-			if s != nil && !(t.Embedded != 0) {
+			if s != nil && t.Embedded == 0 {
 				if t.Funarg != 0 {
 					fp += fmt.Sprintf("%v ", Nconv(t.Nname, 0))
 				} else if flag&obj.FmtLong != 0 /*untyped*/ {
@@ -850,7 +850,7 @@
 			fp += fmt.Sprintf("%v", Tconv(t.Type, 0))
 		}
 
-		if !(flag&obj.FmtShort != 0 /*untyped*/) && t.Note != nil {
+		if flag&obj.FmtShort == 0 /*untyped*/ && t.Note != nil {
 			fp += fmt.Sprintf(" \"%v\"", Zconv(t.Note, 0))
 		}
 		return fp
@@ -882,23 +882,23 @@
 }
 
 // Statements which may be rendered with a simplestmt as init.
-func stmtwithinit(op int) int {
+func stmtwithinit(op int) bool {
 	switch op {
 	case OIF,
 		OFOR,
 		OSWITCH:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 func stmtfmt(n *Node) string {
 	var f string
 
-	var complexinit int
-	var simpleinit int
-	var extrablock int
+	var complexinit bool
+	var simpleinit bool
+	var extrablock bool
 
 	// some statements allow for an init, but at most one,
 	// but we may have an arbitrary number added, eg by typecheck
@@ -906,19 +906,19 @@
 	// block starting with the init statements.
 
 	// if we can just say "for" n->ninit; ... then do so
-	simpleinit = bool2int(n.Ninit != nil && !(n.Ninit.Next != nil) && !(n.Ninit.N.Ninit != nil) && stmtwithinit(int(n.Op)) != 0)
+	simpleinit = n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
 
 	// otherwise, print the inits as separate statements
-	complexinit = bool2int(n.Ninit != nil && !(simpleinit != 0) && (fmtmode != FErr))
+	complexinit = n.Ninit != nil && !simpleinit && (fmtmode != FErr)
 
 	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
-	extrablock = bool2int(complexinit != 0 && stmtwithinit(int(n.Op)) != 0)
+	extrablock = complexinit && stmtwithinit(int(n.Op))
 
-	if extrablock != 0 {
+	if extrablock {
 		f += "{"
 	}
 
-	if complexinit != 0 {
+	if complexinit {
 		f += fmt.Sprintf(" %v; ", Hconv(n.Ninit, 0))
 	}
 
@@ -951,7 +951,7 @@
 			break
 		}
 
-		if n.Colas != 0 && !(complexinit != 0) {
+		if n.Colas != 0 && !complexinit {
 			f += fmt.Sprintf("%v := %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
 		} else {
 			f += fmt.Sprintf("%v = %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
@@ -970,7 +970,7 @@
 		f += fmt.Sprintf("%v %v= %v", Nconv(n.Left, 0), Oconv(int(n.Etype), obj.FmtSharp), Nconv(n.Right, 0))
 
 	case OAS2:
-		if n.Colas != 0 && !(complexinit != 0) {
+		if n.Colas != 0 && !complexinit {
 			f += fmt.Sprintf("%v := %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
 			break
 		}
@@ -996,7 +996,7 @@
 		f += fmt.Sprintf("defer %v", Nconv(n.Left, 0))
 
 	case OIF:
-		if simpleinit != 0 {
+		if simpleinit {
 			f += fmt.Sprintf("if %v; %v { %v }", Nconv(n.Ninit.N, 0), Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
 		} else {
 			f += fmt.Sprintf("if %v { %v }", Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
@@ -1012,7 +1012,7 @@
 		}
 
 		f += "for"
-		if simpleinit != 0 {
+		if simpleinit {
 			f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
 		} else if n.Nincr != nil {
 			f += " ;"
@@ -1024,7 +1024,7 @@
 
 		if n.Nincr != nil {
 			f += fmt.Sprintf("; %v", Nconv(n.Nincr, 0))
-		} else if simpleinit != 0 {
+		} else if simpleinit {
 			f += ";"
 		}
 
@@ -1051,7 +1051,7 @@
 		}
 
 		f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
-		if simpleinit != 0 {
+		if simpleinit {
 			f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
 		}
 		if n.Ntest != nil {
@@ -1087,7 +1087,7 @@
 	}
 
 ret:
-	if extrablock != 0 {
+	if extrablock {
 		f += "}"
 	}
 
@@ -1211,7 +1211,7 @@
 	var f string
 
 	var nprec int
-	var ptrlit int
+	var ptrlit bool
 	var l *NodeList
 
 	for n != nil && n.Implicit != 0 && (n.Op == OIND || n.Op == OADDR) {
@@ -1368,10 +1368,10 @@
 		return f
 
 	case OCOMPLIT:
-		ptrlit = bool2int(n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0)
+		ptrlit = n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0
 		if fmtmode == FErr {
-			if n.Right != nil && n.Right.Type != nil && !(n.Implicit != 0) {
-				if ptrlit != 0 {
+			if n.Right != nil && n.Right.Type != nil && n.Implicit == 0 {
+				if ptrlit {
 					f += fmt.Sprintf("&%v literal", Tconv(n.Right.Type.Type, 0))
 					return f
 				} else {
@@ -1384,7 +1384,7 @@
 			return f
 		}
 
-		if fmtmode == FExp && ptrlit != 0 {
+		if fmtmode == FExp && ptrlit {
 			// typecheck has overwritten OIND by OTYPE with pointer type.
 			f += fmt.Sprintf("(&%v{ %v })", Tconv(n.Right.Type.Type, 0), Hconv(n.List, obj.FmtComma))
 			return f
@@ -1418,7 +1418,7 @@
 				}
 			}
 
-			if !(n.Implicit != 0) {
+			if n.Implicit == 0 {
 				f += "})"
 				return f
 			}
@@ -1454,11 +1454,11 @@
 			}
 		}
 
-		if !(n.Left != nil) && n.Right != nil {
+		if n.Left == nil && n.Right != nil {
 			f += fmt.Sprintf(":%v", Nconv(n.Right, 0))
 			return f
 		}
-		if n.Left != nil && !(n.Right != nil) {
+		if n.Left != nil && n.Right == nil {
 			f += fmt.Sprintf("%v:", Nconv(n.Left, 0))
 			return f
 		}
@@ -1686,15 +1686,15 @@
 func nodedump(n *Node, flag int) string {
 	var fp string
 
-	var recur int
+	var recur bool
 
 	if n == nil {
 		return fp
 	}
 
-	recur = bool2int(!(flag&obj.FmtShort != 0 /*untyped*/))
+	recur = flag&obj.FmtShort == 0 /*untyped*/
 
-	if recur != 0 {
+	if recur {
 		fp = indent(fp)
 		if dumpdepth > 10 {
 			fp += "..."
@@ -1727,7 +1727,7 @@
 		} else {
 			fp += fmt.Sprintf("%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
 		}
-		if recur != 0 && n.Type == nil && n.Ntype != nil {
+		if recur && n.Type == nil && n.Ntype != nil {
 			fp = indent(fp)
 			fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
 		}
@@ -1737,7 +1737,7 @@
 
 	case OTYPE:
 		fp += fmt.Sprintf("%v %v%v type=%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0), Tconv(n.Type, 0))
-		if recur != 0 && n.Type == nil && n.Ntype != nil {
+		if recur && n.Type == nil && n.Ntype != nil {
 			fp = indent(fp)
 			fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
 		}
@@ -1751,7 +1751,7 @@
 		fp += fmt.Sprintf(" %v", Tconv(n.Type, 0))
 	}
 
-	if recur != 0 {
+	if recur {
 		if n.Left != nil {
 			fp += fmt.Sprintf("%v", Nconv(n.Left, 0))
 		}
diff --git a/src/cmd/internal/gc/gen.go b/src/cmd/internal/gc/gen.go
index a40a347..9d41b6f 100644
--- a/src/cmd/internal/gc/gen.go
+++ b/src/cmd/internal/gc/gen.go
@@ -109,7 +109,7 @@
 	// is always a heap pointer anyway.
 	case ODOT,
 		OINDEX:
-		if !(Isslice(n.Left.Type) != 0) {
+		if !Isslice(n.Left.Type) {
 			addrescapes(n.Left)
 		}
 	}
@@ -253,7 +253,6 @@
 	switch n.Left.Op {
 	default:
 		Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
-		fallthrough
 
 	case OCALLMETH:
 		Cgen_callmeth(n.Left, proc)
@@ -280,7 +279,7 @@
 		Fatal("cgen_dcl")
 	}
 
-	if !(n.Class&PHEAP != 0) {
+	if n.Class&PHEAP == 0 {
 		return
 	}
 	if compiling_runtime != 0 {
@@ -304,7 +303,7 @@
 
 	switch nr.Op {
 	case ONAME:
-		if !(nr.Class&PHEAP != 0) && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
+		if nr.Class&PHEAP == 0 && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
 			gused(nr)
 		}
 
@@ -480,7 +479,7 @@
 		tmpcap = tmplen
 	}
 
-	if isnil(n.Left) != 0 {
+	if isnil(n.Left) {
 		Tempname(&src, n.Left.Type)
 		Thearch.Cgen(n.Left, &src)
 	} else {
@@ -491,7 +490,7 @@
 	}
 
 	if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
-		if !(Isptr[n.Left.Type.Etype] != 0) {
+		if Isptr[n.Left.Type.Etype] == 0 {
 			Fatal("slicearr is supposed to work on pointer: %v\n", Nconv(n, obj.FmtSign))
 		}
 		Thearch.Cgen(&src, base)
@@ -668,13 +667,12 @@
 	var p2 *obj.Prog
 	var p3 *obj.Prog
 	var lab *Label
-	var wasregalloc int32
 
 	//dump("gen", n);
 
 	lno = setlineno(n)
 
-	wasregalloc = int32(Thearch.Anyregalloc())
+	wasregalloc := Thearch.Anyregalloc()
 
 	if n == nil {
 		goto ret
@@ -879,7 +877,7 @@
 		cgen_dcl(n.Left)
 
 	case OAS:
-		if gen_as_init(n) != 0 {
+		if gen_as_init(n) {
 			break
 		}
 		Cgen_as(n.Left, n.Right)
@@ -911,7 +909,7 @@
 	}
 
 ret:
-	if int32(Thearch.Anyregalloc()) != wasregalloc {
+	if Thearch.Anyregalloc() != wasregalloc {
 		Dump("node", n)
 		Fatal("registers left allocated")
 	}
@@ -936,7 +934,7 @@
 		return
 	}
 
-	if nr == nil || iszero(nr) != 0 {
+	if nr == nil || iszero(nr) {
 		// heaps should already be clear
 		if nr == nil && (nl.Class&PHEAP != 0) {
 			return
@@ -946,7 +944,7 @@
 		if tl == nil {
 			return
 		}
-		if Isfat(tl) != 0 {
+		if Isfat(tl) {
 			if nl.Op == ONAME {
 				Gvardef(nl)
 			}
@@ -1002,7 +1000,7 @@
 			continue
 		}
 
-		if lab.Use == nil && !(lab.Used != 0) {
+		if lab.Use == nil && lab.Used == 0 {
 			yyerrorl(int(lab.Def.Lineno), "label %v defined and not used", Sconv(lab.Sym, 0))
 			continue
 		}
diff --git a/src/cmd/internal/gc/go.go b/src/cmd/internal/gc/go.go
index ff1a429..16cc449 100644
--- a/src/cmd/internal/gc/go.go
+++ b/src/cmd/internal/gc/go.go
@@ -7,7 +7,6 @@
 import (
 	"bytes"
 	"cmd/internal/obj"
-	"encoding/binary"
 )
 
 // Copyright 2009 The Go Authors. All rights reserved.
@@ -162,7 +161,7 @@
 	Addable        uint8
 	Trecur         uint8
 	Etype          uint8
-	Bounded        uint8
+	Bounded        bool
 	Class          uint8
 	Method         uint8
 	Embedded       uint8
@@ -191,7 +190,7 @@
 	Likely         int8
 	Hasbreak       uint8
 	Needzero       uint8
-	Needctxt       uint8
+	Needctxt       bool
 	Esc            uint
 	Funcdepth      int
 	Type           *Type
@@ -245,7 +244,7 @@
 
 type Type struct {
 	Etype       uint8
-	Nointerface uint8
+	Nointerface bool
 	Noalg       uint8
 	Chan        uint8
 	Trecur      uint8
@@ -918,6 +917,10 @@
 
 var Use_sse int
 
+var hunk string
+
+var nhunk int32
+
 var thunk int32
 
 var Funcdepth int
@@ -1119,7 +1122,6 @@
 )
 
 type Arch struct {
-	ByteOrder      binary.ByteOrder
 	Thechar        int
 	Thestring      string
 	Thelinkarch    *obj.LinkArch
@@ -1127,7 +1129,7 @@
 	REGSP          int
 	REGCTXT        int
 	MAXWIDTH       int64
-	Anyregalloc    func() int
+	Anyregalloc    func() bool
 	Betypeinit     func()
 	Bgen           func(*Node, bool, int, *obj.Prog)
 	Cgen           func(*Node, *Node)
@@ -1148,10 +1150,10 @@
 	Proginfo       func(*ProgInfo, *obj.Prog)
 	Regalloc       func(*Node, *Type, *Node)
 	Regfree        func(*Node)
-	Regtyp         func(*obj.Addr) int
-	Sameaddr       func(*obj.Addr, *obj.Addr) int
-	Smallindir     func(*obj.Addr, *obj.Addr) int
-	Stackaddr      func(*obj.Addr) int
+	Regtyp         func(*obj.Addr) bool
+	Sameaddr       func(*obj.Addr, *obj.Addr) bool
+	Smallindir     func(*obj.Addr, *obj.Addr) bool
+	Stackaddr      func(*obj.Addr) bool
 	Excludedregs   func() uint64
 	RtoB           func(int) uint64
 	FtoB           func(int) uint64
diff --git a/src/cmd/internal/gc/gsubr.go b/src/cmd/internal/gc/gsubr.go
index 6762171..6fd6057 100644
--- a/src/cmd/internal/gc/gsubr.go
+++ b/src/cmd/internal/gc/gsubr.go
@@ -41,7 +41,7 @@
 /*
  * Is this node a memory operand?
  */
-func Ismem(n *Node) int {
+func Ismem(n *Node) bool {
 	switch n.Op {
 	case OITAB,
 		OSPTR,
@@ -51,29 +51,29 @@
 		ONAME,
 		OPARAM,
 		OCLOSUREVAR:
-		return 1
+		return true
 
 	case OADDR:
-		return bool2int(Thearch.Thechar == '6' || Thearch.Thechar == '9') // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
+		return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
 	}
 
-	return 0
+	return false
 }
 
-func Samereg(a *Node, b *Node) int {
+func Samereg(a *Node, b *Node) bool {
 	if a == nil || b == nil {
-		return 0
+		return false
 	}
 	if a.Op != OREGISTER {
-		return 0
+		return false
 	}
 	if b.Op != OREGISTER {
-		return 0
+		return false
 	}
 	if a.Val.U.Reg != b.Val.U.Reg {
-		return 0
+		return false
 	}
-	return 1
+	return true
 }
 
 /*
@@ -174,15 +174,15 @@
 
 	for lp = &p; ; {
 		p = *lp
-		if !(p != nil) {
+		if p == nil {
 			break
 		}
-		if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !(((p.From.Node).(*Node)).Used != 0) {
+		if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && ((p.From.Node).(*Node)).Used == 0 {
 			*lp = p.Link
 			continue
 		}
 
-		if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && !(((p.To.Node).(*Node)).Used != 0) {
+		if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && ((p.To.Node).(*Node)).Used == 0 {
 			// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
 			// VARDEFs are interspersed with other code, and a jump might be using the
 			// VARDEF as a target. Replace with a no-op instead. A later pass will remove
@@ -256,18 +256,18 @@
 	Thearch.Gins(obj.ANOP, n, nil) // used
 }
 
-func Isfat(t *Type) int {
+func Isfat(t *Type) bool {
 	if t != nil {
 		switch t.Etype {
 		case TSTRUCT,
 			TARRAY,
 			TSTRING,
 			TINTER: // maybe remove later
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 func markautoused(p *obj.Prog) {
@@ -289,7 +289,7 @@
 func Naddr(n *Node, a *obj.Addr, canemitcode int) {
 	var s *Sym
 
-	*a = obj.Zprog.From
+	*a = obj.Addr{}
 	if n == nil {
 		return
 	}
@@ -343,7 +343,7 @@
 		a.Node = n.Left.Orig
 
 	case OCLOSUREVAR:
-		if !(Curfn.Needctxt != 0) {
+		if !Curfn.Needctxt {
 			Fatal("closurevar without needctxt")
 		}
 		a.Type = obj.TYPE_MEM
@@ -383,7 +383,6 @@
 		switch n.Class {
 		default:
 			Fatal("naddr: ONAME class %v %d\n", Sconv(n.Sym, 0), n.Class)
-			fallthrough
 
 		case PEXTERN:
 			a.Name = obj.NAME_EXTERN
diff --git a/src/cmd/internal/gc/init.go b/src/cmd/internal/gc/init.go
index e738dba..2805f39 100644
--- a/src/cmd/internal/gc/init.go
+++ b/src/cmd/internal/gc/init.go
@@ -53,7 +53,7 @@
  *		return					(11)
  *	}
  */
-func anyinit(n *NodeList) int {
+func anyinit(n *NodeList) bool {
 	var h uint32
 	var s *Sym
 	var l *NodeList
@@ -68,20 +68,20 @@
 			break
 
 		case OAS:
-			if isblank(l.N.Left) && candiscard(l.N.Right) != 0 {
+			if isblank(l.N.Left) && candiscard(l.N.Right) {
 				break
 			}
 			fallthrough
 
 			// fall through
 		default:
-			return 1
+			return true
 		}
 	}
 
 	// is this main
 	if localpkg.Name == "main" {
-		return 1
+		return true
 	}
 
 	// is there an explicit init function
@@ -89,7 +89,7 @@
 
 	s = Lookup(namebuf)
 	if s.Def != nil {
-		return 1
+		return true
 	}
 
 	// are there any imported init functions
@@ -101,12 +101,12 @@
 			if s.Def == nil {
 				continue
 			}
-			return 1
+			return true
 		}
 	}
 
 	// then none
-	return 0
+	return false
 }
 
 func fninit(n *NodeList) {
@@ -126,7 +126,7 @@
 	}
 
 	n = initfix(n)
-	if !(anyinit(n) != 0) {
+	if !anyinit(n) {
 		return
 	}
 
diff --git a/src/cmd/internal/gc/inl.go b/src/cmd/internal/gc/inl.go
index 1b4b40d..73d6481 100644
--- a/src/cmd/internal/gc/inl.go
+++ b/src/cmd/internal/gc/inl.go
@@ -56,7 +56,7 @@
 		if Isptr[rcvr.Etype] != 0 {
 			rcvr = rcvr.Type
 		}
-		if !(rcvr.Sym != nil) {
+		if rcvr.Sym == nil {
 			Fatal("receiver with no sym: [%v] %v  (%v)", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Tconv(rcvr, 0))
 		}
 		return rcvr.Sym.Pkg
@@ -114,7 +114,7 @@
 	if fn.Op != ODCLFUNC {
 		Fatal("caninl %v", Nconv(fn, 0))
 	}
-	if !(fn.Nname != nil) {
+	if fn.Nname == nil {
 		Fatal("caninl no nname %v", Nconv(fn, obj.FmtSign))
 	}
 
@@ -137,7 +137,7 @@
 	}
 
 	budget = 40 // allowed hairyness
-	if ishairylist(fn.Nbody, &budget) != 0 {
+	if ishairylist(fn.Nbody, &budget) {
 		return
 	}
 
@@ -162,18 +162,18 @@
 }
 
 // Look for anything we want to punt on.
-func ishairylist(ll *NodeList, budget *int) int {
+func ishairylist(ll *NodeList, budget *int) bool {
 	for ; ll != nil; ll = ll.Next {
-		if ishairy(ll.N, budget) != 0 {
-			return 1
+		if ishairy(ll.N, budget) {
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
-func ishairy(n *Node, budget *int) int {
-	if !(n != nil) {
-		return 0
+func ishairy(n *Node, budget *int) bool {
+	if n == nil {
+		return false
 	}
 
 	// Things that are too hairy, irrespective of the budget
@@ -185,7 +185,7 @@
 		OPANIC,
 		ORECOVER:
 		if Debug['l'] < 4 {
-			return 1
+			return true
 		}
 
 	case OCLOSURE,
@@ -199,12 +199,12 @@
 		ODCLTYPE,  // can't print yet
 		ODCLCONST, // can't print yet
 		ORETJMP:
-		return 1
+		return true
 	}
 
 	(*budget)--
 
-	return bool2int(*budget < 0 || ishairy(n.Left, budget) != 0 || ishairy(n.Right, budget) != 0 || ishairylist(n.List, budget) != 0 || ishairylist(n.Rlist, budget) != 0 || ishairylist(n.Ninit, budget) != 0 || ishairy(n.Ntest, budget) != 0 || ishairy(n.Nincr, budget) != 0 || ishairylist(n.Nbody, budget) != 0 || ishairylist(n.Nelse, budget) != 0)
+	return *budget < 0 || ishairy(n.Left, budget) || ishairy(n.Right, budget) || ishairylist(n.List, budget) || ishairylist(n.Rlist, budget) || ishairylist(n.Ninit, budget) || ishairy(n.Ntest, budget) || ishairy(n.Nincr, budget) || ishairylist(n.Nbody, budget) || ishairylist(n.Nelse, budget)
 }
 
 // Inlcopy and inlcopylist recursively copy the body of a function.
@@ -506,7 +506,7 @@
 
 func tinlvar(t *Type) *Node {
 	if t.Nname != nil && !isblank(t.Nname) {
-		if !(t.Nname.Inlvar != nil) {
+		if t.Nname.Inlvar == nil {
 			Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
 		}
 		return t.Nname.Inlvar
@@ -524,7 +524,7 @@
 // parameters.
 func mkinlcall1(np **Node, fn *Node, isddd int) {
 	var i int
-	var chkargcount int
+	var chkargcount bool
 	var n *Node
 	var call *Node
 	var saveinlfn *Node
@@ -535,7 +535,7 @@
 	var ninit *NodeList
 	var body *NodeList
 	var t *Type
-	var variadic int
+	var variadic bool
 	var varargcount int
 	var multiret int
 	var vararg *Node
@@ -623,10 +623,10 @@
 		// method call with a receiver.
 		t = getthisx(fn.Type).Type
 
-		if t != nil && t.Nname != nil && !isblank(t.Nname) && !(t.Nname.Inlvar != nil) {
+		if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
 			Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
 		}
-		if !(n.Left.Left != nil) {
+		if n.Left.Left == nil {
 			Fatal("method call without receiver: %v", Nconv(n, obj.FmtSign))
 		}
 		if t == nil {
@@ -640,26 +640,26 @@
 	}
 
 	// check if inlined function is variadic.
-	variadic = 0
+	variadic = false
 
 	varargtype = nil
 	varargcount = 0
 	for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
 		if t.Isddd != 0 {
-			variadic = 1
+			variadic = true
 			varargtype = t.Type
 		}
 	}
 
 	// but if argument is dotted too forget about variadicity.
-	if variadic != 0 && isddd != 0 {
-		variadic = 0
+	if variadic && isddd != 0 {
+		variadic = false
 	}
 
 	// check if argument is actually a returned tuple from call.
 	multiret = 0
 
-	if n.List != nil && !(n.List.Next != nil) {
+	if n.List != nil && n.List.Next == nil {
 		switch n.List.N.Op {
 		case OCALL,
 			OCALLFUNC,
@@ -671,7 +671,7 @@
 		}
 	}
 
-	if variadic != 0 {
+	if variadic {
 		varargcount = count(n.List) + multiret
 		if n.Left.Op != ODOTMETH {
 			varargcount -= fn.Type.Thistuple
@@ -688,14 +688,14 @@
 	// TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
 	if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
 		// non-method call to method
-		if !(n.List != nil) {
+		if n.List == nil {
 			Fatal("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
 		}
 
 		// append receiver inlvar to LHS.
 		t = getthisx(fn.Type).Type
 
-		if t != nil && t.Nname != nil && !isblank(t.Nname) && !(t.Nname.Inlvar != nil) {
+		if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
 			Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
 		}
 		if t == nil {
@@ -706,14 +706,14 @@
 	}
 
 	// append ordinary arguments to LHS.
-	chkargcount = bool2int(n.List != nil && n.List.Next != nil)
+	chkargcount = n.List != nil && n.List.Next != nil
 
 	vararg = nil  // the slice argument to a variadic call
 	varargs = nil // the list of LHS names to put in vararg.
-	if !(chkargcount != 0) {
+	if !chkargcount {
 		// 0 or 1 expression on RHS.
 		for t = getinargx(fn.Type).Type; t != nil; t = t.Down {
-			if variadic != 0 && t.Isddd != 0 {
+			if variadic && t.Isddd != 0 {
 				vararg = tinlvar(t)
 				for i = 0; i < varargcount && ll != nil; i++ {
 					m = argvar(varargtype, i)
@@ -729,10 +729,10 @@
 	} else {
 		// match arguments except final variadic (unless the call is dotted itself)
 		for t = getinargx(fn.Type).Type; t != nil; {
-			if !(ll != nil) {
+			if ll == nil {
 				break
 			}
-			if variadic != 0 && t.Isddd != 0 {
+			if variadic && t.Isddd != 0 {
 				break
 			}
 			as.List = list(as.List, tinlvar(t))
@@ -741,7 +741,7 @@
 		}
 
 		// match varargcount arguments with variadic parameters.
-		if variadic != 0 && t != nil && t.Isddd != 0 {
+		if variadic && t != nil && t.Isddd != 0 {
 			vararg = tinlvar(t)
 			for i = 0; i < varargcount && ll != nil; i++ {
 				m = argvar(varargtype, i)
@@ -766,9 +766,9 @@
 	}
 
 	// turn the variadic args into a slice.
-	if variadic != 0 {
+	if variadic {
 		as = Nod(OAS, vararg, nil)
-		if !(varargcount != 0) {
+		if varargcount == 0 {
 			as.Right = nodnil()
 			as.Right.Type = varargtype
 		} else {
@@ -1019,7 +1019,7 @@
 }
 
 func setlno(n *Node, lno int) {
-	if !(n != nil) {
+	if n == nil {
 		return
 	}
 
diff --git a/src/cmd/internal/gc/lex.go b/src/cmd/internal/gc/lex.go
index 98cebe8..651ba5f 100644
--- a/src/cmd/internal/gc/lex.go
+++ b/src/cmd/internal/gc/lex.go
@@ -504,20 +504,20 @@
 	return i
 }
 
-func skiptopkgdef(b *obj.Biobuf) int {
+func skiptopkgdef(b *obj.Biobuf) bool {
 	var p string
 	var sz int
 
 	/* archive header */
 	p = obj.Brdline(b, '\n')
 	if p == "" {
-		return 0
+		return false
 	}
 	if obj.Blinelen(b) != 8 {
-		return 0
+		return false
 	}
 	if p != "!<arch>\n" {
-		return 0
+		return false
 	}
 
 	/* symbol table may be first; skip it */
@@ -533,9 +533,9 @@
 	sz = arsize(b, "__.PKGDEF")
 
 	if sz <= 0 {
-		return 0
+		return false
 	}
-	return 1
+	return true
 }
 
 func addidir(dir string) {
@@ -560,7 +560,7 @@
 		strings.HasPrefix(name.S, "../") || name.S == ".."
 }
 
-func findpkg(name *Strlit) int {
+func findpkg(name *Strlit) bool {
 	var p *Idir
 	var q string
 	var suffix string
@@ -568,7 +568,7 @@
 
 	if islocalname(name) {
 		if safemode != 0 || nolocalimports != 0 {
-			return 0
+			return false
 		}
 
 		// try .a before .6.  important for building libraries:
@@ -577,13 +577,13 @@
 		namebuf = fmt.Sprintf("%v.a", Zconv(name, 0))
 
 		if obj.Access(namebuf, 0) >= 0 {
-			return 1
+			return true
 		}
 		namebuf = fmt.Sprintf("%v.%c", Zconv(name, 0), Thearch.Thechar)
 		if obj.Access(namebuf, 0) >= 0 {
-			return 1
+			return true
 		}
-		return 0
+		return false
 	}
 
 	// local imports should be canonicalized already.
@@ -592,17 +592,17 @@
 	_ = q
 	if path.Clean(name.S) != name.S {
 		Yyerror("non-canonical import path %v (should be %s)", Zconv(name, 0), q)
-		return 0
+		return false
 	}
 
 	for p = idirs; p != nil; p = p.link {
 		namebuf = fmt.Sprintf("%s/%v.a", p.dir, Zconv(name, 0))
 		if obj.Access(namebuf, 0) >= 0 {
-			return 1
+			return true
 		}
 		namebuf = fmt.Sprintf("%s/%v.%c", p.dir, Zconv(name, 0), Thearch.Thechar)
 		if obj.Access(namebuf, 0) >= 0 {
-			return 1
+			return true
 		}
 	}
 
@@ -619,15 +619,15 @@
 
 		namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.a", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0))
 		if obj.Access(namebuf, 0) >= 0 {
-			return 1
+			return true
 		}
 		namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.%c", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0), Thearch.Thechar)
 		if obj.Access(namebuf, 0) >= 0 {
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 func fakeimport() {
@@ -714,7 +714,7 @@
 		}
 	}
 
-	if !(findpkg(path_) != 0) {
+	if !findpkg(path_) {
 		Yyerror("can't find import: \"%v\"", Zconv(f.U.Sval, 0))
 		errorexit()
 	}
@@ -748,7 +748,7 @@
 
 	n = len(namebuf)
 	if n > 2 && namebuf[n-2] == '.' && namebuf[n-1] == 'a' {
-		if !(skiptopkgdef(imp) != 0) {
+		if !skiptopkgdef(imp) {
 			Yyerror("import %s: not a package file", file)
 			errorexit()
 		}
@@ -946,7 +946,7 @@
 
 		for {
 
-			if escchar('"', &escflag, &v) != 0 {
+			if escchar('"', &escflag, &v) {
 				break
 			}
 			if v < utf8.RuneSelf || escflag != 0 {
@@ -988,12 +988,12 @@
 
 		/* '.' */
 	case '\'':
-		if escchar('\'', &escflag, &v) != 0 {
+		if escchar('\'', &escflag, &v) {
 			Yyerror("empty character literal or unescaped ' in character literal")
 			v = '\''
 		}
 
-		if !(escchar('\'', &escflag, &v) != 0) {
+		if !escchar('\'', &escflag, &v) {
 			Yyerror("missing '")
 			ungetc(int(v))
 		}
@@ -1629,7 +1629,7 @@
 	}
 
 	if verb == "go:linkname" {
-		if !(imported_unsafe != 0) {
+		if imported_unsafe == 0 {
 			Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
 		}
 		f := strings.Fields(cmd)
@@ -1658,7 +1658,7 @@
 	}
 
 	if verb == "go:nowritebarrier" {
-		if !(compiling_runtime != 0) {
+		if compiling_runtime == 0 {
 			Yyerror("//go:nowritebarrier only allowed in runtime")
 		}
 		nowritebarrier = true
@@ -1961,7 +1961,7 @@
 	}
 }
 
-func escchar(e int, escflg *int, val *int64) int {
+func escchar(e int, escflg *int, val *int64) bool {
 	var i int
 	var u int
 	var c int
@@ -1973,21 +1973,21 @@
 	switch c {
 	case EOF:
 		Yyerror("eof in string")
-		return 1
+		return true
 
 	case '\n':
 		Yyerror("newline in string")
-		return 1
+		return true
 
 	case '\\':
 		break
 
 	default:
 		if c == e {
-			return 1
+			return true
 		}
 		*val = int64(c)
-		return 0
+		return false
 	}
 
 	u = 0
@@ -2043,7 +2043,7 @@
 	}
 
 	*val = int64(c)
-	return 0
+	return false
 
 hex:
 	l = 0
@@ -2075,7 +2075,7 @@
 	}
 
 	*val = l
-	return 0
+	return false
 
 oct:
 	l = int64(c) - '0'
@@ -2095,7 +2095,7 @@
 	}
 
 	*val = l
-	return 0
+	return false
 }
 
 var syms = []struct {
@@ -2530,12 +2530,12 @@
 	idealbool = typ(TBOOL)
 
 	s = Pkglookup("true", builtinpkg)
-	s.Def = Nodbool(1)
+	s.Def = Nodbool(true)
 	s.Def.Sym = Lookup("true")
 	s.Def.Type = idealbool
 
 	s = Pkglookup("false", builtinpkg)
-	s.Def = Nodbool(0)
+	s.Def = Nodbool(false)
 	s.Def.Sym = Lookup("false")
 	s.Def.Type = idealbool
 
@@ -2704,14 +2704,14 @@
 
 	s = Lookup("true")
 	if s.Def == nil {
-		s.Def = Nodbool(1)
+		s.Def = Nodbool(true)
 		s.Def.Sym = s
 		s.Origpkg = builtinpkg
 	}
 
 	s = Lookup("false")
 	if s.Def == nil {
-		s.Def = Nodbool(0)
+		s.Def = Nodbool(false)
 		s.Def.Sym = s
 		s.Origpkg = builtinpkg
 	}
@@ -3163,7 +3163,7 @@
 					// leave s->block set to cause redeclaration
 					// errors if a conflicting top-level name is
 					// introduced by a different file.
-					if !(s.Def.Used != 0) && !(nsyntaxerrors != 0) {
+					if s.Def.Used == 0 && nsyntaxerrors == 0 {
 						pkgnotused(int(s.Def.Lineno), s.Def.Pkg.Path, s.Name)
 					}
 					s.Def = nil
@@ -3173,7 +3173,7 @@
 				if s.Def.Sym != s {
 					// throw away top-level name left over
 					// from previous import . "x"
-					if s.Def.Pack != nil && !(s.Def.Pack.Used != 0) && !(nsyntaxerrors != 0) {
+					if s.Def.Pack != nil && s.Def.Pack.Used == 0 && nsyntaxerrors == 0 {
 						pkgnotused(int(s.Def.Pack.Lineno), s.Def.Pack.Pkg.Path, "")
 						s.Def.Pack.Used = 1
 					}
diff --git a/src/cmd/internal/gc/mparith2.go b/src/cmd/internal/gc/mparith2.go
index 415adf0..057585c 100644
--- a/src/cmd/internal/gc/mparith2.go
+++ b/src/cmd/internal/gc/mparith2.go
@@ -44,7 +44,7 @@
 	}
 
 	a.Ovf = uint8(c)
-	if a.Ovf != 0 && !(quiet != 0) {
+	if a.Ovf != 0 && quiet == 0 {
 		Yyerror("constant shift overflow")
 	}
 }
@@ -59,7 +59,7 @@
 	i = Mpprec - 1
 	if a.A[i] != 0 {
 		a.Ovf = 1
-		if !(quiet != 0) {
+		if quiet == 0 {
 			Yyerror("constant shift overflow")
 		}
 	}
@@ -223,7 +223,7 @@
 	}
 
 	a.Ovf = uint8(c)
-	if a.Ovf != 0 && !(quiet != 0) {
+	if a.Ovf != 0 && quiet == 0 {
 		Yyerror("constant addition overflow")
 	}
 
@@ -663,15 +663,15 @@
 	q.Neg = uint8(ns ^ ds)
 }
 
-func mpiszero(a *Mpint) int {
+func mpiszero(a *Mpint) bool {
 	var i int
 
 	for i = Mpprec - 1; i >= 0; i-- {
 		if a.A[i] != 0 {
-			return 0
+			return false
 		}
 	}
-	return 1
+	return true
 }
 
 func mpdivfract(a *Mpint, b *Mpint) {
@@ -694,7 +694,7 @@
 		for j = 0; j < Mpscale; j++ {
 			x <<= 1
 			if mpcmp(&d, &n) <= 0 {
-				if !(mpiszero(&d) != 0) {
+				if !mpiszero(&d) {
 					x |= 1
 				}
 				mpsubfixfix(&n, &d)
diff --git a/src/cmd/internal/gc/order.go b/src/cmd/internal/gc/order.go
index 6f18952..743ca80 100644
--- a/src/cmd/internal/gc/order.go
+++ b/src/cmd/internal/gc/order.go
@@ -62,13 +62,13 @@
 // Ordertemp allocates a new temporary with the given type,
 // pushes it onto the temp stack, and returns it.
 // If clear is true, ordertemp emits code to zero the temporary.
-func ordertemp(t *Type, order *Order, clear int) *Node {
+func ordertemp(t *Type, order *Order, clear bool) *Node {
 	var var_ *Node
 	var a *Node
 	var l *NodeList
 
 	var_ = temp(t)
-	if clear != 0 {
+	if clear {
 		a = Nod(OAS, var_, nil)
 		typecheck(&a, Etop)
 		order.out = list(order.out, a)
@@ -101,7 +101,7 @@
 	var a *Node
 	var var_ *Node
 
-	var_ = ordertemp(t, order, clear)
+	var_ = ordertemp(t, order, clear != 0)
 	a = Nod(OAS, var_, n)
 	typecheck(&a, Etop)
 	order.out = list(order.out, a)
@@ -135,10 +135,6 @@
 	var a *Node
 
 	switch n.Op {
-	default:
-		Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
-		fallthrough
-
 	case ONAME,
 		OLITERAL:
 		return n
@@ -170,7 +166,7 @@
 
 	case OINDEX,
 		OINDEXMAP:
-		if Isfixedarray(n.Left.Type) != 0 {
+		if Isfixedarray(n.Left.Type) {
 			l = ordersafeexpr(n.Left, order)
 		} else {
 			l = ordercheapexpr(n.Left, order)
@@ -187,14 +183,17 @@
 		typecheck(&a, Erv)
 		return a
 	}
+
+	Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
+	return nil // not reached
 }
 
 // Istemp reports whether n is a temporary variable.
-func istemp(n *Node) int {
+func istemp(n *Node) bool {
 	if n.Op != ONAME {
-		return 0
+		return false
 	}
-	return bool2int(strings.HasPrefix(n.Sym.Name, "autotmp_"))
+	return strings.HasPrefix(n.Sym.Name, "autotmp_")
 }
 
 // Isaddrokay reports whether it is okay to pass n's address to runtime routines.
@@ -203,8 +202,8 @@
 // of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
 // because we emit explicit VARKILL instructions marking the end of those
 // temporaries' lifetimes.
-func isaddrokay(n *Node) int {
-	return bool2int(islvalue(n) != 0 && (n.Op != ONAME || n.Class == PEXTERN || istemp(n) != 0))
+func isaddrokay(n *Node) bool {
+	return islvalue(n) && (n.Op != ONAME || n.Class == PEXTERN || istemp(n))
 }
 
 // Orderaddrtemp ensures that *np is okay to pass by address to runtime routines.
@@ -214,7 +213,7 @@
 	var n *Node
 
 	n = *np
-	if isaddrokay(n) != 0 {
+	if isaddrokay(n) {
 		return
 	}
 	*np = ordercopyexpr(n, n.Type, order, 0)
@@ -232,7 +231,7 @@
 
 	for {
 		l = order.temp
-		if !(l != mark) {
+		if l == mark {
 			break
 		}
 		order.temp = l.Next
@@ -330,19 +329,19 @@
 
 // Ismulticall reports whether the list l is f() for a multi-value function.
 // Such an f() could appear as the lone argument to a multi-arg function.
-func ismulticall(l *NodeList) int {
+func ismulticall(l *NodeList) bool {
 	var n *Node
 
 	// one arg only
 	if l == nil || l.Next != nil {
-		return 0
+		return false
 	}
 	n = l.N
 
 	// must be call
 	switch n.Op {
 	default:
-		return 0
+		return false
 
 	case OCALLFUNC,
 		OCALLMETH,
@@ -351,7 +350,7 @@
 	}
 
 	// call must return multiple values
-	return bool2int(n.Left.Type.Outtuple > 1)
+	return n.Left.Type.Outtuple > 1
 }
 
 // Copyret emits t1, t2, ... = n, where n is a function call,
@@ -364,7 +363,7 @@
 	var l2 *NodeList
 	var tl Iter
 
-	if n.Type.Etype != TSTRUCT || !(n.Type.Funarg != 0) {
+	if n.Type.Etype != TSTRUCT || n.Type.Funarg == 0 {
 		Fatal("copyret %v %d", Tconv(n.Type, 0), n.Left.Type.Outtuple)
 	}
 
@@ -387,7 +386,7 @@
 
 // Ordercallargs orders the list of call arguments *l.
 func ordercallargs(l **NodeList, order *Order) {
-	if ismulticall(*l) != 0 {
+	if ismulticall(*l) {
 		// return f() where f() is multiple values.
 		*l = copyret((*l).N, order)
 	} else {
@@ -435,15 +434,14 @@
 	switch n.Op {
 	default:
 		Fatal("ordermapassign %v", Oconv(int(n.Op), 0))
-		fallthrough
 
 	case OAS:
 		order.out = list(order.out, n)
 
 		// We call writebarrierfat only for values > 4 pointers long. See walk.c.
-		if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) != 0 && n.Left.Type.Width > int64(4*Widthptr))) && !(isaddrokay(n.Right) != 0) {
+		if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr))) && !isaddrokay(n.Right) {
 			m = n.Left
-			n.Left = ordertemp(m.Type, order, 0)
+			n.Left = ordertemp(m.Type, order, false)
 			a = Nod(OAS, m, n.Left)
 			typecheck(&a, Etop)
 			order.out = list(order.out, a)
@@ -457,13 +455,13 @@
 		for l = n.List; l != nil; l = l.Next {
 			if l.N.Op == OINDEXMAP {
 				m = l.N
-				if !(istemp(m.Left) != 0) {
+				if !istemp(m.Left) {
 					m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
 				}
-				if !(istemp(m.Right) != 0) {
+				if !istemp(m.Right) {
 					m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
 				}
-				l.N = ordertemp(m.Type, order, 0)
+				l.N = ordertemp(m.Type, order, false)
 				a = Nod(OAS, m, l.N)
 				typecheck(&a, Etop)
 				post = list(post, a)
@@ -501,7 +499,6 @@
 	switch n.Op {
 	default:
 		Fatal("orderstmt %v", Oconv(int(n.Op), 0))
-		fallthrough
 
 	case OVARKILL:
 		order.out = list(order.out, n)
@@ -593,7 +590,7 @@
 			order.out = list(order.out, n)
 		} else {
 			typ = n.Rlist.N.Type
-			tmp1 = ordertemp(typ, order, bool2int(haspointers(typ)))
+			tmp1 = ordertemp(typ, order, haspointers(typ))
 			order.out = list(order.out, n)
 			r = Nod(OAS, n.List.N, tmp1)
 			typecheck(&r, Etop)
@@ -611,11 +608,11 @@
 		orderexprlist(n.List, order)
 		orderexpr(&n.Rlist.N.Left, order) // arg to recv
 		ch = n.Rlist.N.Left.Type
-		tmp1 = ordertemp(ch.Type, order, bool2int(haspointers(ch.Type)))
+		tmp1 = ordertemp(ch.Type, order, haspointers(ch.Type))
 		if !isblank(n.List.Next.N) {
-			tmp2 = ordertemp(n.List.Next.N.Type, order, 0)
+			tmp2 = ordertemp(n.List.Next.N.Type, order, false)
 		} else {
-			tmp2 = ordertemp(Types[TBOOL], order, 0)
+			tmp2 = ordertemp(Types[TBOOL], order, false)
 		}
 		order.out = list(order.out, n)
 		r = Nod(OAS, n.List.N, tmp1)
@@ -724,7 +721,7 @@
 		t = marktemp(order)
 
 		orderexpr(&n.Left, order)
-		if !(Isinter(n.Left.Type) != 0) {
+		if !Isinter(n.Left.Type) {
 			orderaddrtemp(&n.Left, order)
 		}
 		order.out = list(order.out, n)
@@ -745,7 +742,6 @@
 		switch n.Type.Etype {
 		default:
 			Fatal("orderstmt range %v", Tconv(n.Type, 0))
-			fallthrough
 
 			// Mark []byte(str) range expression to reuse string backing storage.
 		// It is safe because the storage cannot be mutated.
@@ -784,7 +780,7 @@
 			n.Right = ordercopyexpr(r, r.Type, order, 0)
 
 			// n->alloc is the temp for the iterator.
-			n.Alloc = ordertemp(Types[TUINT8], order, 1)
+			n.Alloc = ordertemp(Types[TUINT8], order, true)
 		}
 
 		for l = n.List; l != nil; l = l.Next {
@@ -884,7 +880,7 @@
 							l.N.Ninit = list(l.N.Ninit, tmp2)
 						}
 
-						r.Left = ordertemp(r.Right.Left.Type.Type, order, bool2int(haspointers(r.Right.Left.Type.Type)))
+						r.Left = ordertemp(r.Right.Left.Type.Type, order, haspointers(r.Right.Left.Type.Type))
 						tmp2 = Nod(OAS, tmp1, r.Left)
 						typecheck(&tmp2, Etop)
 						l.N.Ninit = list(l.N.Ninit, tmp2)
@@ -901,7 +897,7 @@
 							l.N.Ninit = list(l.N.Ninit, tmp2)
 						}
 
-						r.Ntest = ordertemp(tmp1.Type, order, 0)
+						r.Ntest = ordertemp(tmp1.Type, order, false)
 						tmp2 = Nod(OAS, tmp1, r.Ntest)
 						typecheck(&tmp2, Etop)
 						l.N.Ninit = list(l.N.Ninit, tmp2)
@@ -919,11 +915,11 @@
 					// r->left is c, r->right is x, both are always evaluated.
 					orderexpr(&r.Left, order)
 
-					if !(istemp(r.Left) != 0) {
+					if !istemp(r.Left) {
 						r.Left = ordercopyexpr(r.Left, r.Left.Type, order, 0)
 					}
 					orderexpr(&r.Right, order)
-					if !(istemp(r.Right) != 0) {
+					if !istemp(r.Right) {
 						r.Right = ordercopyexpr(r.Right, r.Right.Type, order, 0)
 					}
 				}
@@ -1003,8 +999,8 @@
 	var l *NodeList
 	var t *Type
 	var lno int
-	var haslit int
-	var hasbyte int
+	var haslit bool
+	var hasbyte bool
 
 	n = *np
 	if n == nil {
@@ -1031,7 +1027,7 @@
 			t = typ(TARRAY)
 			t.Bound = int64(count(n.List))
 			t.Type = Types[TSTRING]
-			n.Alloc = ordertemp(t, order, 0)
+			n.Alloc = ordertemp(t, order, false)
 		}
 
 		// Mark string(byteSlice) arguments to reuse byteSlice backing
@@ -1041,15 +1037,15 @@
 		// Otherwise if all other arguments are empty strings,
 		// concatstrings will return the reference to the temp string
 		// to the caller.
-		hasbyte = 0
+		hasbyte = false
 
-		haslit = 0
+		haslit = false
 		for l = n.List; l != nil; l = l.Next {
-			hasbyte |= bool2int(l.N.Op == OARRAYBYTESTR)
-			haslit |= bool2int(l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0)
+			hasbyte = hasbyte || l.N.Op == OARRAYBYTESTR
+			haslit = haslit || l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0
 		}
 
-		if haslit != 0 && hasbyte != 0 {
+		if haslit && hasbyte {
 			for l = n.List; l != nil; l = l.Next {
 				if l.N.Op == OARRAYBYTESTR {
 					l.N.Op = OARRAYBYTESTRTMP
@@ -1103,7 +1099,7 @@
 	case OCONVIFACE:
 		orderexpr(&n.Left, order)
 
-		if !(Isinter(n.Left.Type) != 0) {
+		if !Isinter(n.Left.Type) {
 			orderaddrtemp(&n.Left, order)
 		}
 
@@ -1141,7 +1137,7 @@
 
 	case OCLOSURE:
 		if n.Noescape && n.Cvars != nil {
-			n.Alloc = ordertemp(Types[TUINT8], order, 0) // walk will fill in correct type
+			n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
 		}
 
 	case OARRAYLIT,
@@ -1151,7 +1147,7 @@
 		orderexprlist(n.List, order)
 		orderexprlist(n.Rlist, order)
 		if n.Noescape {
-			n.Alloc = ordertemp(Types[TUINT8], order, 0) // walk will fill in correct type
+			n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
 		}
 
 	case ODDDARG:
@@ -1160,7 +1156,7 @@
 			// Allocate a temporary that will be cleaned up when this statement
 			// completes. We could be more aggressive and try to arrange for it
 			// to be cleaned up when the call completes.
-			n.Alloc = ordertemp(n.Type.Type, order, 0)
+			n.Alloc = ordertemp(n.Type.Type, order, false)
 		}
 
 	case ORECV,
@@ -1173,7 +1169,7 @@
 		orderexpr(&n.Left, order)
 		orderexpr(&n.Right, order)
 		t = n.Left.Type
-		if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
+		if t.Etype == TSTRUCT || Isfixedarray(t) {
 			// for complex comparisons, we need both args to be
 			// addressable so we can pass them to the runtime.
 			orderaddrtemp(&n.Left, order)
diff --git a/src/cmd/internal/gc/pgen.go b/src/cmd/internal/gc/pgen.go
index 052d7dc..5d93ee6 100644
--- a/src/cmd/internal/gc/pgen.go
+++ b/src/cmd/internal/gc/pgen.go
@@ -272,7 +272,7 @@
 	ll = Curfn.Dcl
 
 	n = ll.N
-	if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
+	if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
 		// No locals used at all
 		Curfn.Dcl = nil
 
@@ -282,7 +282,7 @@
 
 	for ll = Curfn.Dcl; ll.Next != nil; ll = ll.Next {
 		n = ll.Next.N
-		if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
+		if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
 			ll.Next = nil
 			Curfn.Dcl.End = ll
 			break
@@ -360,12 +360,12 @@
 	}
 
 	// Ideally we wouldn't see any integer types here, but we do.
-	if n.Type == nil || (!(Isptr[n.Type.Etype] != 0) && !(Isint[n.Type.Etype] != 0) && n.Type.Etype != TUNSAFEPTR) {
+	if n.Type == nil || (Isptr[n.Type.Etype] == 0 && Isint[n.Type.Etype] == 0 && n.Type.Etype != TUNSAFEPTR) {
 		Dump("checknil", n)
 		Fatal("bad checknil")
 	}
 
-	if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !(n.Addable != 0) || n.Op == OLITERAL {
+	if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || n.Addable == 0 || n.Op == OLITERAL {
 		Thearch.Regalloc(&reg, Types[Tptr], n)
 		Thearch.Cgen(n, &reg)
 		Thearch.Gins(obj.ACHECKNIL, &reg, nil)
@@ -478,7 +478,7 @@
 	if fn.Wrapper != 0 {
 		ptxt.From3.Offset |= obj.WRAPPER
 	}
-	if fn.Needctxt != 0 {
+	if fn.Needctxt {
 		ptxt.From3.Offset |= obj.NEEDCTXT
 	}
 	if fn.Nosplit {
@@ -557,7 +557,7 @@
 	Pc.Lineno = lineno
 
 	fixjmp(ptxt)
-	if !(Debug['N'] != 0) || Debug['R'] != 0 || Debug['P'] != 0 {
+	if Debug['N'] == 0 || Debug['R'] != 0 || Debug['P'] != 0 {
 		regopt(ptxt)
 		nilopt(ptxt)
 	}
diff --git a/src/cmd/internal/gc/plive.go b/src/cmd/internal/gc/plive.go
index 04173fc..73f6086 100644
--- a/src/cmd/internal/gc/plive.go
+++ b/src/cmd/internal/gc/plive.go
@@ -183,18 +183,13 @@
 // are two criteria for termination.  If the end of basic block is reached a
 // value of zero is returned.  If the callback returns a non-zero value, the
 // iteration is stopped and the value of the callback is returned.
-func blockany(bb *BasicBlock, callback func(*obj.Prog) int) int {
-	var p *obj.Prog
-	var result int
-
-	for p = bb.last; p != nil; p = p.Opt.(*obj.Prog) {
-		result = callback(p)
-		if result != 0 {
-			return result
+func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
+	for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+		if f(p) {
+			return true
 		}
 	}
-
-	return 0
+	return false
 }
 
 // Collects and returns an array of Node*s for function arguments and local
@@ -303,7 +298,7 @@
 
 var isselectcommcasecall_names [5]*obj.LSym
 
-func isselectcommcasecall(prog *obj.Prog) int {
+func isselectcommcasecall(prog *obj.Prog) bool {
 	var i int32
 
 	if isselectcommcasecall_names[0] == nil {
@@ -315,41 +310,41 @@
 
 	for i = 0; isselectcommcasecall_names[i] != nil; i++ {
 		if iscall(prog, isselectcommcasecall_names[i]) {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 // Returns true for call instructions that target runtime·newselect.
 
 var isnewselect_sym *obj.LSym
 
-func isnewselect(prog *obj.Prog) int {
+func isnewselect(prog *obj.Prog) bool {
 	if isnewselect_sym == nil {
 		isnewselect_sym = Linksym(Pkglookup("newselect", Runtimepkg))
 	}
-	return bool2int(iscall(prog, isnewselect_sym))
+	return iscall(prog, isnewselect_sym)
 }
 
 // Returns true for call instructions that target runtime·selectgo.
 
 var isselectgocall_sym *obj.LSym
 
-func isselectgocall(prog *obj.Prog) int {
+func isselectgocall(prog *obj.Prog) bool {
 	if isselectgocall_sym == nil {
 		isselectgocall_sym = Linksym(Pkglookup("selectgo", Runtimepkg))
 	}
-	return bool2int(iscall(prog, isselectgocall_sym))
+	return iscall(prog, isselectgocall_sym)
 }
 
 var isdeferreturn_sym *obj.LSym
 
-func isdeferreturn(prog *obj.Prog) int {
+func isdeferreturn(prog *obj.Prog) bool {
 	if isdeferreturn_sym == nil {
 		isdeferreturn_sym = Linksym(Pkglookup("deferreturn", Runtimepkg))
 	}
-	return bool2int(iscall(prog, isdeferreturn_sym))
+	return iscall(prog, isdeferreturn_sym)
 }
 
 // Walk backwards from a runtime·selectgo call up to its immediately dominating
@@ -366,7 +361,7 @@
 			Fatal("selectgo does not have a newselect")
 		}
 		pred = pred.pred[0]
-		if blockany(pred, isselectcommcasecall) != 0 {
+		if blockany(pred, isselectcommcasecall) {
 			// A select comm case block should have exactly one
 			// successor.
 			if len(pred.succ) != 1 {
@@ -386,7 +381,7 @@
 			addedge(selectgo, succ)
 		}
 
-		if blockany(pred, isnewselect) != 0 {
+		if blockany(pred, isnewselect) {
 			// Reached the matching newselect.
 			break
 		}
@@ -451,7 +446,7 @@
 				p.Link.Opt = newblock(p.Link)
 				cfg = append(cfg, p.Link.Opt.(*BasicBlock))
 			}
-		} else if isselectcommcasecall(p) != 0 || isselectgocall(p) != 0 {
+		} else if isselectcommcasecall(p) || isselectgocall(p) {
 			// Accommodate implicit selectgo control flow.
 			if p.Link.Opt == nil {
 				p.Link.Opt = newblock(p.Link)
@@ -478,7 +473,7 @@
 			}
 
 			// Collect basic blocks with selectgo calls.
-			if isselectgocall(p) != 0 {
+			if isselectgocall(p) {
 				selectgo = append(selectgo, bb)
 			}
 		}
@@ -627,7 +622,7 @@
 			// non-tail-call return instructions; see note above
 			// the for loop for details.
 			case PPARAMOUT:
-				if !(node.Addrtaken != 0) && prog.To.Type == obj.TYPE_NONE {
+				if node.Addrtaken == 0 && prog.To.Type == obj.TYPE_NONE {
 					bvset(uevar, i)
 				}
 			}
@@ -674,7 +669,7 @@
 						bvset(uevar, pos)
 					}
 					if info.Flags&LeftWrite != 0 {
-						if from.Node != nil && !(Isfat(((from.Node).(*Node)).Type) != 0) {
+						if from.Node != nil && !Isfat(((from.Node).(*Node)).Type) {
 							bvset(varkill, pos)
 						}
 					}
@@ -718,7 +713,7 @@
 						bvset(uevar, pos)
 					}
 					if info.Flags&RightWrite != 0 {
-						if to.Node != nil && (!(Isfat(((to.Node).(*Node)).Type) != 0) || prog.As == obj.AVARDEF) {
+						if to.Node != nil && (!Isfat(((to.Node).(*Node)).Type) || prog.As == obj.AVARDEF) {
 							bvset(varkill, pos)
 						}
 					}
@@ -1050,7 +1045,7 @@
 		if t.Bound < -1 {
 			Fatal("twobitwalktype1: invalid bound, %v", Tconv(t, 0))
 		}
-		if Isslice(t) != 0 {
+		if Isslice(t) {
 			// struct { byte *array; uintgo len; uintgo cap; }
 			if *xoffset&int64(Widthptr-1) != 0 {
 				Fatal("twobitwalktype1: invalid TARRAY alignment, %v", Tconv(t, 0))
@@ -1101,7 +1096,7 @@
 
 	for i = 0; ; i++ {
 		i = int32(bvnext(liveout, i))
-		if !(i >= 0) {
+		if i < 0 {
 			break
 		}
 		node = vars[i]
@@ -1163,8 +1158,8 @@
 
 // Returns true for instructions that are safe points that must be annotated
 // with liveness information.
-func issafepoint(prog *obj.Prog) int {
-	return bool2int(prog.As == obj.ATEXT || prog.As == obj.ACALL)
+func issafepoint(prog *obj.Prog) bool {
+	return prog.As == obj.ATEXT || prog.As == obj.ACALL
 }
 
 // Initializes the sets for solving the live variables.  Visits all the
@@ -1332,7 +1327,7 @@
 
 // This function is slow but it is only used for generating debug prints.
 // Check whether n is marked live in args/locals.
-func islive(n *Node, args *Bvec, locals *Bvec) int {
+func islive(n *Node, args *Bvec, locals *Bvec) bool {
 	var i int
 
 	switch n.Class {
@@ -1340,19 +1335,19 @@
 		PPARAMOUT:
 		for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
 			if bvget(args, int32(n.Xoffset/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
-				return 1
+				return true
 			}
 		}
 
 	case PAUTO:
 		for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
 			if bvget(locals, int32((n.Xoffset+stkptrsize)/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
-				return 1
+				return true
 			}
 		}
 	}
 
-	return 0
+	return false
 }
 
 // Visits all instructions in a basic block and computes a bit vector of live
@@ -1427,21 +1422,21 @@
 			bvor(any, any, avarinit)
 			bvor(all, all, avarinit)
 
-			if issafepoint(p) != 0 {
+			if issafepoint(p) {
 				// Annotate ambiguously live variables so that they can
 				// be zeroed at function entry.
 				// livein and liveout are dead here and used as temporaries.
 				bvresetall(livein)
 
 				bvandnot(liveout, any, all)
-				if !(bvisempty(liveout) != 0) {
+				if !bvisempty(liveout) {
 					for pos = 0; pos < liveout.n; pos++ {
-						if !(bvget(liveout, pos) != 0) {
+						if bvget(liveout, pos) == 0 {
 							continue
 						}
 						bvset(all, pos) // silence future warnings in this block
 						n = lv.vars[pos]
-						if !(n.Needzero != 0) {
+						if n.Needzero == 0 {
 							n.Needzero = 1
 							if debuglive >= 1 {
 								Warnl(int(p.Lineno), "%v: %v is ambiguously live", Nconv(Curfn.Nname, 0), Nconv(n, obj.FmtLong))
@@ -1517,7 +1512,7 @@
 			bvcopy(liveout, livein)
 			bvandnot(livein, liveout, varkill)
 			bvor(livein, livein, uevar)
-			if debuglive >= 3 && issafepoint(p) != 0 {
+			if debuglive >= 3 && issafepoint(p) {
 				fmt.Printf("%v\n", p)
 				printvars("uevar", uevar, lv.vars)
 				printvars("varkill", varkill, lv.vars)
@@ -1525,7 +1520,7 @@
 				printvars("liveout", liveout, lv.vars)
 			}
 
-			if issafepoint(p) != 0 {
+			if issafepoint(p) {
 				// Found an interesting instruction, record the
 				// corresponding liveness information.
 
@@ -1534,7 +1529,7 @@
 				// input parameters.
 				if p.As == obj.ATEXT {
 					for j = 0; j < liveout.n; j++ {
-						if !(bvget(liveout, j) != 0) {
+						if bvget(liveout, j) == 0 {
 							continue
 						}
 						n = lv.vars[j]
@@ -1574,7 +1569,7 @@
 					numlive = 0
 					for j = 0; j < int32(len(lv.vars)); j++ {
 						n = lv.vars[j]
-						if islive(n, args, locals) != 0 {
+						if islive(n, args, locals) {
 							fmt_ += fmt.Sprintf(" %v", Nconv(n, 0))
 							numlive++
 						}
@@ -1592,7 +1587,7 @@
 				// Only CALL instructions need a PCDATA annotation.
 				// The TEXT instruction annotation is implicit.
 				if p.As == obj.ACALL {
-					if isdeferreturn(p) != 0 {
+					if isdeferreturn(p) {
 						// runtime.deferreturn modifies its return address to return
 						// back to the CALL, not to the subsequent instruction.
 						// Because the return comes back one instruction early,
@@ -1760,11 +1755,11 @@
 
 	started = 0
 	for i = 0; i < len(vars); i++ {
-		if !(bvget(bits, int32(i)) != 0) {
+		if bvget(bits, int32(i)) == 0 {
 			continue
 		}
-		if !(started != 0) {
-			if !(printed != 0) {
+		if started == 0 {
+			if printed == 0 {
 				fmt.Printf("\t")
 			} else {
 				fmt.Printf(" ")
@@ -1856,14 +1851,14 @@
 			if printed != 0 {
 				fmt.Printf("\n")
 			}
-			if issafepoint(p) != 0 {
+			if issafepoint(p) {
 				args = lv.argslivepointers[pcdata]
 				locals = lv.livepointers[pcdata]
 				fmt.Printf("\tlive=")
 				printed = 0
 				for j = 0; j < len(lv.vars); j++ {
 					n = lv.vars[j]
-					if islive(n, args, locals) != 0 {
+					if islive(n, args, locals) {
 						tmp9 := printed
 						printed++
 						if tmp9 != 0 {
diff --git a/src/cmd/internal/gc/popt.go b/src/cmd/internal/gc/popt.go
index 6d69120..8a3601b 100644
--- a/src/cmd/internal/gc/popt.go
+++ b/src/cmd/internal/gc/popt.go
@@ -179,7 +179,7 @@
 
 var noreturn_symlist [10]*Sym
 
-func Noreturn(p *obj.Prog) int {
+func Noreturn(p *obj.Prog) bool {
 	var s *Sym
 	var i int
 
@@ -195,18 +195,18 @@
 	}
 
 	if p.To.Node == nil {
-		return 0
+		return false
 	}
 	s = ((p.To.Node).(*Node)).Sym
 	if s == nil {
-		return 0
+		return false
 	}
 	for i = 0; noreturn_symlist[i] != nil; i++ {
 		if s == noreturn_symlist[i] {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 // JMP chasing and removal.
@@ -325,7 +325,7 @@
 
 	// pass 4: elide JMP to next instruction.
 	// only safe if there are no jumps to JMPs anymore.
-	if !(jmploop != 0) {
+	if jmploop == 0 {
 		last = nil
 		for p = firstp; p != nil; p = p.Link {
 			if p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch == p.Link {
@@ -434,7 +434,7 @@
 	for f = start; f != nil; f = f.Link {
 		p = f.Prog
 		Thearch.Proginfo(&info, p)
-		if !(info.Flags&Break != 0) {
+		if info.Flags&Break == 0 {
 			f1 = f.Link
 			f.S1 = f1
 			f1.P1 = f
@@ -492,11 +492,11 @@
 
 	r.Rpo = 1
 	r1 = r.S1
-	if r1 != nil && !(r1.Rpo != 0) {
+	if r1 != nil && r1.Rpo == 0 {
 		n = postorder(r1, rpo2r, n)
 	}
 	r1 = r.S2
-	if r1 != nil && !(r1.Rpo != 0) {
+	if r1 != nil && r1.Rpo == 0 {
 		n = postorder(r1, rpo2r, n)
 	}
 	rpo2r[n] = r
@@ -529,26 +529,26 @@
 	return rpo1
 }
 
-func doms(idom []int32, r int32, s int32) int {
+func doms(idom []int32, r int32, s int32) bool {
 	for s > r {
 		s = idom[s]
 	}
-	return bool2int(s == r)
+	return s == r
 }
 
-func loophead(idom []int32, r *Flow) int {
+func loophead(idom []int32, r *Flow) bool {
 	var src int32
 
 	src = r.Rpo
-	if r.P1 != nil && doms(idom, src, r.P1.Rpo) != 0 {
-		return 1
+	if r.P1 != nil && doms(idom, src, r.P1.Rpo) {
+		return true
 	}
 	for r = r.P2; r != nil; r = r.P2link {
-		if doms(idom, src, r.Rpo) != 0 {
-			return 1
+		if doms(idom, src, r.Rpo) {
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 func loopmark(rpo2r **Flow, head int32, r *Flow) {
@@ -620,7 +620,7 @@
 	for i = 0; i < nr; i++ {
 		r1 = rpo2r[i]
 		r1.Loop++
-		if r1.P2 != nil && loophead(idom, r1) != 0 {
+		if r1.P2 != nil && loophead(idom, r1) {
 			loopmark(&rpo2r[0], i, r1)
 		}
 	}
@@ -718,8 +718,8 @@
 }
 
 // Is n available for merging?
-func canmerge(n *Node) int {
-	return bool2int(n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp"))
+func canmerge(n *Node) bool {
+	return n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp")
 }
 
 func mergetemp(firstp *obj.Prog) {
@@ -757,7 +757,7 @@
 	// Build list of all mergeable variables.
 	nvar = 0
 	for l = Curfn.Dcl; l != nil; l = l.Next {
-		if canmerge(l.N) != 0 {
+		if canmerge(l.N) {
 			nvar++
 		}
 	}
@@ -766,7 +766,7 @@
 	nvar = 0
 	for l = Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
-		if canmerge(n) != 0 {
+		if canmerge(n) {
 			v = &var_[nvar]
 			nvar++
 			n.Opt = v
@@ -826,9 +826,9 @@
 		if f != nil && f.Data.(*Flow) == nil {
 			p = f.Prog
 			Thearch.Proginfo(&info, p)
-			if p.To.Node == v.node && (info.Flags&RightWrite != 0) && !(info.Flags&RightRead != 0) {
+			if p.To.Node == v.node && (info.Flags&RightWrite != 0) && info.Flags&RightRead == 0 {
 				p.As = obj.ANOP
-				p.To = obj.Zprog.To
+				p.To = obj.Addr{}
 				v.removed = 1
 				if debugmerge > 0 && Debug['v'] != 0 {
 					fmt.Printf("drop write-only %v\n", Sconv(v.node.Sym, 0))
@@ -851,7 +851,7 @@
 			const (
 				SizeAny = SizeB | SizeW | SizeL | SizeQ | SizeF | SizeD
 			)
-			if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && !((info.Flags|info1.Flags)&(LeftAddr|RightAddr) != 0) && info.Flags&SizeAny == info1.Flags&SizeAny {
+			if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && (info.Flags|info1.Flags)&(LeftAddr|RightAddr) == 0 && info.Flags&SizeAny == info1.Flags&SizeAny {
 				p1.From = p.From
 				Thearch.Excise(f)
 				v.removed = 1
@@ -1010,7 +1010,7 @@
 	// Delete merged nodes from declaration list.
 	for lp = &Curfn.Dcl; ; {
 		l = *lp
-		if !(l != nil) {
+		if l == nil {
 			break
 		}
 
@@ -1126,11 +1126,11 @@
 	nkill = 0
 	for f = g.Start; f != nil; f = f.Link {
 		p = f.Prog
-		if p.As != obj.ACHECKNIL || !(Thearch.Regtyp(&p.From) != 0) {
+		if p.As != obj.ACHECKNIL || !Thearch.Regtyp(&p.From) {
 			continue
 		}
 		ncheck++
-		if Thearch.Stackaddr(&p.From) != 0 {
+		if Thearch.Stackaddr(&p.From) {
 			if Debug_checknil != 0 && p.Lineno > 1 {
 				Warnl(int(p.Lineno), "removed nil check of SP address")
 			}
@@ -1177,13 +1177,13 @@
 	for f = fcheck; f != nil; f = Uniqp(f) {
 		p = f.Prog
 		Thearch.Proginfo(&info, p)
-		if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) != 0 {
+		if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
 			// Found initialization of value we're checking for nil.
 			// without first finding the check, so this one is unchecked.
 			return
 		}
 
-		if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) != 0 {
+		if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) {
 			fcheck.Data = &killed
 			return
 		}
@@ -1249,12 +1249,12 @@
 		p = f.Prog
 		Thearch.Proginfo(&info, p)
 
-		if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) != 0 {
+		if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) {
 			fcheck.Data = &killed
 			return
 		}
 
-		if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) != 0 {
+		if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) {
 			fcheck.Data = &killed
 			return
 		}
@@ -1265,12 +1265,12 @@
 		}
 
 		// Stop if value is lost.
-		if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) != 0 {
+		if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
 			return
 		}
 
 		// Stop if memory write.
-		if (info.Flags&RightWrite != 0) && !(Thearch.Regtyp(&p.To) != 0) {
+		if (info.Flags&RightWrite != 0) && !Thearch.Regtyp(&p.To) {
 			return
 		}
 
diff --git a/src/cmd/internal/gc/racewalk.go b/src/cmd/internal/gc/racewalk.go
index fe7a82c..582f6b4 100644
--- a/src/cmd/internal/gc/racewalk.go
+++ b/src/cmd/internal/gc/racewalk.go
@@ -30,27 +30,27 @@
 // Memory accesses in the packages are either uninteresting or will cause false positives.
 var noinst_pkgs = []string{"sync", "sync/atomic"}
 
-func ispkgin(pkgs []string) int {
+func ispkgin(pkgs []string) bool {
 	var i int
 
 	if myimportpath != "" {
 		for i = 0; i < len(pkgs); i++ {
 			if myimportpath == pkgs[i] {
-				return 1
+				return true
 			}
 		}
 	}
 
-	return 0
+	return false
 }
 
-func isforkfunc(fn *Node) int {
+func isforkfunc(fn *Node) bool {
 	// Special case for syscall.forkAndExecInChild.
 	// In the child, this function must not acquire any locks, because
 	// they might have been locked at the time of the fork.  This means
 	// no rescheduling, no malloc calls, and no new stack segments.
 	// Race instrumentation does all of the above.
-	return bool2int(myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild")
+	return myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild"
 }
 
 func racewalk(fn *Node) {
@@ -58,11 +58,11 @@
 	var nodpc *Node
 	var s string
 
-	if ispkgin(omit_pkgs) != 0 || isforkfunc(fn) != 0 {
+	if ispkgin(omit_pkgs) || isforkfunc(fn) {
 		return
 	}
 
-	if !(ispkgin(noinst_pkgs) != 0) {
+	if !ispkgin(noinst_pkgs) {
 		racewalklist(fn.Nbody, nil)
 
 		// nothing interesting for race detector in fn->enter
@@ -147,7 +147,6 @@
 	switch n.Op {
 	default:
 		Fatal("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
-		fallthrough
 
 	case OAS,
 		OAS2FUNC:
@@ -263,7 +262,7 @@
 		OLEN,
 		OCAP:
 		racewalknode(&n.Left, init, 0, 0)
-		if Istype(n.Left.Type, TMAP) != 0 {
+		if Istype(n.Left.Type, TMAP) {
 			n1 = Nod(OCONVNOP, n.Left, nil)
 			n1.Type = Ptrto(Types[TUINT8])
 			n1 = Nod(OIND, n1, nil)
@@ -326,9 +325,9 @@
 		goto ret
 
 	case OINDEX:
-		if !(Isfixedarray(n.Left.Type) != 0) {
+		if !Isfixedarray(n.Left.Type) {
 			racewalknode(&n.Left, init, 0, 0)
-		} else if !(islvalue(n.Left) != 0) {
+		} else if !islvalue(n.Left) {
 			// index of unaddressable array, like Map[k][i].
 			racewalknode(&n.Left, init, wr, 0)
 
@@ -468,34 +467,34 @@
 	*np = n
 }
 
-func isartificial(n *Node) int {
+func isartificial(n *Node) bool {
 	// compiler-emitted artificial things that we do not want to instrument,
 	// can't possibly participate in a data race.
 	if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
 		if n.Sym.Name == "_" {
-			return 1
+			return true
 		}
 
 		// autotmp's are always local
 		if strings.HasPrefix(n.Sym.Name, "autotmp_") {
-			return 1
+			return true
 		}
 
 		// statictmp's are read-only
 		if strings.HasPrefix(n.Sym.Name, "statictmp_") {
-			return 1
+			return true
 		}
 
 		// go.itab is accessed only by the compiler and runtime (assume safe)
 		if n.Sym.Pkg != nil && n.Sym.Pkg.Name != "" && n.Sym.Pkg.Name == "go.itab" {
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
-func callinstr(np **Node, init **NodeList, wr int, skip int) int {
+func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
 	var name string
 	var f *Node
 	var b *Node
@@ -510,18 +509,18 @@
 	//	  n, n->op, n->type ? n->type->etype : -1, n->class);
 
 	if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
-		return 0
+		return false
 	}
 	t = n.Type
-	if isartificial(n) != 0 {
-		return 0
+	if isartificial(n) {
+		return false
 	}
 
 	b = outervalue(n)
 
 	// it skips e.g. stores to ... parameter array
-	if isartificial(b) != 0 {
-		return 0
+	if isartificial(b) {
+		return false
 	}
 	class = int(b.Class)
 
@@ -539,7 +538,7 @@
 
 		n = treecopy(n)
 		makeaddable(n)
-		if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
+		if t.Etype == TSTRUCT || Isfixedarray(t) {
 			name = "racereadrange"
 			if wr != 0 {
 				name = "racewriterange"
@@ -554,10 +553,10 @@
 		}
 
 		*init = list(*init, f)
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 // makeaddable returns a node whose memory location is the
@@ -572,7 +571,7 @@
 	// an addressable value.
 	switch n.Op {
 	case OINDEX:
-		if Isfixedarray(n.Left.Type) != 0 {
+		if Isfixedarray(n.Left.Type) {
 			makeaddable(n.Left)
 		}
 
@@ -596,7 +595,7 @@
 	var r *Node
 
 	r = Nod(OADDR, n, nil)
-	r.Bounded = 1
+	r.Bounded = true
 	r = conv(r, Types[TUNSAFEPTR])
 	r = conv(r, Types[TUINTPTR])
 	return r
diff --git a/src/cmd/internal/gc/range.go b/src/cmd/internal/gc/range.go
index 1e33da3..bb30bcf 100644
--- a/src/cmd/internal/gc/range.go
+++ b/src/cmd/internal/gc/range.go
@@ -43,7 +43,7 @@
 		}
 	}
 
-	if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+	if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
 		t = t.Type
 	}
 	n.Type = t
@@ -63,7 +63,7 @@
 		t2 = t.Type
 
 	case TCHAN:
-		if !(t.Chan&Crecv != 0) {
+		if t.Chan&Crecv == 0 {
 			Yyerror("invalid operation: range %v (receive from send-only type %v)", Nconv(n.Right, 0), Tconv(n.Right.Type, 0))
 			goto out
 		}
@@ -184,7 +184,6 @@
 	switch t.Etype {
 	default:
 		Fatal("walkrange")
-		fallthrough
 
 		// Lower n into runtime·memclr if possible, for
 	// fast zeroing of slices and arrays (issue 5373).
@@ -196,8 +195,8 @@
 	//
 	// in which the evaluation of a is side-effect-free.
 	case TARRAY:
-		if !(Debug['N'] != 0) {
-			if !(flag_race != 0) {
+		if Debug['N'] == 0 {
+			if flag_race == 0 {
 				if v1 != nil {
 					if v2 == nil {
 						if n.Nbody != nil {
@@ -206,10 +205,10 @@
 									tmp = n.Nbody.N // first statement of body
 									if tmp.Op == OAS {
 										if tmp.Left.Op == OINDEX {
-											if samesafeexpr(tmp.Left.Left, a) != 0 {
-												if samesafeexpr(tmp.Left.Right, v1) != 0 {
+											if samesafeexpr(tmp.Left.Left, a) {
+												if samesafeexpr(tmp.Left.Right, v1) {
 													if t.Type.Width > 0 {
-														if iszero(tmp.Right) != 0 {
+														if iszero(tmp.Right) {
 															// Convert to
 															// if len(a) != 0 {
 															// 	hp = &a[0]
@@ -227,7 +226,7 @@
 															hp = temp(Ptrto(Types[TUINT8]))
 
 															tmp = Nod(OINDEX, a, Nodintconst(0))
-															tmp.Bounded = 1
+															tmp.Bounded = true
 															tmp = Nod(OADDR, tmp, nil)
 															tmp = Nod(OCONVNOP, tmp, nil)
 															tmp.Type = Ptrto(Types[TUINT8])
@@ -282,7 +281,7 @@
 		if v2 != nil {
 			hp = temp(Ptrto(n.Type.Type))
 			tmp = Nod(OINDEX, ha, Nodintconst(0))
-			tmp.Bounded = 1
+			tmp.Bounded = true
 			init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
 		}
 
@@ -369,7 +368,7 @@
 		}
 		hb = temp(Types[TBOOL])
 
-		n.Ntest = Nod(ONE, hb, Nodbool(0))
+		n.Ntest = Nod(ONE, hb, Nodbool(false))
 		a = Nod(OAS2RECV, nil, nil)
 		a.Typecheck = 1
 		a.List = list(list1(hv1), hb)
diff --git a/src/cmd/internal/gc/reflect.go b/src/cmd/internal/gc/reflect.go
index 4be0f1d..ba9b75d 100644
--- a/src/cmd/internal/gc/reflect.go
+++ b/src/cmd/internal/gc/reflect.go
@@ -355,7 +355,7 @@
 	// type stored in interface word
 	it = t
 
-	if !(isdirectiface(it) != 0) {
+	if !isdirectiface(it) {
 		it = Ptrto(t)
 	}
 
@@ -370,10 +370,10 @@
 		if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
 			Fatal("non-method on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
 		}
-		if !(getthisx(f.Type).Type != nil) {
+		if getthisx(f.Type).Type == nil {
 			Fatal("receiver with no type on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
 		}
-		if f.Nointerface != 0 {
+		if f.Nointerface {
 			continue
 		}
 
@@ -391,7 +391,7 @@
 		if Isptr[this.Etype] != 0 && this.Type == t {
 			continue
 		}
-		if Isptr[this.Etype] != 0 && !(Isptr[t.Etype] != 0) && f.Embedded != 2 && !(isifacemethod(f.Type) != 0) {
+		if Isptr[this.Etype] != 0 && Isptr[t.Etype] == 0 && f.Embedded != 2 && !isifacemethod(f.Type) {
 			continue
 		}
 
@@ -412,7 +412,7 @@
 		a.type_ = methodfunc(f.Type, t)
 		a.mtype = methodfunc(f.Type, nil)
 
-		if !(a.isym.Flags&SymSiggen != 0) {
+		if a.isym.Flags&SymSiggen == 0 {
 			a.isym.Flags |= SymSiggen
 			if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
 				compiling_wrappers = 1
@@ -421,7 +421,7 @@
 			}
 		}
 
-		if !(a.tsym.Flags&SymSiggen != 0) {
+		if a.tsym.Flags&SymSiggen == 0 {
 			a.tsym.Flags |= SymSiggen
 			if !Eqtype(this, t) {
 				compiling_wrappers = 1
@@ -489,7 +489,7 @@
 		// code can refer to it.
 		isym = methodsym(method, t, 0)
 
-		if !(isym.Flags&SymSiggen != 0) {
+		if isym.Flags&SymSiggen == 0 {
 			isym.Flags |= SymSiggen
 			genwrapper(t, f, isym, 0)
 		}
@@ -649,7 +649,7 @@
 
 func haspointers(t *Type) bool {
 	var t1 *Type
-	var ret int
+	var ret bool
 
 	if t.Haspointers != 0 {
 		return t.Haspointers-1 != 0
@@ -672,26 +672,26 @@
 		TCOMPLEX64,
 		TCOMPLEX128,
 		TBOOL:
-		ret = 0
+		ret = false
 
 	case TARRAY:
 		if t.Bound < 0 { // slice
-			ret = 1
+			ret = true
 			break
 		}
 
 		if t.Bound == 0 { // empty array
-			ret = 0
+			ret = false
 			break
 		}
 
-		ret = bool2int(haspointers(t.Type))
+		ret = haspointers(t.Type)
 
 	case TSTRUCT:
-		ret = 0
+		ret = false
 		for t1 = t.Type; t1 != nil; t1 = t1.Down {
 			if haspointers(t1.Type) {
-				ret = 1
+				ret = true
 				break
 			}
 		}
@@ -706,11 +706,11 @@
 		TFUNC:
 		fallthrough
 	default:
-		ret = 1
+		ret = true
 	}
 
-	t.Haspointers = uint8(1 + ret)
-	return ret != 0
+	t.Haspointers = 1 + uint8(bool2int(ret))
+	return ret
 }
 
 /*
@@ -724,7 +724,7 @@
 	var i int
 	var alg int
 	var sizeofAlg int
-	var gcprog int
+	var gcprog bool
 	var sptr *Sym
 	var algsym *Sym
 	var zero *Sym
@@ -751,7 +751,7 @@
 		algsym = dalgsym(t)
 	}
 
-	if t.Sym != nil && !(Isptr[t.Etype] != 0) {
+	if t.Sym != nil && Isptr[t.Etype] == 0 {
 		sptr = dtypesym(Ptrto(t))
 	} else {
 		sptr = weaktypesym(Ptrto(t))
@@ -811,10 +811,10 @@
 	if !haspointers(t) {
 		i |= obj.KindNoPointers
 	}
-	if isdirectiface(t) != 0 {
+	if isdirectiface(t) {
 		i |= obj.KindDirectIface
 	}
-	if gcprog != 0 {
+	if gcprog {
 		i |= obj.KindGCProg
 	}
 	ot = duint8(s, ot, uint8(i)) // kind
@@ -825,7 +825,7 @@
 	}
 
 	// gc
-	if gcprog != 0 {
+	if gcprog {
 		gengcprog(t, &gcprog0, &gcprog1)
 		if gcprog0 != nil {
 			ot = dsymptr(s, ot, gcprog0, 0)
@@ -937,7 +937,7 @@
 	var s *Sym
 	var n *Node
 
-	if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) != 0 {
+	if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) {
 		Fatal("typename %v", Tconv(t, 0))
 	}
 	s = typesym(t)
@@ -987,7 +987,7 @@
  * Returns 1 if t has a reflexive equality operator.
  * That is, if x==x for all x of type t.
  */
-func isreflexive(t *Type) int {
+func isreflexive(t *Type) bool {
 	var t1 *Type
 	switch t.Etype {
 	case TBOOL,
@@ -1007,33 +1007,33 @@
 		TUNSAFEPTR,
 		TSTRING,
 		TCHAN:
-		return 1
+		return true
 
 	case TFLOAT32,
 		TFLOAT64,
 		TCOMPLEX64,
 		TCOMPLEX128,
 		TINTER:
-		return 0
+		return false
 
 	case TARRAY:
-		if Isslice(t) != 0 {
+		if Isslice(t) {
 			Fatal("slice can't be a map key: %v", Tconv(t, 0))
 		}
 		return isreflexive(t.Type)
 
 	case TSTRUCT:
 		for t1 = t.Type; t1 != nil; t1 = t1.Down {
-			if !(isreflexive(t1.Type) != 0) {
-				return 0
+			if !isreflexive(t1.Type) {
+				return false
 			}
 		}
 
-		return 1
+		return true
 
 	default:
 		Fatal("bad type for map key: %v", Tconv(t, 0))
-		return 0
+		return false
 	}
 }
 
@@ -1062,7 +1062,7 @@
 		t = Types[t.Etype]
 	}
 
-	if isideal(t) != 0 {
+	if isideal(t) {
 		Fatal("dtypesym %v", Tconv(t, 0))
 	}
 
@@ -1090,7 +1090,7 @@
 	}
 
 	// named types from other files are defined only by those files
-	if tbase.Sym != nil && !(tbase.Local != 0) {
+	if tbase.Sym != nil && tbase.Local == 0 {
 		return s
 	}
 	if isforw[tbase.Etype] != 0 {
@@ -1230,7 +1230,7 @@
 		}
 
 		ot = duint16(s, ot, uint16(mapbucket(t).Width))
-		ot = duint8(s, ot, uint8(isreflexive(t.Down)))
+		ot = duint8(s, ot, uint8(bool2int(isreflexive(t.Down))))
 
 	case TPTR32,
 		TPTR64:
@@ -1265,7 +1265,7 @@
 		ot = duintxx(s, ot, uint64(n), Widthint)
 		for t1 = t.Type; t1 != nil; t1 = t1.Down {
 			// ../../runtime/type.go:/structField
-			if t1.Sym != nil && !(t1.Embedded != 0) {
+			if t1.Sym != nil && t1.Embedded == 0 {
 				ot = dgostringptr(s, ot, t1.Sym.Name)
 				if exportname(t1.Sym.Name) {
 					ot = dgostringptr(s, ot, "")
@@ -1447,12 +1447,12 @@
 	return s
 }
 
-func usegcprog(t *Type) int {
+func usegcprog(t *Type) bool {
 	var size int64
 	var nptr int64
 
 	if !haspointers(t) {
-		return 0
+		return false
 	}
 	if t.Width == BADWIDTH {
 		dowidth(t)
@@ -1473,7 +1473,7 @@
 	// While large objects usually contain arrays; and even if they don't
 	// the program uses 2-bits per word while mask uses 4-bits per word,
 	// so the program is still smaller.
-	return bool2int(size > int64(2*Widthptr))
+	return size > int64(2*Widthptr)
 }
 
 // Generates sparse GC bitmask (4 bits per word).
@@ -1483,7 +1483,7 @@
 	var nptr int64
 	var i int64
 	var j int64
-	var half int
+	var half bool
 	var bits uint8
 	var pos []byte
 
@@ -1505,7 +1505,7 @@
 	pos = gcmask
 
 	nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
-	half = 0
+	half = false
 
 	// If number of words is odd, repeat the mask.
 	// This makes simpler handling of arrays in runtime.
@@ -1520,12 +1520,12 @@
 				bits = obj.BitsScalar
 			}
 			bits <<= 2
-			if half != 0 {
+			if half {
 				bits <<= 4
 			}
 			pos[0] |= byte(bits)
-			half = bool2int(!(half != 0))
-			if !(half != 0) {
+			half = !half
+			if !half {
 				pos = pos[1:]
 			}
 		}
@@ -1699,7 +1699,7 @@
 		*xoffset += t.Width
 
 	case TARRAY:
-		if Isslice(t) != 0 {
+		if Isslice(t) {
 			proggendata(g, obj.BitsPointer)
 			proggendata(g, obj.BitsScalar)
 			proggendata(g, obj.BitsScalar)
diff --git a/src/cmd/internal/gc/reg.go b/src/cmd/internal/gc/reg.go
index 4cc9286..37a394c 100644
--- a/src/cmd/internal/gc/reg.go
+++ b/src/cmd/internal/gc/reg.go
@@ -74,7 +74,7 @@
 	var v *Var
 	var node *Node
 
-	for bany(&bit) != 0 {
+	for bany(&bit) {
 		// convert each bit to a variable
 		i = bnum(bit)
 
@@ -169,9 +169,9 @@
 	p1.From.Type = obj.TYPE_REG
 	p1.From.Reg = int16(rn)
 	p1.From.Name = obj.NAME_NONE
-	if !(f != 0) {
+	if f == 0 {
 		p1.From = *a
-		*a = obj.Zprog.From
+		*a = obj.Addr{}
 		a.Type = obj.TYPE_REG
 		a.Reg = int16(rn)
 	}
@@ -182,18 +182,18 @@
 	Ostats.Nspill++
 }
 
-func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) int {
+func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
 	var t1 int64
 	var t2 int64
 
 	t1 = o1 + int64(w1)
 	t2 = o2 + int64(w2)
 
-	if !(t1 > o2 && t2 > o1) {
-		return 0
+	if t1 <= o2 || t2 <= o1 {
+		return false
 	}
 
-	return 1
+	return true
 }
 
 func mkvar(f *Flow, a *obj.Addr) Bits {
@@ -292,7 +292,7 @@
 				if int(v.etype) == et {
 					if int64(v.width) == w {
 						// TODO(rsc): Remove special case for arm here.
-						if !(flag != 0) || Thearch.Thechar != '5' {
+						if flag == 0 || Thearch.Thechar != '5' {
 							return blsh(uint(i))
 						}
 					}
@@ -300,7 +300,7 @@
 			}
 
 			// if they overlap, disable both
-			if overlap_reg(v.offset, v.width, o, int(w)) != 0 {
+			if overlap_reg(v.offset, v.width, o, int(w)) {
 				//				print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
 				v.addr = 1
 
@@ -446,7 +446,7 @@
 
 		switch f1.Prog.As {
 		case obj.ACALL:
-			if Noreturn(f1.Prog) != 0 {
+			if Noreturn(f1.Prog) {
 				break
 			}
 
@@ -499,7 +499,7 @@
 					// This will set the bits at most twice, keeping the overall loop linear.
 					v1, _ = v.node.Opt.(*Var)
 
-					if v == v1 || !(btest(&cal, uint(v1.id)) != 0) {
+					if v == v1 || !btest(&cal, uint(v1.id)) {
 						for ; v1 != nil; v1 = v1.nextinnode {
 							biset(&cal, uint(v1.id))
 						}
@@ -633,7 +633,7 @@
 		return
 	}
 	for {
-		if !(r.refbehind.b[z]&bb != 0) {
+		if r.refbehind.b[z]&bb == 0 {
 			break
 		}
 		f1 = f.P1
@@ -641,7 +641,7 @@
 			break
 		}
 		r1 = f1.Data.(*Reg)
-		if !(r1.refahead.b[z]&bb != 0) {
+		if r1.refahead.b[z]&bb == 0 {
 			break
 		}
 		if r1.act.b[z]&bb != 0 {
@@ -679,7 +679,7 @@
 			}
 		}
 
-		if !(r.refahead.b[z]&bb != 0) {
+		if r.refahead.b[z]&bb == 0 {
 			break
 		}
 		f1 = f.S2
@@ -696,7 +696,7 @@
 		if r.act.b[z]&bb != 0 {
 			break
 		}
-		if !(r.refbehind.b[z]&bb != 0) {
+		if r.refbehind.b[z]&bb == 0 {
 			break
 		}
 	}
@@ -714,11 +714,11 @@
 	bb = 1 << uint(bn%64)
 	vreg = regbits
 	r = f.Data.(*Reg)
-	if !(r.act.b[z]&bb != 0) {
+	if r.act.b[z]&bb == 0 {
 		return vreg
 	}
 	for {
-		if !(r.refbehind.b[z]&bb != 0) {
+		if r.refbehind.b[z]&bb == 0 {
 			break
 		}
 		f1 = f.P1
@@ -726,10 +726,10 @@
 			break
 		}
 		r1 = f1.Data.(*Reg)
-		if !(r1.refahead.b[z]&bb != 0) {
+		if r1.refahead.b[z]&bb == 0 {
 			break
 		}
-		if !(r1.act.b[z]&bb != 0) {
+		if r1.act.b[z]&bb == 0 {
 			break
 		}
 		f = f1
@@ -753,7 +753,7 @@
 			}
 		}
 
-		if !(r.refahead.b[z]&bb != 0) {
+		if r.refahead.b[z]&bb == 0 {
 			break
 		}
 		f1 = f.S2
@@ -767,10 +767,10 @@
 			break
 		}
 		r = f.Data.(*Reg)
-		if !(r.act.b[z]&bb != 0) {
+		if r.act.b[z]&bb == 0 {
 			break
 		}
-		if !(r.refbehind.b[z]&bb != 0) {
+		if r.refbehind.b[z]&bb == 0 {
 			break
 		}
 	}
@@ -793,7 +793,7 @@
 		return
 	}
 	for {
-		if !(r.refbehind.b[z]&bb != 0) {
+		if r.refbehind.b[z]&bb == 0 {
 			break
 		}
 		f1 = f.P1
@@ -801,7 +801,7 @@
 			break
 		}
 		r1 = f1.Data.(*Reg)
-		if !(r1.refahead.b[z]&bb != 0) {
+		if r1.refahead.b[z]&bb == 0 {
 			break
 		}
 		if r1.act.b[z]&bb != 0 {
@@ -851,7 +851,7 @@
 			}
 		}
 
-		if !(r.refahead.b[z]&bb != 0) {
+		if r.refahead.b[z]&bb == 0 {
 			break
 		}
 		f1 = f.S2
@@ -868,7 +868,7 @@
 		if r.act.b[z]&bb != 0 {
 			break
 		}
-		if !(r.refbehind.b[z]&bb != 0) {
+		if r.refbehind.b[z]&bb == 0 {
 			break
 		}
 	}
@@ -896,33 +896,33 @@
 		for z = 0; z < BITS; z++ {
 			bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z] | 0
 		}
-		if bany(&bit) != 0 {
+		if bany(&bit) {
 			fmt.Printf("\t")
-			if bany(&r.set) != 0 {
+			if bany(&r.set) {
 				fmt.Printf(" s:%v", Qconv(r.set, 0))
 			}
-			if bany(&r.use1) != 0 {
+			if bany(&r.use1) {
 				fmt.Printf(" u1:%v", Qconv(r.use1, 0))
 			}
-			if bany(&r.use2) != 0 {
+			if bany(&r.use2) {
 				fmt.Printf(" u2:%v", Qconv(r.use2, 0))
 			}
-			if bany(&r.refbehind) != 0 {
+			if bany(&r.refbehind) {
 				fmt.Printf(" rb:%v ", Qconv(r.refbehind, 0))
 			}
-			if bany(&r.refahead) != 0 {
+			if bany(&r.refahead) {
 				fmt.Printf(" ra:%v ", Qconv(r.refahead, 0))
 			}
-			if bany(&r.calbehind) != 0 {
+			if bany(&r.calbehind) {
 				fmt.Printf(" cb:%v ", Qconv(r.calbehind, 0))
 			}
-			if bany(&r.calahead) != 0 {
+			if bany(&r.calahead) {
 				fmt.Printf(" ca:%v ", Qconv(r.calahead, 0))
 			}
-			if bany(&r.regdiff) != 0 {
+			if bany(&r.regdiff) {
 				fmt.Printf(" d:%v ", Qconv(r.regdiff, 0))
 			}
-			if bany(&r.act) != 0 {
+			if bany(&r.act) {
 				fmt.Printf(" a:%v ", Qconv(r.act, 0))
 			}
 		}
@@ -1052,7 +1052,7 @@
 		r.set.b[0] |= info.Regset
 
 		bit = mkvar(f, &p.From)
-		if bany(&bit) != 0 {
+		if bany(&bit) {
 			if info.Flags&LeftAddr != 0 {
 				setaddrs(bit)
 			}
@@ -1080,7 +1080,7 @@
 		}
 
 		bit = mkvar(f, &p.To)
-		if bany(&bit) != 0 {
+		if bany(&bit) {
 			if info.Flags&RightAddr != 0 {
 				setaddrs(bit)
 			}
@@ -1143,7 +1143,7 @@
 
 	for f = firstf; f != nil; f = f.Link {
 		p = f.Prog
-		if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) != 0 && ((p.To.Node).(*Node)).Opt != nil {
+		if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt != nil {
 			active++
 			walkvardef(p.To.Node.(*Node), f, active)
 		}
@@ -1172,7 +1172,7 @@
 
 	for f = firstf; f != nil; f = f1 {
 		f1 = f.Link
-		if f1 != nil && f1.Active != 0 && !(f.Active != 0) {
+		if f1 != nil && f1.Active != 0 && f.Active == 0 {
 			prop(f, zbits, zbits)
 			i = 1
 		}
@@ -1244,7 +1244,7 @@
 		for z = 0; z < BITS; z++ {
 			bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
 		}
-		if bany(&bit) != 0 && !(f.Refset != 0) {
+		if bany(&bit) && f.Refset == 0 {
 			// should never happen - all variables are preset
 			if Debug['w'] != 0 {
 				fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), Qconv(bit, 0))
@@ -1262,7 +1262,7 @@
 		for z = 0; z < BITS; z++ {
 			bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
 		}
-		if bany(&bit) != 0 && !(f.Refset != 0) {
+		if bany(&bit) && f.Refset == 0 {
 			if Debug['w'] != 0 {
 				fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), Qconv(bit, 0))
 			}
@@ -1273,7 +1273,7 @@
 		for z = 0; z < BITS; z++ {
 			bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
 		}
-		for bany(&bit) != 0 {
+		for bany(&bit) {
 			i = bnum(bit)
 			change = 0
 			paint1(f, i)
@@ -1354,7 +1354,7 @@
 	 * pass 7
 	 * peep-hole on basic block
 	 */
-	if !(Debug['R'] != 0) || Debug['P'] != 0 {
+	if Debug['R'] == 0 || Debug['P'] != 0 {
 		Thearch.Peep(firstp)
 	}
 
diff --git a/src/cmd/internal/gc/select.go b/src/cmd/internal/gc/select.go
index 9e659d1..ab7a144 100644
--- a/src/cmd/internal/gc/select.go
+++ b/src/cmd/internal/gc/select.go
@@ -134,7 +134,6 @@
 			switch n.Op {
 			default:
 				Fatal("select %v", Oconv(int(n.Op), 0))
-				fallthrough
 
 				// ok already
 			case OSEND:
@@ -232,7 +231,6 @@
 		switch n.Op {
 		default:
 			Fatal("select %v", Oconv(int(n.Op), 0))
-			fallthrough
 
 			// if selectnbsend(c, v) { body } else { default body }
 		case OSEND:
@@ -299,7 +297,6 @@
 			switch n.Op {
 			default:
 				Fatal("select %v", Oconv(int(n.Op), 0))
-				fallthrough
 
 				// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
 			case OSEND:
diff --git a/src/cmd/internal/gc/sinit.go b/src/cmd/internal/gc/sinit.go
index 6d044f1..57422b4 100644
--- a/src/cmd/internal/gc/sinit.go
+++ b/src/cmd/internal/gc/sinit.go
@@ -155,7 +155,7 @@
 			if n.Defn.Left != n {
 				goto bad
 			}
-			if isblank(n.Defn.Left) && candiscard(n.Defn.Right) != 0 {
+			if isblank(n.Defn.Left) && candiscard(n.Defn.Right) {
 				n.Defn.Op = OEMPTY
 				n.Defn.Left = nil
 				n.Defn.Right = nil
@@ -166,7 +166,7 @@
 			if Debug['j'] != 0 {
 				fmt.Printf("%v\n", Sconv(n.Sym, 0))
 			}
-			if isblank(n) || !(staticinit(n, out) != 0) {
+			if isblank(n) || !staticinit(n, out) {
 				if Debug['%'] != 0 {
 					Dump("nonstatic", n.Defn)
 				}
@@ -275,7 +275,7 @@
  * compilation of top-level (static) assignments
  * into DATA statements if at all possible.
  */
-func staticinit(n *Node, out **NodeList) int {
+func staticinit(n *Node, out **NodeList) bool {
 	var l *Node
 	var r *Node
 
@@ -291,7 +291,7 @@
 
 // like staticassign but we are copying an already
 // initialized value r.
-func staticcopy(l *Node, r *Node, out **NodeList) int {
+func staticcopy(l *Node, r *Node, out **NodeList) bool {
 	var i int
 	var e *InitEntry
 	var p *InitPlan
@@ -302,37 +302,37 @@
 	var n1 Node
 
 	if r.Op != ONAME || r.Class != PEXTERN || r.Sym.Pkg != localpkg {
-		return 0
+		return false
 	}
 	if r.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
-		return 0
+		return false
 	}
 	if r.Defn.Op != OAS {
-		return 0
+		return false
 	}
 	orig = r
 	r = r.Defn.Right
 
 	switch r.Op {
 	case ONAME:
-		if staticcopy(l, r, out) != 0 {
-			return 1
+		if staticcopy(l, r, out) {
+			return true
 		}
 		*out = list(*out, Nod(OAS, l, r))
-		return 1
+		return true
 
 	case OLITERAL:
-		if iszero(r) != 0 {
-			return 1
+		if iszero(r) {
+			return true
 		}
 		gdata(l, r, int(l.Type.Width))
-		return 1
+		return true
 
 	case OADDR:
 		switch r.Left.Op {
 		case ONAME:
 			gdata(l, r, int(l.Type.Width))
-			return 1
+			return true
 		}
 
 	case OPTRLIT:
@@ -347,11 +347,11 @@
 			OMAPLIT:
 			gdata(l, Nod(OADDR, r.Nname, nil), int(l.Type.Width))
 
-			return 1
+			return true
 		}
 
 	case OARRAYLIT:
-		if Isslice(r.Type) != 0 {
+		if Isslice(r.Type) {
 			// copy slice
 			a = r.Nname
 
@@ -362,7 +362,7 @@
 			gdata(&n1, r.Right, Widthint)
 			n1.Xoffset = l.Xoffset + int64(Array_cap)
 			gdata(&n1, r.Right, Widthint)
-			return 1
+			return true
 		}
 		fallthrough
 
@@ -381,7 +381,7 @@
 				ll = Nod(OXXX, nil, nil)
 				*ll = n1
 				ll.Orig = ll // completely separate copy
-				if !(staticassign(ll, e.Expr, out) != 0) {
+				if !staticassign(ll, e.Expr, out) {
 					// Requires computation, but we're
 					// copying someone else's computation.
 					rr = Nod(OXXX, nil, nil)
@@ -395,13 +395,13 @@
 			}
 		}
 
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
-func staticassign(l *Node, r *Node, out **NodeList) int {
+func staticassign(l *Node, r *Node, out **NodeList) bool {
 	var a *Node
 	var n1 Node
 	var nam Node
@@ -422,18 +422,18 @@
 		}
 
 	case OLITERAL:
-		if iszero(r) != 0 {
-			return 1
+		if iszero(r) {
+			return true
 		}
 		gdata(l, r, int(l.Type.Width))
-		return 1
+		return true
 
 	case OADDR:
-		if stataddr(&nam, r.Left) != 0 {
+		if stataddr(&nam, r.Left) {
 			n1 = *r
 			n1.Left = &nam
 			gdata(l, &n1, int(l.Type.Width))
-			return 1
+			return true
 		}
 		fallthrough
 
@@ -453,22 +453,22 @@
 			gdata(l, Nod(OADDR, a, nil), int(l.Type.Width))
 
 			// Init underlying literal.
-			if !(staticassign(a, r.Left, out) != 0) {
+			if !staticassign(a, r.Left, out) {
 				*out = list(*out, Nod(OAS, a, r.Left))
 			}
-			return 1
+			return true
 		}
 
 	case OSTRARRAYBYTE:
 		if l.Class == PEXTERN && r.Left.Op == OLITERAL {
 			sval = r.Left.Val.U.Sval
 			slicebytes(l, sval.S, len(sval.S))
-			return 1
+			return true
 		}
 
 	case OARRAYLIT:
 		initplan(r)
-		if Isslice(r.Type) != 0 {
+		if Isslice(r.Type) {
 			// Init slice.
 			ta = typ(TARRAY)
 
@@ -505,20 +505,20 @@
 				a = Nod(OXXX, nil, nil)
 				*a = n1
 				a.Orig = a // completely separate copy
-				if !(staticassign(a, e.Expr, out) != 0) {
+				if !staticassign(a, e.Expr, out) {
 					*out = list(*out, Nod(OAS, a, e.Expr))
 				}
 			}
 		}
 
-		return 1
+		return true
 
 		// TODO: Table-driven map insert.
 	case OMAPLIT:
 		break
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -534,27 +534,27 @@
 	namebuf = fmt.Sprintf("statictmp_%.4d", statuniqgen)
 	statuniqgen++
 	n = newname(Lookup(namebuf))
-	if !(ctxt != 0) {
+	if ctxt == 0 {
 		n.Readonly = 1
 	}
 	addvar(n, t, PEXTERN)
 	return n
 }
 
-func isliteral(n *Node) int {
+func isliteral(n *Node) bool {
 	if n.Op == OLITERAL {
 		if n.Val.Ctype != CTNIL {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
-func simplename(n *Node) int {
+func simplename(n *Node) bool {
 	if n.Op != ONAME {
 		goto no
 	}
-	if !(n.Addable != 0) {
+	if n.Addable == 0 {
 		goto no
 	}
 	if n.Class&PHEAP != 0 {
@@ -563,10 +563,10 @@
 	if n.Class == PPARAMREF {
 		goto no
 	}
-	return 1
+	return true
 
 no:
-	return 0
+	return false
 }
 
 func litas(l *Node, r *Node, init **NodeList) {
@@ -591,13 +591,13 @@
 	mode = 0
 	switch n.Op {
 	default:
-		if isliteral(n) != 0 {
+		if isliteral(n) {
 			return MODECONST
 		}
 		return MODEDYNAM
 
 	case OARRAYLIT:
-		if !(top != 0) && n.Type.Bound < 0 {
+		if top == 0 && n.Type.Bound < 0 {
 			return MODEDYNAM
 		}
 		fallthrough
@@ -657,7 +657,7 @@
 			continue
 		}
 
-		if isliteral(value) != 0 {
+		if isliteral(value) {
 			if pass == 2 {
 				continue
 			}
@@ -725,7 +725,7 @@
 			continue
 		}
 
-		if isliteral(index) != 0 && isliteral(value) != 0 {
+		if isliteral(index) && isliteral(value) {
 			if pass == 2 {
 				continue
 			}
@@ -881,7 +881,7 @@
 		index = r.Left
 		value = r.Right
 		a = Nod(OINDEX, var_, index)
-		a.Bounded = 1
+		a.Bounded = true
 
 		// TODO need to check bounds?
 
@@ -898,7 +898,7 @@
 			continue
 		}
 
-		if isliteral(index) != 0 && isliteral(value) != 0 {
+		if isliteral(index) && isliteral(value) {
 			continue
 		}
 
@@ -951,7 +951,7 @@
 		index = r.Left
 		value = r.Right
 
-		if isliteral(index) != 0 && isliteral(value) != 0 {
+		if isliteral(index) && isliteral(value) {
 			b++
 		}
 	}
@@ -999,7 +999,7 @@
 			index = r.Left
 			value = r.Right
 
-			if isliteral(index) != 0 && isliteral(value) != 0 {
+			if isliteral(index) && isliteral(value) {
 				// build vstat[b].a = key;
 				a = Nodintconst(b)
 
@@ -1033,11 +1033,11 @@
 		index = temp(Types[TINT])
 
 		a = Nod(OINDEX, vstat, index)
-		a.Bounded = 1
+		a.Bounded = true
 		a = Nod(ODOT, a, newname(symb))
 
 		r = Nod(OINDEX, vstat, index)
-		r.Bounded = 1
+		r.Bounded = true
 		r = Nod(ODOT, r, newname(syma))
 		r = Nod(OINDEX, var_, r)
 
@@ -1068,7 +1068,7 @@
 		index = r.Left
 		value = r.Right
 
-		if isliteral(index) != 0 && isliteral(value) != 0 {
+		if isliteral(index) && isliteral(value) {
 			continue
 		}
 
@@ -1118,10 +1118,9 @@
 	switch n.Op {
 	default:
 		Fatal("anylit: not lit")
-		fallthrough
 
 	case OPTRLIT:
-		if !(Isptr[t.Etype] != 0) {
+		if Isptr[t.Etype] == 0 {
 			Fatal("anylit: not ptr")
 		}
 
@@ -1150,7 +1149,7 @@
 			Fatal("anylit: not struct")
 		}
 
-		if simplename(var_) != 0 && count(n.List) > 4 {
+		if simplename(var_) && count(n.List) > 4 {
 			if ctxt == 0 {
 				// lay out static data
 				vstat = staticname(t, ctxt)
@@ -1176,7 +1175,7 @@
 		}
 
 		// initialize of not completely specified
-		if simplename(var_) != 0 || count(n.List) < structcount(t) {
+		if simplename(var_) || count(n.List) < structcount(t) {
 			a = Nod(OAS, var_, nil)
 			typecheck(&a, Etop)
 			walkexpr(&a, init)
@@ -1194,7 +1193,7 @@
 			break
 		}
 
-		if simplename(var_) != 0 && count(n.List) > 4 {
+		if simplename(var_) && count(n.List) > 4 {
 			if ctxt == 0 {
 				// lay out static data
 				vstat = staticname(t, ctxt)
@@ -1220,7 +1219,7 @@
 		}
 
 		// initialize of not completely specified
-		if simplename(var_) != 0 || int64(count(n.List)) < t.Bound {
+		if simplename(var_) || int64(count(n.List)) < t.Bound {
 			a = Nod(OAS, var_, nil)
 			typecheck(&a, Etop)
 			walkexpr(&a, init)
@@ -1237,7 +1236,7 @@
 	}
 }
 
-func oaslit(n *Node, init **NodeList) int {
+func oaslit(n *Node, init **NodeList) bool {
 	var ctxt int
 
 	if n.Left == nil || n.Right == nil {
@@ -1246,7 +1245,7 @@
 	if n.Left.Type == nil || n.Right.Type == nil {
 		goto no
 	}
-	if !(simplename(n.Left) != 0) {
+	if !simplename(n.Left) {
 		goto no
 	}
 	if !Eqtype(n.Left.Type, n.Right.Type) {
@@ -1268,28 +1267,28 @@
 	case OSTRUCTLIT,
 		OARRAYLIT,
 		OMAPLIT:
-		if vmatch1(n.Left, n.Right) != 0 {
+		if vmatch1(n.Left, n.Right) {
 			goto no
 		}
 		anylit(ctxt, n.Right, n.Left, init)
 	}
 
 	n.Op = OEMPTY
-	return 1
+	return true
 
 	// not a special composit literal assignment
 no:
-	return 0
+	return false
 }
 
 func getlit(lit *Node) int {
-	if Smallintconst(lit) != 0 {
+	if Smallintconst(lit) {
 		return int(Mpgetfix(lit.Val.U.Xval))
 	}
 	return -1
 }
 
-func stataddr(nam *Node, n *Node) int {
+func stataddr(nam *Node, n *Node) bool {
 	var l int
 
 	if n == nil {
@@ -1299,21 +1298,21 @@
 	switch n.Op {
 	case ONAME:
 		*nam = *n
-		return int(n.Addable)
+		return n.Addable != 0
 
 	case ODOT:
-		if !(stataddr(nam, n.Left) != 0) {
+		if !stataddr(nam, n.Left) {
 			break
 		}
 		nam.Xoffset += n.Xoffset
 		nam.Type = n.Type
-		return 1
+		return true
 
 	case OINDEX:
 		if n.Left.Type.Bound < 0 {
 			break
 		}
-		if !(stataddr(nam, n.Left) != 0) {
+		if !stataddr(nam, n.Left) {
 			break
 		}
 		l = getlit(n.Right)
@@ -1327,11 +1326,11 @@
 		}
 		nam.Xoffset += int64(l) * n.Type.Width
 		nam.Type = n.Type
-		return 1
+		return true
 	}
 
 no:
-	return 0
+	return false
 }
 
 func initplan(n *Node) {
@@ -1347,12 +1346,11 @@
 	switch n.Op {
 	default:
 		Fatal("initplan")
-		fallthrough
 
 	case OARRAYLIT:
 		for l = n.List; l != nil; l = l.Next {
 			a = l.N
-			if a.Op != OKEY || !(Smallintconst(a.Left) != 0) {
+			if a.Op != OKEY || !Smallintconst(a.Left) {
 				Fatal("initplan arraylit")
 			}
 			addvalue(p, n.Type.Type.Width*Mpgetfix(a.Left.Val.U.Xval), nil, a.Right)
@@ -1384,13 +1382,13 @@
 	var e *InitEntry
 
 	// special case: zero can be dropped entirely
-	if iszero(n) != 0 {
+	if iszero(n) {
 		p.Zero += n.Type.Width
 		return
 	}
 
 	// special case: inline struct and array (not slice) literals
-	if isvaluelit(n) != 0 {
+	if isvaluelit(n) {
 		initplan(n)
 		q = n.Initplan
 		for i = 0; i < len(q.E); i++ {
@@ -1414,7 +1412,7 @@
 	e.Expr = n
 }
 
-func iszero(n *Node) int {
+func iszero(n *Node) bool {
 	var l *NodeList
 
 	switch n.Op {
@@ -1423,30 +1421,29 @@
 		default:
 			Dump("unexpected literal", n)
 			Fatal("iszero")
-			fallthrough
 
 		case CTNIL:
-			return 1
+			return true
 
 		case CTSTR:
-			return bool2int(n.Val.U.Sval == nil || len(n.Val.U.Sval.S) == 0)
+			return n.Val.U.Sval == nil || len(n.Val.U.Sval.S) == 0
 
 		case CTBOOL:
-			return bool2int(n.Val.U.Bval == 0)
+			return n.Val.U.Bval == 0
 
 		case CTINT,
 			CTRUNE:
-			return bool2int(mpcmpfixc(n.Val.U.Xval, 0) == 0)
+			return mpcmpfixc(n.Val.U.Xval, 0) == 0
 
 		case CTFLT:
-			return bool2int(mpcmpfltc(n.Val.U.Fval, 0) == 0)
+			return mpcmpfltc(n.Val.U.Fval, 0) == 0
 
 		case CTCPLX:
-			return bool2int(mpcmpfltc(&n.Val.U.Cval.Real, 0) == 0 && mpcmpfltc(&n.Val.U.Cval.Imag, 0) == 0)
+			return mpcmpfltc(&n.Val.U.Cval.Real, 0) == 0 && mpcmpfltc(&n.Val.U.Cval.Imag, 0) == 0
 		}
 
 	case OARRAYLIT:
-		if Isslice(n.Type) != 0 {
+		if Isslice(n.Type) {
 			break
 		}
 		fallthrough
@@ -1454,18 +1451,18 @@
 		// fall through
 	case OSTRUCTLIT:
 		for l = n.List; l != nil; l = l.Next {
-			if !(iszero(l.N.Right) != 0) {
-				return 0
+			if !iszero(l.N.Right) {
+				return false
 			}
 		}
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
-func isvaluelit(n *Node) int {
-	return bool2int((n.Op == OARRAYLIT && Isfixedarray(n.Type) != 0) || n.Op == OSTRUCTLIT)
+func isvaluelit(n *Node) bool {
+	return (n.Op == OARRAYLIT && Isfixedarray(n.Type)) || n.Op == OSTRUCTLIT
 }
 
 func entry(p *InitPlan) *InitEntry {
@@ -1473,7 +1470,7 @@
 	return &p.E[len(p.E)-1]
 }
 
-func gen_as_init(n *Node) int {
+func gen_as_init(n *Node) bool {
 	var nr *Node
 	var nl *Node
 	var nam Node
@@ -1486,7 +1483,7 @@
 	nr = n.Right
 	nl = n.Left
 	if nr == nil {
-		if !(stataddr(&nam, nl) != 0) {
+		if !stataddr(&nam, nl) {
 			goto no
 		}
 		if nam.Class != PEXTERN {
@@ -1499,7 +1496,7 @@
 		goto no
 	}
 
-	if !(stataddr(&nam, nl) != 0) {
+	if !stataddr(&nam, nl) {
 		goto no
 	}
 
@@ -1562,7 +1559,7 @@
 	}
 
 yes:
-	return 1
+	return true
 
 slice:
 	gused(nil) // in case the data is the dest of a goto
@@ -1598,5 +1595,5 @@
 		Fatal("gen_as_init couldnt make data statement")
 	}
 
-	return 0
+	return false
 }
diff --git a/src/cmd/internal/gc/subr.go b/src/cmd/internal/gc/subr.go
index c28bfbd..c8e613c 100644
--- a/src/cmd/internal/gc/subr.go
+++ b/src/cmd/internal/gc/subr.go
@@ -119,7 +119,7 @@
 
 	hcrash()
 	nerrors++
-	if nsavederrors+nerrors >= 10 && !(Debug['e'] != 0) {
+	if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
 		Flusherrors()
 		fmt.Printf("%v: too many errors\n", Ctxt.Line(line))
 		errorexit()
@@ -192,7 +192,7 @@
 
 	hcrash()
 	nerrors++
-	if nsavederrors+nerrors >= 10 && !(Debug['e'] != 0) {
+	if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
 		Flusherrors()
 		fmt.Printf("%v: too many errors\n", Ctxt.Line(parserline()))
 		errorexit()
@@ -248,7 +248,7 @@
 		fmt.Printf(" at line %v\n", Ctxt.Line(int(lexlineno)))
 	}
 
-	if off < 0 && file[0] != '/' && !(relative != 0) {
+	if off < 0 && file[0] != '/' && relative == 0 {
 		file = fmt.Sprintf("%s/%s", Ctxt.Pathname, file)
 	}
 	obj.Linklinehist(Ctxt, int(lexlineno), file, int(off))
@@ -384,6 +384,26 @@
 	}
 }
 
+func gethunk() {
+	var h string
+	var nh int32
+
+	nh = NHUNK
+	if thunk >= 10*NHUNK {
+		nh = 10 * NHUNK
+	}
+	h = string(make([]byte, nh))
+	if h == "" {
+		Flusherrors()
+		Yyerror("out of memory")
+		errorexit()
+	}
+
+	hunk = h
+	nhunk = nh
+	thunk += nh
+}
+
 func Nod(op int, nleft *Node, nright *Node) *Node {
 	var n *Node
 
@@ -412,14 +432,14 @@
 // ispaddedfield reports whether the given field
 // is followed by padding. For the case where t is
 // the last field, total gives the size of the enclosing struct.
-func ispaddedfield(t *Type, total int64) int {
+func ispaddedfield(t *Type, total int64) bool {
 	if t.Etype != TFIELD {
 		Fatal("ispaddedfield called non-field %v", Tconv(t, 0))
 	}
 	if t.Down == nil {
-		return bool2int(t.Width+t.Type.Width != total)
+		return t.Width+t.Type.Width != total
 	}
-	return bool2int(t.Width+t.Type.Width != t.Down.Width)
+	return t.Width+t.Type.Width != t.Down.Width
 }
 
 func algtype1(t *Type, bad **Type) int {
@@ -486,13 +506,13 @@
 		return ASTRING
 
 	case TINTER:
-		if isnilinter(t) != 0 {
+		if isnilinter(t) {
 			return ANILINTER
 		}
 		return AINTER
 
 	case TARRAY:
-		if Isslice(t) != 0 {
+		if Isslice(t) {
 			if bad != nil {
 				*bad = t
 			}
@@ -526,7 +546,7 @@
 
 			// Blank fields, padded fields, fields with non-memory
 			// equality need special compare.
-			if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) != 0 {
+			if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) {
 				ret = -1
 				continue
 			}
@@ -544,7 +564,7 @@
 
 	a = algtype1(t, nil)
 	if a == AMEM || a == ANOEQ {
-		if Isslice(t) != 0 {
+		if Isslice(t) {
 			return ASLICE
 		}
 		switch t.Width {
@@ -687,7 +707,7 @@
 	for {
 		tmp11 := i
 		i--
-		if !(tmp11 > 0) {
+		if tmp11 <= 0 {
 			break
 		}
 		a[i].Down = f
@@ -748,12 +768,12 @@
 	return c
 }
 
-func Nodbool(b int) *Node {
+func Nodbool(b bool) *Node {
 	var c *Node
 
 	c = Nodintconst(0)
 	c.Val.Ctype = CTBOOL
-	c.Val.U.Bval = int16(b)
+	c.Val.U.Bval = int16(bool2int(b))
 	c.Type = idealbool
 	return c
 }
@@ -829,46 +849,46 @@
 	return m
 }
 
-func isnil(n *Node) int {
+func isnil(n *Node) bool {
 	if n == nil {
-		return 0
+		return false
 	}
 	if n.Op != OLITERAL {
-		return 0
+		return false
 	}
 	if n.Val.Ctype != CTNIL {
-		return 0
+		return false
 	}
-	return 1
+	return true
 }
 
-func isptrto(t *Type, et int) int {
+func isptrto(t *Type, et int) bool {
 	if t == nil {
-		return 0
+		return false
 	}
-	if !(Isptr[t.Etype] != 0) {
-		return 0
+	if Isptr[t.Etype] == 0 {
+		return false
 	}
 	t = t.Type
 	if t == nil {
-		return 0
+		return false
 	}
 	if int(t.Etype) != et {
-		return 0
+		return false
 	}
-	return 1
+	return true
 }
 
-func Istype(t *Type, et int) int {
-	return bool2int(t != nil && int(t.Etype) == et)
+func Istype(t *Type, et int) bool {
+	return t != nil && int(t.Etype) == et
 }
 
-func Isfixedarray(t *Type) int {
-	return bool2int(t != nil && t.Etype == TARRAY && t.Bound >= 0)
+func Isfixedarray(t *Type) bool {
+	return t != nil && t.Etype == TARRAY && t.Bound >= 0
 }
 
-func Isslice(t *Type) int {
-	return bool2int(t != nil && t.Etype == TARRAY && t.Bound < 0)
+func Isslice(t *Type) bool {
+	return t != nil && t.Etype == TARRAY && t.Bound < 0
 }
 
 func isblank(n *Node) bool {
@@ -882,34 +902,34 @@
 	return s != nil && s.Name == "_"
 }
 
-func Isinter(t *Type) int {
-	return bool2int(t != nil && t.Etype == TINTER)
+func Isinter(t *Type) bool {
+	return t != nil && t.Etype == TINTER
 }
 
-func isnilinter(t *Type) int {
-	if !(Isinter(t) != 0) {
-		return 0
+func isnilinter(t *Type) bool {
+	if !Isinter(t) {
+		return false
 	}
 	if t.Type != nil {
-		return 0
+		return false
 	}
-	return 1
+	return true
 }
 
-func isideal(t *Type) int {
+func isideal(t *Type) bool {
 	if t == nil {
-		return 0
+		return false
 	}
 	if t == idealstring || t == idealbool {
-		return 1
+		return true
 	}
 	switch t.Etype {
 	case TNIL,
 		TIDEAL:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -938,7 +958,7 @@
 	}
 
 	// check types
-	if !(issimple[t.Etype] != 0) {
+	if issimple[t.Etype] == 0 {
 		switch t.Etype {
 		default:
 			return nil
@@ -979,13 +999,13 @@
 	next *TypePairList
 }
 
-func onlist(l *TypePairList, t1 *Type, t2 *Type) int {
+func onlist(l *TypePairList, t1 *Type, t2 *Type) bool {
 	for ; l != nil; l = l.next {
 		if (l.t1 == t1 && l.t2 == t2) || (l.t1 == t2 && l.t2 == t1) {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 // Return 1 if t1 and t2 are identical, following the spec rules.
@@ -995,17 +1015,17 @@
 // pointer (t1 == t2), so there's no chance of chasing cycles
 // ad infinitum, so no need for a depth counter.
 func Eqtype(t1 *Type, t2 *Type) bool {
-	return eqtype1(t1, t2, nil) != 0
+	return eqtype1(t1, t2, nil)
 }
 
-func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) int {
+func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
 	var l TypePairList
 
 	if t1 == t2 {
-		return 1
+		return true
 	}
 	if t1 == nil || t2 == nil || t1.Etype != t2.Etype {
-		return 0
+		return false
 	}
 	if t1.Sym != nil || t2.Sym != nil {
 		// Special case: we keep byte and uint8 separate
@@ -1013,21 +1033,21 @@
 		switch t1.Etype {
 		case TUINT8:
 			if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
-				return 1
+				return true
 			}
 
 		case TINT,
 			TINT32:
 			if (t1 == Types[runetype.Etype] || t1 == runetype) && (t2 == Types[runetype.Etype] || t2 == runetype) {
-				return 1
+				return true
 			}
 		}
 
-		return 0
+		return false
 	}
 
-	if onlist(assumed_equal, t1, t2) != 0 {
-		return 1
+	if onlist(assumed_equal, t1, t2) {
+		return true
 	}
 	l.next = assumed_equal
 	l.t1 = t1
@@ -1042,7 +1062,7 @@
 			if t1.Etype != TFIELD || t2.Etype != TFIELD {
 				Fatal("struct/interface missing field: %v %v", Tconv(t1, 0), Tconv(t2, 0))
 			}
-			if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !(eqtype1(t1.Type, t2.Type, &l) != 0) || !eqnote(t1.Note, t2.Note) {
+			if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
 				goto no
 			}
 		}
@@ -1071,7 +1091,7 @@
 				if ta.Etype != TFIELD || tb.Etype != TFIELD {
 					Fatal("func struct missing field: %v %v", Tconv(ta, 0), Tconv(tb, 0))
 				}
-				if ta.Isddd != tb.Isddd || !(eqtype1(ta.Type, tb.Type, &l) != 0) {
+				if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
 					goto no
 				}
 			}
@@ -1097,34 +1117,34 @@
 		}
 	}
 
-	if eqtype1(t1.Down, t2.Down, &l) != 0 && eqtype1(t1.Type, t2.Type, &l) != 0 {
+	if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
 		goto yes
 	}
 	goto no
 
 yes:
-	return 1
+	return true
 
 no:
-	return 0
+	return false
 }
 
 // Are t1 and t2 equal struct types when field names are ignored?
 // For deciding whether the result struct from g can be copied
 // directly when compiling f(g()).
-func eqtypenoname(t1 *Type, t2 *Type) int {
+func eqtypenoname(t1 *Type, t2 *Type) bool {
 	if t1 == nil || t2 == nil || t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
-		return 0
+		return false
 	}
 
 	t1 = t1.Type
 	t2 = t2.Type
 	for {
 		if !Eqtype(t1, t2) {
-			return 0
+			return false
 		}
 		if t1 == nil {
-			return 1
+			return true
 		}
 		t1 = t1.Down
 		t2 = t2.Down
@@ -1167,13 +1187,13 @@
 	// both are empty interface types.
 	// For assignable but different non-empty interface types,
 	// we want to recompute the itab.
-	if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src) != 0) {
+	if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src)) {
 		return OCONVNOP
 	}
 
 	// 3. dst is an interface type and src implements dst.
 	if dst.Etype == TINTER && src.Etype != TNIL {
-		if implements(src, dst, &missing, &have, &ptr) != 0 {
+		if implements(src, dst, &missing, &have, &ptr) {
 			return OCONVIFACE
 		}
 
@@ -1183,9 +1203,9 @@
 		}
 
 		if why != nil {
-			if isptrto(src, TINTER) != 0 {
+			if isptrto(src, TINTER) {
 				*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(src, 0))
-			} else if have != nil && have.Sym == missing.Sym && have.Nointerface != 0 {
+			} else if have != nil && have.Sym == missing.Sym && have.Nointerface {
 				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
 			} else if have != nil && have.Sym == missing.Sym {
 				*why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
@@ -1201,7 +1221,7 @@
 		return 0
 	}
 
-	if isptrto(dst, TINTER) != 0 {
+	if isptrto(dst, TINTER) {
 		if why != nil {
 			*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(dst, 0))
 		}
@@ -1209,7 +1229,7 @@
 	}
 
 	if src.Etype == TINTER && dst.Etype != TBLANK {
-		if why != nil && implements(dst, src, &missing, &have, &ptr) != 0 {
+		if why != nil && implements(dst, src, &missing, &have, &ptr) {
 			*why = ": need type assertion"
 		}
 		return 0
@@ -1322,7 +1342,7 @@
 		return ORUNESTR
 	}
 
-	if Isslice(src) != 0 && dst.Etype == TSTRING {
+	if Isslice(src) && dst.Etype == TSTRING {
 		if src.Type.Etype == bytetype.Etype {
 			return OARRAYBYTESTR
 		}
@@ -1333,7 +1353,7 @@
 
 	// 7. src is a string and dst is []byte or []rune.
 	// String to slice.
-	if src.Etype == TSTRING && Isslice(dst) != 0 {
+	if src.Etype == TSTRING && Isslice(dst) {
 		if dst.Type.Etype == bytetype.Etype {
 			return OSTRARRAYBYTE
 		}
@@ -1408,23 +1428,23 @@
 	return r
 }
 
-func subtype(stp **Type, t *Type, d int) int {
+func subtype(stp **Type, t *Type, d int) bool {
 	var st *Type
 
 loop:
 	st = *stp
 	if st == nil {
-		return 0
+		return false
 	}
 
 	d++
 	if d >= 10 {
-		return 0
+		return false
 	}
 
 	switch st.Etype {
 	default:
-		return 0
+		return false
 
 	case TPTR32,
 		TPTR64,
@@ -1434,13 +1454,13 @@
 		goto loop
 
 	case TANY:
-		if !(st.Copyany != 0) {
-			return 0
+		if st.Copyany == 0 {
+			return false
 		}
 		*stp = t
 
 	case TMAP:
-		if subtype(&st.Down, t, d) != 0 {
+		if subtype(&st.Down, t, d) {
 			break
 		}
 		stp = &st.Type
@@ -1448,51 +1468,51 @@
 
 	case TFUNC:
 		for {
-			if subtype(&st.Type, t, d) != 0 {
+			if subtype(&st.Type, t, d) {
 				break
 			}
-			if subtype(&st.Type.Down.Down, t, d) != 0 {
+			if subtype(&st.Type.Down.Down, t, d) {
 				break
 			}
-			if subtype(&st.Type.Down, t, d) != 0 {
+			if subtype(&st.Type.Down, t, d) {
 				break
 			}
-			return 0
+			return false
 		}
 
 	case TSTRUCT:
 		for st = st.Type; st != nil; st = st.Down {
-			if subtype(&st.Type, t, d) != 0 {
-				return 1
+			if subtype(&st.Type, t, d) {
+				return true
 			}
 		}
-		return 0
+		return false
 	}
 
-	return 1
+	return true
 }
 
 /*
  * Is this a 64-bit type?
  */
-func Is64(t *Type) int {
+func Is64(t *Type) bool {
 	if t == nil {
-		return 0
+		return false
 	}
 	switch Simtype[t.Etype] {
 	case TINT64,
 		TUINT64,
 		TPTR64:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
  * Is a conversion between t1 and t2 a no-op?
  */
-func Noconv(t1 *Type, t2 *Type) int {
+func Noconv(t1 *Type, t2 *Type) bool {
 	var e1 int
 	var e2 int
 
@@ -1502,35 +1522,35 @@
 	switch e1 {
 	case TINT8,
 		TUINT8:
-		return bool2int(e2 == TINT8 || e2 == TUINT8)
+		return e2 == TINT8 || e2 == TUINT8
 
 	case TINT16,
 		TUINT16:
-		return bool2int(e2 == TINT16 || e2 == TUINT16)
+		return e2 == TINT16 || e2 == TUINT16
 
 	case TINT32,
 		TUINT32,
 		TPTR32:
-		return bool2int(e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32)
+		return e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32
 
 	case TINT64,
 		TUINT64,
 		TPTR64:
-		return bool2int(e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64)
+		return e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64
 
 	case TFLOAT32:
-		return bool2int(e2 == TFLOAT32)
+		return e2 == TFLOAT32
 
 	case TFLOAT64:
-		return bool2int(e2 == TFLOAT64)
+		return e2 == TFLOAT64
 	}
 
-	return 0
+	return false
 }
 
 func argtype(on *Node, t *Type) {
 	dowidth(t)
-	if !(subtype(&on.Type, t, 0) != 0) {
+	if !subtype(&on.Type, t, 0) {
 		Fatal("argtype: failed %v %v\n", Nconv(on, 0), Tconv(t, 0))
 	}
 }
@@ -1607,7 +1627,7 @@
 		Fatal("syslook: can't find runtime.%s", name)
 	}
 
-	if !(copy != 0) {
+	if copy == 0 {
 		return s.Def
 	}
 
@@ -1886,7 +1906,7 @@
 	var fp *Type
 
 	fp = structnext(s)
-	if fp == nil && !(s.Done != 0) {
+	if fp == nil && s.Done == 0 {
 		s.Done = 1
 		fp = Structfirst(s, getinarg(s.Tfunc))
 	}
@@ -2039,7 +2059,7 @@
 	}
 
 	// make a copy; must not be used as an lvalue
-	if islvalue(n) != 0 {
+	if islvalue(n) {
 		Fatal("missing lvalue case in safeexpr: %v", Nconv(n, 0))
 	}
 	return cheapexpr(n, init)
@@ -2077,7 +2097,7 @@
  * assignment to it.
  */
 func localexpr(n *Node, t *Type, init **NodeList) *Node {
-	if n.Op == ONAME && (!(n.Addrtaken != 0) || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
+	if n.Op == ONAME && (n.Addrtaken == 0 || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
 		return n
 	}
 
@@ -2182,7 +2202,7 @@
 
 	d--
 	for f = u.Type; f != nil; f = f.Down {
-		if !(f.Embedded != 0) {
+		if f.Embedded == 0 {
 			continue
 		}
 		if f.Sym == nil {
@@ -2343,7 +2363,7 @@
 	}
 
 	for f = u.Type; f != nil; f = f.Down {
-		if !(f.Embedded != 0) {
+		if f.Embedded == 0 {
 			continue
 		}
 		if f.Sym == nil {
@@ -2583,10 +2603,10 @@
 	dot = adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
 
 	// generate call
-	if !(flag_race != 0) && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !(isifacemethod(method.Type) != 0) {
+	if flag_race == 0 && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !isifacemethod(method.Type) {
 		// generate tail call: adjust pointer receiver and jump to embedded method.
 		dot = dot.Left // skip final .M
-		if !(Isptr[dotlist[0].field.Type.Etype] != 0) {
+		if Isptr[dotlist[0].field.Type.Etype] == 0 {
 			dot = Nod(OADDR, dot, nil)
 		}
 		as = Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
@@ -2625,7 +2645,7 @@
 
 	// Set inl_nonlocal to whether we are calling a method on a
 	// type defined in a different package.  Checked in inlvar.
-	if !(methodrcvr.Local != 0) {
+	if methodrcvr.Local == 0 {
 		inl_nonlocal = 1
 	}
 
@@ -2666,7 +2686,6 @@
 	switch a {
 	case AMEM:
 		Fatal("hashfor with AMEM type")
-		fallthrough
 
 	case AINTER:
 		sym = Pkglookup("interhash", Runtimepkg)
@@ -2760,10 +2779,9 @@
 	switch t.Etype {
 	default:
 		Fatal("genhash %v", Tconv(t, 0))
-		fallthrough
 
 	case TARRAY:
-		if Isslice(t) != 0 {
+		if Isslice(t) {
 			Fatal("genhash %v", Tconv(t, 0))
 		}
 
@@ -2798,7 +2816,7 @@
 		call = Nod(OCALL, hashel, nil)
 
 		nx = Nod(OINDEX, np, ni)
-		nx.Bounded = 1
+		nx.Bounded = true
 		na = Nod(OADDR, nx, nil)
 		na.Etype = 1 // no escape to heap
 		call.List = list(call.List, na)
@@ -2821,7 +2839,7 @@
 				}
 
 				// If it's a memory field but it's padded, stop here.
-				if ispaddedfield(t1, t.Width) != 0 {
+				if ispaddedfield(t1, t.Width) {
 					t1 = t1.Down
 				} else {
 					continue
@@ -2924,7 +2942,7 @@
 	nif = Nod(OIF, nil, nil)
 	nif.Ntest = Nod(ONE, nx, ny)
 	r = Nod(ORETURN, nil, nil)
-	r.List = list(r.List, Nodbool(0))
+	r.List = list(r.List, Nodbool(false))
 	nif.Nbody = list(nif.Nbody, r)
 	return nif
 }
@@ -2981,7 +2999,7 @@
 	nif.Ninit = list(nif.Ninit, call)
 	nif.Ntest = Nod(ONOT, call, nil)
 	r = Nod(ORETURN, nil, nil)
-	r.List = list(r.List, Nodbool(0))
+	r.List = list(r.List, Nodbool(false))
 	nif.Nbody = list(nif.Nbody, r)
 	return nif
 }
@@ -3040,10 +3058,9 @@
 	switch t.Etype {
 	default:
 		Fatal("geneq %v", Tconv(t, 0))
-		fallthrough
 
 	case TARRAY:
-		if Isslice(t) != 0 {
+		if Isslice(t) {
 			Fatal("geneq %v", Tconv(t, 0))
 		}
 
@@ -3064,14 +3081,14 @@
 		// if p[i] != q[i] { return false }
 		nx = Nod(OINDEX, np, ni)
 
-		nx.Bounded = 1
+		nx.Bounded = true
 		ny = Nod(OINDEX, nq, ni)
-		ny.Bounded = 1
+		ny.Bounded = true
 
 		nif = Nod(OIF, nil, nil)
 		nif.Ntest = Nod(ONE, nx, ny)
 		r = Nod(ORETURN, nil, nil)
-		r.List = list(r.List, Nodbool(0))
+		r.List = list(r.List, Nodbool(false))
 		nif.Nbody = list(nif.Nbody, r)
 		nrange.Nbody = list(nrange.Nbody, nif)
 		fn.Nbody = list(fn.Nbody, nrange)
@@ -3091,7 +3108,7 @@
 				}
 
 				// If it's a memory field but it's padded, stop here.
-				if ispaddedfield(t1, t.Width) != 0 {
+				if ispaddedfield(t1, t.Width) {
 					t1 = t1.Down
 				} else {
 					continue
@@ -3134,7 +3151,7 @@
 	// return true
 	r = Nod(ORETURN, nil, nil)
 
-	r.List = list(r.List, Nodbool(1))
+	r.List = list(r.List, Nodbool(true))
 	fn.Nbody = list(fn.Nbody, r)
 
 	if Debug['r'] != 0 {
@@ -3199,7 +3216,7 @@
 	return nil
 }
 
-func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) int {
+func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool {
 	var t0 *Type
 	var im *Type
 	var tm *Type
@@ -3209,7 +3226,7 @@
 
 	t0 = t
 	if t == nil {
-		return 0
+		return false
 	}
 
 	// if this is too slow,
@@ -3226,18 +3243,18 @@
 					*m = im
 					*samename = tm
 					*ptr = 0
-					return 0
+					return false
 				}
 			}
 
 			*m = im
 			*samename = nil
 			*ptr = 0
-			return 0
+			return false
 		found:
 		}
 
-		return 1
+		return true
 	}
 
 	t = methtype(t, 0)
@@ -3247,21 +3264,21 @@
 	for im = iface.Type; im != nil; im = im.Down {
 		imtype = methodfunc(im.Type, nil)
 		tm = ifacelookdot(im.Sym, t, &followptr, 0)
-		if tm == nil || tm.Nointerface != 0 || !Eqtype(methodfunc(tm.Type, nil), imtype) {
+		if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {
 			if tm == nil {
 				tm = ifacelookdot(im.Sym, t, &followptr, 1)
 			}
 			*m = im
 			*samename = tm
 			*ptr = 0
-			return 0
+			return false
 		}
 
 		// if pointer receiver in method,
 		// the method does not exist for value types.
 		rcvr = getthisx(tm.Type).Type.Type
 
-		if Isptr[rcvr.Etype] != 0 && !(Isptr[t0.Etype] != 0) && !(followptr != 0) && !(isifacemethod(tm.Type) != 0) {
+		if Isptr[rcvr.Etype] != 0 && Isptr[t0.Etype] == 0 && followptr == 0 && !isifacemethod(tm.Type) {
 			if false && Debug['r'] != 0 {
 				Yyerror("interface pointer mismatch")
 			}
@@ -3269,11 +3286,11 @@
 			*m = im
 			*samename = nil
 			*ptr = 1
-			return 0
+			return false
 		}
 	}
 
-	return 1
+	return true
 }
 
 /*
@@ -3470,7 +3487,7 @@
 	if n == nil || n.Op != OLITERAL || n.Type == nil {
 		goto no
 	}
-	if !(Isint[n.Type.Etype] != 0) {
+	if Isint[n.Type.Etype] == 0 {
 		goto no
 	}
 
@@ -3483,7 +3500,7 @@
 		b = b << 1
 	}
 
-	if !(Issigned[n.Type.Etype] != 0) {
+	if Issigned[n.Type.Etype] == 0 {
 		goto no
 	}
 
@@ -3895,7 +3912,7 @@
 func checknil(x *Node, init **NodeList) {
 	var n *Node
 
-	if Isinter(x.Type) != 0 {
+	if Isinter(x.Type) {
 		x = Nod(OITAB, x, nil)
 		typecheck(&x, Erv)
 	}
@@ -3909,7 +3926,7 @@
  * Can this type be stored directly in an interface word?
  * Yes, if the representation is a single pointer.
  */
-func isdirectiface(t *Type) int {
+func isdirectiface(t *Type) bool {
 	switch t.Etype {
 	case TPTR32,
 		TPTR64,
@@ -3917,16 +3934,16 @@
 		TMAP,
 		TFUNC,
 		TUNSAFEPTR:
-		return 1
+		return true
 
 		// Array of 1 direct iface type can be direct.
 	case TARRAY:
-		return bool2int(t.Bound == 1 && isdirectiface(t.Type) != 0)
+		return t.Bound == 1 && isdirectiface(t.Type)
 
 		// Struct with 1 field of direct iface type can be direct.
 	case TSTRUCT:
-		return bool2int(t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type) != 0)
+		return t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type)
 	}
 
-	return 0
+	return false
 }
diff --git a/src/cmd/internal/gc/swt.go b/src/cmd/internal/gc/swt.go
index cf1f7d4..7c25041 100644
--- a/src/cmd/internal/gc/swt.go
+++ b/src/cmd/internal/gc/swt.go
@@ -281,7 +281,7 @@
 	var go_ *Node
 	var br *Node
 	var lno int32
-	var needvar int32
+	var needvar bool
 
 	if sw.List == nil {
 		return
@@ -301,7 +301,7 @@
 			Fatal("casebody %v", Oconv(int(n.Op), 0))
 		}
 		n.Op = OCASE
-		needvar = int32(bool2int(count(n.List) != 1 || n.List.N.Op == OLITERAL))
+		needvar = count(n.List) != 1 || n.List.N.Op == OLITERAL
 
 		go_ = Nod(OGOTO, newlabel_swt(), nil)
 		if n.List == nil {
@@ -332,7 +332,7 @@
 		}
 
 		stat = list(stat, Nod(OLABEL, go_.Left, nil))
-		if typeswvar != nil && needvar != 0 && n.Nname != nil {
+		if typeswvar != nil && needvar && n.Nname != nil {
 			var l *NodeList
 
 			l = list1(Nod(ODCL, n.Nname, nil))
@@ -410,7 +410,7 @@
 				continue
 			}
 
-			if Istype(n.Left.Type, TINTER) != 0 {
+			if Istype(n.Left.Type, TINTER) {
 				c.type_ = Ttypevar
 				continue
 			}
@@ -552,7 +552,7 @@
 	casebody(sw, nil)
 
 	arg = Snorm
-	if Isconst(sw.Ntest, CTBOOL) != 0 {
+	if Isconst(sw.Ntest, CTBOOL) {
 		arg = Strue
 		if sw.Ntest.Val.U.Bval == 0 {
 			arg = Sfalse
@@ -572,7 +572,7 @@
 
 	cas = nil
 	if arg == Strue || arg == Sfalse {
-		exprname = Nodbool(bool2int(arg == Strue))
+		exprname = Nodbool(arg == Strue)
 	} else if consttype(sw.Ntest) >= 0 {
 		// leave constants to enable dead code elimination (issue 9608)
 		exprname = sw.Ntest
@@ -600,7 +600,7 @@
 	}
 
 	// deal with the variables one-at-a-time
-	if !(okforcmp[t.Etype] != 0) || c0.type_ != Texprconst {
+	if okforcmp[t.Etype] == 0 || c0.type_ != Texprconst {
 		a = exprbsw(c0, 1, arg)
 		cas = list(cas, a)
 		c0 = c0.link
@@ -738,7 +738,7 @@
 	}
 
 	walkexpr(&sw.Ntest.Right, &sw.Ninit)
-	if !(Istype(sw.Ntest.Right.Type, TINTER) != 0) {
+	if !Istype(sw.Ntest.Right.Type, TINTER) {
 		Yyerror("type switch must be on an interface")
 		return
 	}
@@ -764,7 +764,7 @@
 	typecheck(&hashname, Erv)
 
 	t = sw.Ntest.Right.Type
-	if isnilinter(t) != 0 {
+	if isnilinter(t) {
 		a = syslook("efacethash", 1)
 	} else {
 		a = syslook("ifacethash", 1)
@@ -871,7 +871,7 @@
 	 * both have inserted OBREAK statements
 	 */
 	if sw.Ntest == nil {
-		sw.Ntest = Nodbool(1)
+		sw.Ntest = Nodbool(true)
 		typecheck(&sw.Ntest, Erv)
 	}
 
@@ -933,11 +933,11 @@
 			t = Types[TBOOL]
 		}
 		if t != nil {
-			if !(okforeq[t.Etype] != 0) {
+			if okforeq[t.Etype] == 0 {
 				Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
-			} else if t.Etype == TARRAY && !(Isfixedarray(t) != 0) {
+			} else if t.Etype == TARRAY && !Isfixedarray(t) {
 				nilonly = "slice"
-			} else if t.Etype == TARRAY && Isfixedarray(t) != 0 && algtype1(t, nil) == ANOEQ {
+			} else if t.Etype == TARRAY && Isfixedarray(t) && algtype1(t, nil) == ANOEQ {
 				Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
 			} else if t.Etype == TSTRUCT && algtype1(t, &badtype) == ANOEQ {
 				Yyerror("cannot switch on %v (struct containing %v cannot be compared)", Nconv(n.Ntest, obj.FmtLong), Tconv(badtype, 0))
@@ -976,27 +976,27 @@
 
 					if ll.N.Op == OTYPE {
 						Yyerror("type %v is not an expression", Tconv(ll.N.Type, 0))
-					} else if ll.N.Type != nil && !(assignop(ll.N.Type, t, nil) != 0) && !(assignop(t, ll.N.Type, nil) != 0) {
+					} else if ll.N.Type != nil && assignop(ll.N.Type, t, nil) == 0 && assignop(t, ll.N.Type, nil) == 0 {
 						if n.Ntest != nil {
 							Yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", Nconv(ll.N, 0), Nconv(n.Ntest, 0), Tconv(ll.N.Type, 0), Tconv(t, 0))
 						} else {
 							Yyerror("invalid case %v in switch (mismatched types %v and bool)", Nconv(ll.N, 0), Tconv(ll.N.Type, 0))
 						}
-					} else if nilonly != "" && !(Isconst(ll.N, CTNIL) != 0) {
+					} else if nilonly != "" && !Isconst(ll.N, CTNIL) {
 						Yyerror("invalid case %v in switch (can only compare %s %v to nil)", Nconv(ll.N, 0), nilonly, Nconv(n.Ntest, 0))
 					}
 
 				case Etype: // type switch
-					if ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL) != 0 {
+					if ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL) {
 					} else if ll.N.Op != OTYPE && ll.N.Type != nil { // should this be ||?
 						Yyerror("%v is not a type", Nconv(ll.N, obj.FmtLong))
 
 						// reset to original type
 						ll.N = n.Ntest.Right
-					} else if ll.N.Type.Etype != TINTER && t.Etype == TINTER && !(implements(ll.N.Type, t, &missing, &have, &ptr) != 0) {
-						if have != nil && !(missing.Broke != 0) && !(have.Broke != 0) {
+					} else if ll.N.Type.Etype != TINTER && t.Etype == TINTER && !implements(ll.N.Type, t, &missing, &have, &ptr) {
+						if have != nil && missing.Broke == 0 && have.Broke == 0 {
 							Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort))
-						} else if !(missing.Broke != 0) {
+						} else if missing.Broke == 0 {
 							Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0))
 						}
 					}
@@ -1008,7 +1008,7 @@
 			ll = ncase.List
 			nvar = ncase.Nname
 			if nvar != nil {
-				if ll != nil && ll.Next == nil && ll.N.Type != nil && !(Istype(ll.N.Type, TNIL) != 0) {
+				if ll != nil && ll.Next == nil && ll.N.Type != nil && !Istype(ll.N.Type, TNIL) {
 					// single entry type switch
 					nvar.Ntype = typenod(ll.N.Type)
 				} else {
diff --git a/src/cmd/internal/gc/typecheck.go b/src/cmd/internal/gc/typecheck.go
index 9bba6e7..3cd7408 100644
--- a/src/cmd/internal/gc/typecheck.go
+++ b/src/cmd/internal/gc/typecheck.go
@@ -83,7 +83,7 @@
 	var et int
 	var s string
 
-	if Isslice(t) != 0 {
+	if Isslice(t) {
 		return "slice"
 	}
 	et = int(t.Etype)
@@ -133,7 +133,7 @@
 	var l *NodeList
 
 	// cannot type check until all the source has been parsed
-	if !(typecheckok != 0) {
+	if typecheckok == 0 {
 		Fatal("early typecheck")
 	}
 
@@ -233,9 +233,9 @@
 /*
  * does n contain a call or receive operation?
  */
-func callrecv(n *Node) int {
+func callrecv(n *Node) bool {
 	if n == nil {
-		return 0
+		return false
 	}
 
 	switch n.Op {
@@ -250,19 +250,19 @@
 		ONEW,
 		OAPPEND,
 		ODELETE:
-		return 1
+		return true
 	}
 
-	return bool2int(callrecv(n.Left) != 0 || callrecv(n.Right) != 0 || callrecv(n.Ntest) != 0 || callrecv(n.Nincr) != 0 || callrecvlist(n.Ninit) != 0 || callrecvlist(n.Nbody) != 0 || callrecvlist(n.Nelse) != 0 || callrecvlist(n.List) != 0 || callrecvlist(n.Rlist) != 0)
+	return callrecv(n.Left) || callrecv(n.Right) || callrecv(n.Ntest) || callrecv(n.Nincr) || callrecvlist(n.Ninit) || callrecvlist(n.Nbody) || callrecvlist(n.Nelse) || callrecvlist(n.List) || callrecvlist(n.Rlist)
 }
 
-func callrecvlist(l *NodeList) int {
+func callrecvlist(l *NodeList) bool {
 	for ; l != nil; l = l.Next {
-		if callrecv(l.N) != 0 {
-			return 1
+		if callrecv(l.N) {
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 // indexlit implements typechecking of untyped values as
@@ -273,7 +273,7 @@
 	var n *Node
 
 	n = *np
-	if n == nil || !(isideal(n.Type) != 0) {
+	if n == nil || !isideal(n.Type) {
 		return
 	}
 	switch consttype(n) {
@@ -315,7 +315,7 @@
 	n = *np
 
 	if n.Sym != nil {
-		if n.Op == ONAME && n.Etype != 0 && !(top&Ecall != 0) {
+		if n.Op == ONAME && n.Etype != 0 && top&Ecall == 0 {
 			Yyerror("use of builtin %v not in function call", Sconv(n.Sym, 0))
 			goto error
 		}
@@ -336,7 +336,6 @@
 		Dump("typecheck", n)
 
 		Fatal("typecheck %v", Oconv(int(n.Op), 0))
-		fallthrough
 
 		/*
 		 * names
@@ -362,7 +361,7 @@
 			goto ret
 		}
 
-		if !(top&Easgn != 0) {
+		if top&Easgn == 0 {
 			// not a write to the variable
 			if isblank(n) {
 				Yyerror("cannot use _ as value")
@@ -372,7 +371,7 @@
 			n.Used = 1
 		}
 
-		if !(top&Ecall != 0) && isunsafebuiltin(n) != 0 {
+		if top&Ecall == 0 && isunsafebuiltin(n) {
 			Yyerror("%v is not an expression, must be called", Nconv(n, 0))
 			goto error
 		}
@@ -406,7 +405,7 @@
 			t.Bound = -1 // slice
 		} else if l.Op == ODDD {
 			t.Bound = -100 // to be filled in
-			if !(top&Ecomplit != 0) && !(n.Diag != 0) {
+			if top&Ecomplit == 0 && n.Diag == 0 {
 				t.Broke = 1
 				n.Diag = 1
 				Yyerror("use of [...] array outside of array literal")
@@ -431,7 +430,7 @@
 			}
 
 			t.Bound = Mpgetfix(v.U.Xval)
-			if doesoverflow(v, Types[TINT]) != 0 {
+			if doesoverflow(v, Types[TINT]) {
 				Yyerror("array bound is too large")
 				goto error
 			} else if t.Bound < 0 {
@@ -510,7 +509,7 @@
 	case OIND:
 		ntop = Erv | Etype
 
-		if !(top&Eaddr != 0) { // The *x in &*x is not an indirect.
+		if top&Eaddr == 0 {
 			ntop |= Eindir
 		}
 		ntop |= top & Ecomplit
@@ -527,7 +526,7 @@
 			goto ret
 		}
 
-		if !(Isptr[t.Etype] != 0) {
+		if Isptr[t.Etype] == 0 {
 			if top&(Erv|Etop) != 0 {
 				Yyerror("invalid indirect of %v", Nconv(n.Left, obj.FmtLong))
 				goto error
@@ -593,7 +592,7 @@
 		if t == nil {
 			goto error
 		}
-		if !(okfor[n.Op][t.Etype] != 0) {
+		if okfor[n.Op][t.Etype] == 0 {
 			Yyerror("invalid operation: %v %v", Oconv(int(n.Op), 0), Tconv(t, 0))
 			goto error
 		}
@@ -671,8 +670,8 @@
 		r = n.Right
 
 		if n.Left.Op == OTYPE {
-			if !(looktypedot(n, t, 0) != 0) {
-				if looktypedot(n, t, 1) != 0 {
+			if !looktypedot(n, t, 0) {
+				if looktypedot(n, t, 1) {
 					Yyerror("%v undefined (cannot refer to unexported method %v)", Nconv(n, 0), Sconv(n.Right.Sym, 0))
 				} else {
 					Yyerror("%v undefined (type %v has no method %v)", Nconv(n, 0), Tconv(t, 0), Sconv(n.Right.Sym, 0))
@@ -709,8 +708,8 @@
 			goto error
 		}
 
-		if !(lookdot(n, t, 0) != 0) {
-			if lookdot(n, t, 1) != 0 {
+		if !lookdot(n, t, 0) {
+			if lookdot(n, t, 1) {
 				Yyerror("%v undefined (cannot refer to unexported field or method %v)", Nconv(n, 0), Sconv(n.Right.Sym, 0))
 			} else {
 				Yyerror("%v undefined (type %v has no field or method %v)", Nconv(n, 0), Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, 0))
@@ -743,7 +742,7 @@
 		if t == nil {
 			goto error
 		}
-		if !(Isinter(t) != 0) {
+		if !Isinter(t) {
 			Yyerror("invalid type assertion: %v (non-interface type %v on left)", Nconv(n, 0), Tconv(t, 0))
 			goto error
 		}
@@ -758,7 +757,7 @@
 		}
 
 		if n.Type != nil && n.Type.Etype != TINTER {
-			if !(implements(n.Type, t, &missing, &have, &ptr) != 0) {
+			if !implements(n.Type, t, &missing, &have, &ptr) {
 				if have != nil && have.Sym == missing.Sym {
 					Yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
 				} else if ptr != 0 {
@@ -801,25 +800,25 @@
 			}
 			why = "string"
 			if t.Etype == TARRAY {
-				if Isfixedarray(t) != 0 {
+				if Isfixedarray(t) {
 					why = "array"
 				} else {
 					why = "slice"
 				}
 			}
 
-			if n.Right.Type != nil && !(Isint[n.Right.Type.Etype] != 0) {
+			if n.Right.Type != nil && Isint[n.Right.Type.Etype] == 0 {
 				Yyerror("non-integer %s index %v", why, Nconv(n.Right, 0))
 				break
 			}
 
-			if Isconst(n.Right, CTINT) != 0 {
+			if Isconst(n.Right, CTINT) {
 				x = Mpgetfix(n.Right.Val.U.Xval)
 				if x < 0 {
 					Yyerror("invalid %s index %v (index must be non-negative)", why, Nconv(n.Right, 0))
-				} else if Isfixedarray(t) != 0 && t.Bound > 0 && x >= t.Bound {
+				} else if Isfixedarray(t) && t.Bound > 0 && x >= t.Bound {
 					Yyerror("invalid array index %v (out of bounds for %d-element array)", Nconv(n.Right, 0), t.Bound)
-				} else if Isconst(n.Left, CTSTR) != 0 && x >= int64(len(n.Left.Val.U.Sval.S)) {
+				} else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val.U.Sval.S)) {
 					Yyerror("invalid string index %v (out of bounds for %d-byte string)", Nconv(n.Right, 0), len(n.Left.Val.U.Sval.S))
 				} else if Mpcmpfixfix(n.Right.Val.U.Xval, Maxintval[TINT]) > 0 {
 					Yyerror("invalid %s index %v (index too large)", why, Nconv(n.Right, 0))
@@ -852,7 +851,7 @@
 			goto error
 		}
 
-		if !(t.Chan&Crecv != 0) {
+		if t.Chan&Crecv == 0 {
 			Yyerror("invalid operation: %v (receive from send-only type %v)", Nconv(n, 0), Tconv(t, 0))
 			goto error
 		}
@@ -875,7 +874,7 @@
 			goto error
 		}
 
-		if !(t.Chan&Csend != 0) {
+		if t.Chan&Csend == 0 {
 			Yyerror("invalid operation: %v (send to receive-only type %v)", Nconv(n, 0), Tconv(t, 0))
 			goto error
 		}
@@ -902,8 +901,8 @@
 		indexlit(&n.Right.Left)
 		indexlit(&n.Right.Right)
 		l = n.Left
-		if Isfixedarray(l.Type) != 0 {
-			if !(islvalue(n.Left) != 0) {
+		if Isfixedarray(l.Type) {
+			if !islvalue(n.Left) {
 				Yyerror("invalid operation %v (slice of unaddressable value)", Nconv(n, 0))
 				goto error
 			}
@@ -919,17 +918,17 @@
 			goto error
 		}
 		tp = nil
-		if Istype(t, TSTRING) != 0 {
+		if Istype(t, TSTRING) {
 			n.Type = t
 			n.Op = OSLICESTR
-		} else if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+		} else if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
 			tp = t.Type
 			n.Type = typ(TARRAY)
 			n.Type.Type = tp.Type
 			n.Type.Bound = -1
 			dowidth(n.Type)
 			n.Op = OSLICEARR
-		} else if Isslice(t) != 0 {
+		} else if Isslice(t) {
 			n.Type = t
 		} else {
 			Yyerror("cannot slice %v (type %v)", Nconv(l, 0), Tconv(t, 0))
@@ -960,8 +959,8 @@
 		indexlit(&n.Right.Right.Left)
 		indexlit(&n.Right.Right.Right)
 		l = n.Left
-		if Isfixedarray(l.Type) != 0 {
-			if !(islvalue(n.Left) != 0) {
+		if Isfixedarray(l.Type) {
+			if !islvalue(n.Left) {
 				Yyerror("invalid operation %v (slice of unaddressable value)", Nconv(n, 0))
 				goto error
 			}
@@ -977,19 +976,19 @@
 			goto error
 		}
 		tp = nil
-		if Istype(t, TSTRING) != 0 {
+		if Istype(t, TSTRING) {
 			Yyerror("invalid operation %v (3-index slice of string)", Nconv(n, 0))
 			goto error
 		}
 
-		if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+		if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
 			tp = t.Type
 			n.Type = typ(TARRAY)
 			n.Type.Type = tp.Type
 			n.Type.Bound = -1
 			dowidth(n.Type)
 			n.Op = OSLICE3ARR
-		} else if Isslice(t) != 0 {
+		} else if Isslice(t) {
 			n.Type = t
 		} else {
 			Yyerror("cannot slice %v (type %v)", Nconv(l, 0), Tconv(t, 0))
@@ -1050,7 +1049,7 @@
 		l = n.Left
 		if l.Op == OTYPE {
 			if n.Isddd != 0 || l.Type.Bound == -100 {
-				if !(l.Type.Broke != 0) {
+				if l.Type.Broke == 0 {
 					Yyerror("invalid use of ... in type conversion", l)
 				}
 				n.Diag = 1
@@ -1070,7 +1069,7 @@
 			goto doconv
 		}
 
-		if count(n.List) == 1 && !(n.Isddd != 0) {
+		if count(n.List) == 1 && n.Isddd == 0 {
 			typecheck(&n.List.N, Erv|Efnstruct)
 		} else {
 			typechecklist(n.List, Erv)
@@ -1127,7 +1126,7 @@
 		}
 
 		// multiple return
-		if !(top&(Efnstruct|Etop) != 0) {
+		if top&(Efnstruct|Etop) == 0 {
 			Yyerror("multiple-value %v() in single-value context", Nconv(l, 0))
 			goto ret
 		}
@@ -1153,21 +1152,21 @@
 		}
 		switch n.Op {
 		case OCAP:
-			if !(okforcap[t.Etype] != 0) {
+			if okforcap[t.Etype] == 0 {
 				goto badcall1
 			}
 
 		case OLEN:
-			if !(okforlen[t.Etype] != 0) {
+			if okforlen[t.Etype] == 0 {
 				goto badcall1
 			}
 
 		case OREAL,
 			OIMAG:
-			if !(Iscomplex[t.Etype] != 0) {
+			if Iscomplex[t.Etype] == 0 {
 				goto badcall1
 			}
-			if Isconst(l, CTCPLX) != 0 {
+			if Isconst(l, CTCPLX) {
 				r = n
 				if n.Op == OREAL {
 					n = nodfltconst(&l.Val.U.Cval.Real)
@@ -1184,7 +1183,7 @@
 		// might be constant
 		switch t.Etype {
 		case TSTRING:
-			if Isconst(l, CTSTR) != 0 {
+			if Isconst(l, CTSTR) {
 				r = Nod(OXXX, nil, nil)
 				Nodconst(r, Types[TINT], int64(len(l.Val.U.Sval.S)))
 				r.Orig = n
@@ -1195,7 +1194,7 @@
 			if t.Bound < 0 { // slice
 				break
 			}
-			if callrecv(l) != 0 { // has call or receive
+			if callrecv(l) { // has call or receive
 				break
 			}
 			r = Nod(OXXX, nil, nil)
@@ -1289,7 +1288,7 @@
 			goto error
 		}
 
-		if !(t.Chan&Csend != 0) {
+		if t.Chan&Csend == 0 {
 			Yyerror("invalid operation: %v (cannot close receive-only channel)", Nconv(n, 0))
 			goto error
 		}
@@ -1334,7 +1333,7 @@
 			goto error
 		}
 
-		if count(args) == 1 && !(n.Isddd != 0) {
+		if count(args) == 1 && n.Isddd == 0 {
 			typecheck(&args.N, Erv|Efnstruct)
 		} else {
 			typechecklist(args, Erv)
@@ -1346,16 +1345,16 @@
 		}
 
 		// Unpack multiple-return result before type-checking.
-		if Istype(t, TSTRUCT) != 0 && t.Funarg != 0 {
+		if Istype(t, TSTRUCT) && t.Funarg != 0 {
 			t = t.Type
-			if Istype(t, TFIELD) != 0 {
+			if Istype(t, TFIELD) {
 				t = t.Type
 			}
 		}
 
 		n.Type = t
-		if !(Isslice(t) != 0) {
-			if Isconst(args.N, CTNIL) != 0 {
+		if !Isslice(t) {
+			if Isconst(args.N, CTNIL) {
 				Yyerror("first argument to append must be typed slice; have untyped nil", t)
 				goto error
 			}
@@ -1375,7 +1374,7 @@
 				goto error
 			}
 
-			if Istype(t.Type, TUINT8) != 0 && Istype(args.Next.N.Type, TSTRING) != 0 {
+			if Istype(t.Type, TUINT8) && Istype(args.Next.N.Type, TSTRING) {
 				defaultlit(&args.Next.N, Types[TSTRING])
 				goto ret
 			}
@@ -1422,7 +1421,7 @@
 		}
 
 		// copy([]byte, string)
-		if Isslice(n.Left.Type) != 0 && n.Right.Type.Etype == TSTRING {
+		if Isslice(n.Left.Type) && n.Right.Type.Etype == TSTRING {
 			if Eqtype(n.Left.Type.Type, bytetype) {
 				goto ret
 			}
@@ -1430,10 +1429,10 @@
 			goto error
 		}
 
-		if !(Isslice(n.Left.Type) != 0) || !(Isslice(n.Right.Type) != 0) {
-			if !(Isslice(n.Left.Type) != 0) && !(Isslice(n.Right.Type) != 0) {
+		if !Isslice(n.Left.Type) || !Isslice(n.Right.Type) {
+			if !Isslice(n.Left.Type) && !Isslice(n.Right.Type) {
 				Yyerror("arguments to copy must be slices; have %v, %v", Tconv(n.Left.Type, obj.FmtLong), Tconv(n.Right.Type, obj.FmtLong))
-			} else if !(Isslice(n.Left.Type) != 0) {
+			} else if !Isslice(n.Left.Type) {
 				Yyerror("first argument to copy should be slice; have %v", Tconv(n.Left.Type, obj.FmtLong))
 			} else {
 				Yyerror("second argument to copy should be slice or string; have %v", Tconv(n.Right.Type, obj.FmtLong))
@@ -1474,7 +1473,7 @@
 			goto error
 
 		case TARRAY:
-			if !(Isslice(t) != 0) {
+			if !Isslice(t) {
 				Yyerror("cannot make type %v", Tconv(t, 0))
 				goto error
 			}
@@ -1502,7 +1501,7 @@
 			if et != 0 {
 				goto error
 			}
-			if Isconst(l, CTINT) != 0 && r != nil && Isconst(r, CTINT) != 0 && Mpcmpfixfix(l.Val.U.Xval, r.Val.U.Xval) > 0 {
+			if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && Mpcmpfixfix(l.Val.U.Xval, r.Val.U.Xval) > 0 {
 				Yyerror("len larger than cap in make(%v)", Tconv(t, 0))
 				goto error
 			}
@@ -1587,7 +1586,7 @@
 		typechecklist(n.List, Erv|Eindir) // Eindir: address does not escape
 		for args = n.List; args != nil; args = args.Next {
 			// Special case for print: int constant is int64, not int.
-			if Isconst(args.N, CTINT) != 0 {
+			if Isconst(args.N, CTINT) {
 				defaultlit(&args.N, Types[TINT64])
 			} else {
 				defaultlit(&args.N, nil)
@@ -1646,7 +1645,7 @@
 		if t == nil {
 			goto error
 		}
-		if !(Isslice(t) != 0) && t.Etype != TSTRING {
+		if !Isslice(t) && t.Etype != TSTRING {
 			Fatal("OSPTR of %v", Tconv(t, 0))
 		}
 		if t.Etype == TSTRING {
@@ -1708,7 +1707,7 @@
 	case ODEFER:
 		ok |= Etop
 		typecheck(&n.Left, Etop|Erv)
-		if !(n.Left.Diag != 0) {
+		if n.Left.Diag == 0 {
 			checkdefergo(n)
 		}
 		goto ret
@@ -1809,7 +1808,7 @@
 	case ODCLTYPE:
 		ok |= Etop
 		typecheck(&n.Left, Etype)
-		if !(incannedimport != 0) {
+		if incannedimport == 0 {
 			checkwidth(n.Left.Type)
 		}
 		goto ret
@@ -1850,7 +1849,7 @@
 		if r.Type.Etype != TBLANK {
 			aop = assignop(l.Type, r.Type, nil)
 			if aop != 0 {
-				if Isinter(r.Type) != 0 && !(Isinter(l.Type) != 0) && algtype1(l.Type, nil) == ANOEQ {
+				if Isinter(r.Type) && !Isinter(l.Type) && algtype1(l.Type, nil) == ANOEQ {
 					Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(l.Type))
 					goto error
 				}
@@ -1871,7 +1870,7 @@
 		if l.Type.Etype != TBLANK {
 			aop = assignop(r.Type, l.Type, nil)
 			if aop != 0 {
-				if Isinter(l.Type) != 0 && !(Isinter(r.Type) != 0) && algtype1(r.Type, nil) == ANOEQ {
+				if Isinter(l.Type) && !Isinter(r.Type) && algtype1(r.Type, nil) == ANOEQ {
 					Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(r.Type))
 					goto error
 				}
@@ -1905,29 +1904,29 @@
 		}
 	}
 
-	if !(okfor[op][et] != 0) {
+	if okfor[op][et] == 0 {
 		Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(t))
 		goto error
 	}
 
 	// okfor allows any array == array, map == map, func == func.
 	// restrict to slice/map/func == nil and nil == slice/map/func.
-	if Isfixedarray(l.Type) != 0 && algtype1(l.Type, nil) == ANOEQ {
+	if Isfixedarray(l.Type) && algtype1(l.Type, nil) == ANOEQ {
 		Yyerror("invalid operation: %v (%v cannot be compared)", Nconv(n, 0), Tconv(l.Type, 0))
 		goto error
 	}
 
-	if Isslice(l.Type) != 0 && !(isnil(l) != 0) && !(isnil(r) != 0) {
+	if Isslice(l.Type) && !isnil(l) && !isnil(r) {
 		Yyerror("invalid operation: %v (slice can only be compared to nil)", Nconv(n, 0))
 		goto error
 	}
 
-	if l.Type.Etype == TMAP && !(isnil(l) != 0) && !(isnil(r) != 0) {
+	if l.Type.Etype == TMAP && !isnil(l) && !isnil(r) {
 		Yyerror("invalid operation: %v (map can only be compared to nil)", Nconv(n, 0))
 		goto error
 	}
 
-	if l.Type.Etype == TFUNC && !(isnil(l) != 0) && !(isnil(r) != 0) {
+	if l.Type.Etype == TFUNC && !isnil(l) && !isnil(r) {
 		Yyerror("invalid operation: %v (func can only be compared to nil)", Nconv(n, 0))
 		goto error
 	}
@@ -1997,7 +1996,7 @@
 		}
 	}
 
-	if (op == ODIV || op == OMOD) && Isconst(r, CTINT) != 0 {
+	if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
 		if mpcmpfixc(r.Val.U.Xval, 0) == 0 {
 			Yyerror("division by zero")
 			goto error
@@ -2011,13 +2010,13 @@
 	defaultlit(&r, Types[TUINT])
 	n.Right = r
 	t = r.Type
-	if !(Isint[t.Etype] != 0) || Issigned[t.Etype] != 0 {
+	if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
 		Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
 		goto error
 	}
 
 	t = l.Type
-	if t != nil && t.Etype != TIDEAL && !(Isint[t.Etype] != 0) {
+	if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
 		Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
 		goto error
 	}
@@ -2032,14 +2031,14 @@
 	ok |= Erv
 	saveorignode(n)
 	typecheck(&n.Left, Erv|top&(Eindir|Eiota))
-	convlit1(&n.Left, n.Type, 1)
+	convlit1(&n.Left, n.Type, true)
 	t = n.Left.Type
 	if t == nil || n.Type == nil {
 		goto error
 	}
 	n.Op = uint8(convertop(t, n.Type, &why))
 	if (n.Op) == 0 {
-		if !(n.Diag != 0) && !(n.Type.Broke != 0) {
+		if n.Diag == 0 && n.Type.Broke == 0 {
 			Yyerror("cannot convert %v to type %v%s", Nconv(n.Left, obj.FmtLong), Tconv(n.Type, 0), why)
 			n.Diag = 1
 		}
@@ -2073,7 +2072,7 @@
 
 ret:
 	t = n.Type
-	if t != nil && !(t.Funarg != 0) && n.Op != OTYPE {
+	if t != nil && t.Funarg == 0 && n.Op != OTYPE {
 		switch t.Etype {
 		case TFUNC, // might have TANY; wait until its called
 			TANY,
@@ -2088,12 +2087,12 @@
 		}
 	}
 
-	if safemode != 0 && !(incannedimport != 0) && !(importpkg != nil) && !(compiling_wrappers != 0) && t != nil && t.Etype == TUNSAFEPTR {
+	if safemode != 0 && incannedimport == 0 && importpkg == nil && compiling_wrappers == 0 && t != nil && t.Etype == TUNSAFEPTR {
 		Yyerror("cannot use unsafe.Pointer")
 	}
 
 	evconst(n)
-	if n.Op == OTYPE && !(top&Etype != 0) {
+	if n.Op == OTYPE && top&Etype == 0 {
 		Yyerror("type %v is not an expression", Tconv(n.Type, 0))
 		goto error
 	}
@@ -2104,12 +2103,12 @@
 	}
 
 	// TODO(rsc): simplify
-	if (top&(Ecall|Erv|Etype) != 0) && !(top&Etop != 0) && !(ok&(Erv|Etype|Ecall) != 0) {
+	if (top&(Ecall|Erv|Etype) != 0) && top&Etop == 0 && ok&(Erv|Etype|Ecall) == 0 {
 		Yyerror("%v used as value", Nconv(n, 0))
 		goto error
 	}
 
-	if (top&Etop != 0) && !(top&(Ecall|Erv|Etype) != 0) && !(ok&Etop != 0) {
+	if (top&Etop != 0) && top&(Ecall|Erv|Etype) == 0 && ok&Etop == 0 {
 		if n.Diag == 0 {
 			Yyerror("%v evaluated but not used", Nconv(n, 0))
 			n.Diag = 1
@@ -2142,7 +2141,7 @@
 	if t == nil {
 		return -1
 	}
-	if !(Isint[t.Etype] != 0) {
+	if Isint[t.Etype] == 0 {
 		Yyerror("invalid slice index %v (type %v)", Nconv(r, 0), Tconv(t, 0))
 		return -1
 	}
@@ -2154,7 +2153,7 @@
 		} else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val.U.Xval) > tp.Bound {
 			Yyerror("invalid slice index %v (out of bounds for %d-element array)", Nconv(r, 0), tp.Bound)
 			return -1
-		} else if Isconst(l, CTSTR) != 0 && Mpgetfix(r.Val.U.Xval) > int64(len(l.Val.U.Sval.S)) {
+		} else if Isconst(l, CTSTR) && Mpgetfix(r.Val.U.Xval) > int64(len(l.Val.U.Sval.S)) {
 			Yyerror("invalid slice index %v (out of bounds for %d-byte string)", Nconv(r, 0), len(l.Val.U.Sval.S))
 			return -1
 		} else if Mpcmpfixfix(r.Val.U.Xval, Maxintval[TINT]) > 0 {
@@ -2222,7 +2221,7 @@
 		return
 	}
 
-	if !(n.Diag != 0) {
+	if n.Diag == 0 {
 		// The syntax made sure it was a call, so this must be
 		// a conversion.
 		n.Diag = 1
@@ -2239,14 +2238,14 @@
 	n = *nn
 
 	t = n.Type
-	if t == nil || !(Isptr[t.Etype] != 0) {
+	if t == nil || Isptr[t.Etype] == 0 {
 		return
 	}
 	t = t.Type
 	if t == nil {
 		return
 	}
-	if !(Isfixedarray(t) != 0) {
+	if !Isfixedarray(t) {
 		return
 	}
 	n = Nod(OIND, n, nil)
@@ -2335,7 +2334,7 @@
 	return r
 }
 
-func looktypedot(n *Node, t *Type, dostrcmp int) int {
+func looktypedot(n *Node, t *Type, dostrcmp int) bool {
 	var f1 *Type
 	var f2 *Type
 	var s *Sym
@@ -2345,14 +2344,14 @@
 	if t.Etype == TINTER {
 		f1 = lookdot1(n, s, t, t.Type, dostrcmp)
 		if f1 == nil {
-			return 0
+			return false
 		}
 
 		n.Right = methodname(n.Right, t)
 		n.Xoffset = f1.Width
 		n.Type = f1.Type
 		n.Op = ODOTINTER
-		return 1
+		return true
 	}
 
 	// Find the base type: methtype will fail if t
@@ -2360,26 +2359,26 @@
 	f2 = methtype(t, 0)
 
 	if f2 == nil {
-		return 0
+		return false
 	}
 
 	expandmeth(f2)
 	f2 = lookdot1(n, s, f2, f2.Xmethod, dostrcmp)
 	if f2 == nil {
-		return 0
+		return false
 	}
 
 	// disallow T.m if m requires *T receiver
-	if Isptr[getthisx(f2.Type).Type.Type.Etype] != 0 && !(Isptr[t.Etype] != 0) && f2.Embedded != 2 && !(isifacemethod(f2.Type) != 0) {
+	if Isptr[getthisx(f2.Type).Type.Type.Etype] != 0 && Isptr[t.Etype] == 0 && f2.Embedded != 2 && !isifacemethod(f2.Type) {
 		Yyerror("invalid method expression %v (needs pointer receiver: (*%v).%v)", Nconv(n, 0), Tconv(t, 0), Sconv(f2.Sym, obj.FmtShort))
-		return 0
+		return false
 	}
 
 	n.Right = methodname(n.Right, t)
 	n.Xoffset = f2.Width
 	n.Type = f2.Type
 	n.Op = ODOTMETH
-	return 1
+	return true
 }
 
 func derefall(t *Type) *Type {
@@ -2389,7 +2388,7 @@
 	return t
 }
 
-func lookdot(n *Node, t *Type, dostrcmp int) int {
+func lookdot(n *Node, t *Type, dostrcmp int) bool {
 	var f1 *Type
 	var f2 *Type
 	var tt *Type
@@ -2434,7 +2433,7 @@
 			n.Op = ODOTINTER
 		}
 
-		return 1
+		return true
 	}
 
 	if f2 != nil {
@@ -2475,31 +2474,31 @@
 		//		print("lookdot found [%p] %T\n", f2->type, f2->type);
 		n.Op = ODOTMETH
 
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
-func nokeys(l *NodeList) int {
+func nokeys(l *NodeList) bool {
 	for ; l != nil; l = l.Next {
 		if l.N.Op == OKEY {
-			return 0
+			return false
 		}
 	}
-	return 1
+	return true
 }
 
-func hasddd(t *Type) int {
+func hasddd(t *Type) bool {
 	var tl *Type
 
 	for tl = t.Type; tl != nil; tl = tl.Down {
 		if tl.Isddd != 0 {
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 func downcount(t *Type) int {
@@ -2538,7 +2537,7 @@
 		n = nl.N
 		if n.Type != nil {
 			if n.Type.Etype == TSTRUCT && n.Type.Funarg != 0 {
-				if !(hasddd(tstruct) != 0) {
+				if !hasddd(tstruct) {
 					n1 = downcount(tstruct)
 					n2 = downcount(n.Type)
 					if n2 > n1 {
@@ -2589,7 +2588,7 @@
 
 	n1 = downcount(tstruct)
 	n2 = count(nl)
-	if !(hasddd(tstruct) != 0) {
+	if !hasddd(tstruct) {
 		if n2 > n1 {
 			goto toomany
 		}
@@ -2597,7 +2596,7 @@
 			goto notenough
 		}
 	} else {
-		if !(isddd != 0) {
+		if isddd == 0 {
 			if n2 < n1-1 {
 				goto notenough
 			}
@@ -2667,7 +2666,7 @@
 	return
 
 notenough:
-	if n == nil || !(n.Diag != 0) {
+	if n == nil || n.Diag == 0 {
 		if call != nil {
 			Yyerror("not enough arguments in call to %v", Nconv(call, 0))
 		} else {
@@ -2809,15 +2808,15 @@
 	hash[h] = n
 }
 
-func prime(h uint32, sr uint32) int {
+func prime(h uint32, sr uint32) bool {
 	var n uint32
 
 	for n = 3; n <= sr; n += 2 {
 		if h%n == 0 {
-			return 0
+			return false
 		}
 	}
-	return 1
+	return true
 }
 
 func inithash(n *Node, autohash []*Node) []*Node {
@@ -2855,7 +2854,7 @@
 	}
 
 	// check for primeality
-	for !(prime(h, sr) != 0) {
+	for !prime(h, sr) {
 		h += 2
 	}
 
@@ -2863,12 +2862,12 @@
 	return make([]*Node, h)
 }
 
-func iscomptype(t *Type) int {
+func iscomptype(t *Type) bool {
 	switch t.Etype {
 	case TARRAY,
 		TSTRUCT,
 		TMAP:
-		return 1
+		return true
 
 	case TPTR32,
 		TPTR64:
@@ -2876,15 +2875,15 @@
 		case TARRAY,
 			TSTRUCT,
 			TMAP:
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 func pushtype(n *Node, t *Type) {
-	if n == nil || n.Op != OCOMPLIT || !(iscomptype(t) != 0) {
+	if n == nil || n.Op != OCOMPLIT || !iscomptype(t) {
 		return
 	}
 
@@ -2946,13 +2945,13 @@
 	if Isptr[t.Etype] != 0 {
 		// For better or worse, we don't allow pointers as the composite literal type,
 		// except when using the &T syntax, which sets implicit on the OIND.
-		if !(n.Right.Implicit != 0) {
+		if n.Right.Implicit == 0 {
 			Yyerror("invalid pointer type %v for composite literal (use &%v instead)", Tconv(t, 0), Tconv(t.Type, 0))
 			goto error
 		}
 
 		// Also, the underlying type must be a struct, map, slice, or array.
-		if !(iscomptype(t) != 0) {
+		if !iscomptype(t) {
 			Yyerror("invalid pointer type %v for composite literal", Tconv(t, 0))
 			goto error
 		}
@@ -2983,7 +2982,7 @@
 			typecheck(&l.Left, Erv)
 			evconst(l.Left)
 			i = nonnegconst(l.Left)
-			if i < 0 && !(l.Left.Diag != 0) {
+			if i < 0 && l.Left.Diag == 0 {
 				Yyerror("array index must be non-negative integer constant")
 				l.Left.Diag = 1
 				i = -(1 << 30) // stay negative for a while
@@ -3047,7 +3046,7 @@
 
 	case TSTRUCT:
 		bad = 0
-		if n.List != nil && nokeys(n.List) != 0 {
+		if n.List != nil && nokeys(n.List) {
 			// simple list of variables
 			f = t.Type
 
@@ -3057,7 +3056,7 @@
 				if f == nil {
 					tmp12 := bad
 					bad++
-					if !(tmp12 != 0) {
+					if tmp12 == 0 {
 						Yyerror("too many values in struct initializer")
 					}
 					continue
@@ -3090,7 +3089,7 @@
 				if l.Op != OKEY {
 					tmp13 := bad
 					bad++
-					if !(tmp13 != 0) {
+					if tmp13 == 0 {
 						Yyerror("mixture of field:value and value initializers")
 					}
 					typecheck(&ll.N, Erv)
@@ -3164,14 +3163,14 @@
 /*
  * lvalue etc
  */
-func islvalue(n *Node) int {
+func islvalue(n *Node) bool {
 	switch n.Op {
 	case OINDEX:
-		if Isfixedarray(n.Left.Type) != 0 {
+		if Isfixedarray(n.Left.Type) {
 			return islvalue(n.Left)
 		}
 		if n.Left.Type != nil && n.Left.Type.Etype == TSTRING {
-			return 0
+			return false
 		}
 		fallthrough
 
@@ -3180,23 +3179,23 @@
 		ODOTPTR,
 		OCLOSUREVAR,
 		OPARAM:
-		return 1
+		return true
 
 	case ODOT:
 		return islvalue(n.Left)
 
 	case ONAME:
 		if n.Class == PFUNC {
-			return 0
+			return false
 		}
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 func checklvalue(n *Node, verb string) {
-	if !(islvalue(n) != 0) {
+	if !islvalue(n) {
 		Yyerror("cannot %s %v", verb, Nconv(n, 0))
 	}
 }
@@ -3221,7 +3220,7 @@
 		}
 	}
 
-	if islvalue(n) != 0 {
+	if islvalue(n) {
 		return
 	}
 	if n.Op == OINDEXMAP {
@@ -3245,28 +3244,28 @@
 
 // Check whether l and r are the same side effect-free expression,
 // so that it is safe to reuse one instead of computing both.
-func samesafeexpr(l *Node, r *Node) int {
+func samesafeexpr(l *Node, r *Node) bool {
 	if l.Op != r.Op || !Eqtype(l.Type, r.Type) {
-		return 0
+		return false
 	}
 
 	switch l.Op {
 	case ONAME,
 		OCLOSUREVAR:
-		return bool2int(l == r)
+		return l == r
 
 	case ODOT,
 		ODOTPTR:
-		return bool2int(l.Right != nil && r.Right != nil && l.Right.Sym == r.Right.Sym && samesafeexpr(l.Left, r.Left) != 0)
+		return l.Right != nil && r.Right != nil && l.Right.Sym == r.Right.Sym && samesafeexpr(l.Left, r.Left)
 
 	case OIND:
 		return samesafeexpr(l.Left, r.Left)
 
 	case OINDEX:
-		return bool2int(samesafeexpr(l.Left, r.Left) != 0 && samesafeexpr(l.Right, r.Right) != 0)
+		return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -3313,14 +3312,14 @@
 	// Recognize slices being updated in place, for better code generation later.
 	// Don't rewrite if using race detector, to avoid needing to teach race detector
 	// about this optimization.
-	if n.Left != nil && n.Left.Op != OINDEXMAP && n.Right != nil && !(flag_race != 0) {
+	if n.Left != nil && n.Left.Op != OINDEXMAP && n.Right != nil && flag_race == 0 {
 		switch n.Right.Op {
 		// For x = x[0:y], x can be updated in place, without touching pointer.
 		// TODO(rsc): Reenable once it is actually updated in place without touching the pointer.
 		case OSLICE,
 			OSLICE3,
 			OSLICESTR:
-			if false && samesafeexpr(n.Left, n.Right.Left) != 0 && (n.Right.Right.Left == nil || iszero(n.Right.Right.Left) != 0) {
+			if false && samesafeexpr(n.Left, n.Right.Left) && (n.Right.Right.Left == nil || iszero(n.Right.Right.Left)) {
 				n.Right.Reslice = 1
 			}
 
@@ -3329,7 +3328,7 @@
 		// can take care of updating the pointer, and only in that case.
 		// TODO(rsc): Reenable once the emitted code does update the pointer.
 		case OAPPEND:
-			if false && n.Right.List != nil && samesafeexpr(n.Left, n.Right.List.N) != 0 {
+			if false && n.Right.List != nil && samesafeexpr(n.Left, n.Right.List.N) {
 				n.Right.Reslice = 1
 			}
 		}
@@ -3664,7 +3663,7 @@
 	if ntypecheckdeftype == 1 {
 		for {
 			l = methodqueue
-			if !(l != nil) {
+			if l == nil {
 				break
 			}
 			methodqueue = nil
@@ -3704,7 +3703,7 @@
 	setlineno(n)
 
 	if n.Op == ONONAME {
-		if !(n.Diag != 0) {
+		if n.Diag == 0 {
 			n.Diag = 1
 			if n.Lineno != 0 {
 				lineno = n.Lineno
@@ -3746,7 +3745,6 @@
 	switch n.Op {
 	default:
 		Fatal("typecheckdef %v", Oconv(int(n.Op), 0))
-		fallthrough
 
 		// not really syms
 	case OGOTO,
@@ -3773,13 +3771,13 @@
 		}
 
 		typecheck(&e, Erv|Eiota)
-		if Isconst(e, CTNIL) != 0 {
+		if Isconst(e, CTNIL) {
 			Yyerror("const initializer cannot be nil")
 			goto ret
 		}
 
-		if e.Type != nil && e.Op != OLITERAL || !(isgoconst(e) != 0) {
-			if !(e.Diag != 0) {
+		if e.Type != nil && e.Op != OLITERAL || !isgoconst(e) {
+			if e.Diag == 0 {
 				Yyerror("const initializer %v is not a constant", Nconv(e, 0))
 				e.Diag = 1
 			}
@@ -3789,12 +3787,12 @@
 
 		t = n.Type
 		if t != nil {
-			if !(okforconst[t.Etype] != 0) {
+			if okforconst[t.Etype] == 0 {
 				Yyerror("invalid constant type %v", Tconv(t, 0))
 				goto ret
 			}
 
-			if !(isideal(e.Type) != 0) && !Eqtype(t, e.Type) {
+			if !isideal(e.Type) && !Eqtype(t, e.Type) {
 				Yyerror("cannot use %v as type %v in const initializer", Nconv(e, obj.FmtLong), Tconv(t, 0))
 				goto ret
 			}
@@ -3867,7 +3865,7 @@
 	}
 
 ret:
-	if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) != 0 {
+	if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) {
 		Fatal("got %v for %v", Tconv(n.Type, 0), Nconv(n, 0))
 	}
 	if typecheckdefstack.N != n {
@@ -3910,7 +3908,7 @@
 		}
 	}
 
-	if !(Isint[n.Type.Etype] != 0) && n.Type.Etype != TIDEAL {
+	if Isint[n.Type.Etype] == 0 && n.Type.Etype != TIDEAL {
 		Yyerror("non-integer %s argument in make(%v) - %v", arg, Tconv(t, 0), Tconv(n.Type, 0))
 		return -1
 	}
@@ -3991,12 +3989,12 @@
 	}
 }
 
-func isterminating(l *NodeList, top int) int {
+func isterminating(l *NodeList, top int) bool {
 	var def int
 	var n *Node
 
 	if l == nil {
-		return 0
+		return false
 	}
 	if top != 0 {
 		for l.Next != nil && l.N.Op != OLABEL {
@@ -4011,7 +4009,7 @@
 	n = l.N
 
 	if n == nil {
-		return 0
+		return false
 	}
 
 	switch n.Op {
@@ -4028,48 +4026,48 @@
 		ORETJMP,
 		OPANIC,
 		OXFALL:
-		return 1
+		return true
 
 	case OFOR:
 		if n.Ntest != nil {
-			return 0
+			return false
 		}
 		if n.Hasbreak != 0 {
-			return 0
+			return false
 		}
-		return 1
+		return true
 
 	case OIF:
-		return bool2int(isterminating(n.Nbody, 0) != 0 && isterminating(n.Nelse, 0) != 0)
+		return isterminating(n.Nbody, 0) && isterminating(n.Nelse, 0)
 
 	case OSWITCH,
 		OTYPESW,
 		OSELECT:
 		if n.Hasbreak != 0 {
-			return 0
+			return false
 		}
 		def = 0
 		for l = n.List; l != nil; l = l.Next {
-			if !(isterminating(l.N.Nbody, 0) != 0) {
-				return 0
+			if !isterminating(l.N.Nbody, 0) {
+				return false
 			}
 			if l.N.List == nil { // default
 				def = 1
 			}
 		}
 
-		if n.Op != OSELECT && !(def != 0) {
-			return 0
+		if n.Op != OSELECT && def == 0 {
+			return false
 		}
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 func checkreturn(fn *Node) {
 	if fn.Type.Outtuple != 0 && fn.Nbody != nil {
-		if !(isterminating(fn.Nbody, 1) != 0) {
+		if !isterminating(fn.Nbody, 1) {
 			yyerrorl(int(fn.Endlineno), "missing return at end of function")
 		}
 	}
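
Illustration only, not part of the change: the typecheck.go hunks above all follow the same mechanical rewrite. Predicates that used to return an int (0/1) now return bool, so call sites drop the `!= 0` test, and negations such as `!(f(x) != 0)` collapse to `!f(x)` (or, for plain int flags like `n.Diag`, to `x == 0`). A minimal before/after sketch with invented names:

	// Sketch of the converter's int→bool predicate rewrite; names are made up.
	package main

	import "fmt"

	type node struct{ next *node }

	// Old shape: C-style predicate returning 0 or 1.
	func hasNextOld(n *node) int {
		if n != nil && n.next != nil {
			return 1
		}
		return 0
	}

	// New shape: the result is identified as bool.
	func hasNext(n *node) bool {
		return n != nil && n.next != nil
	}

	func main() {
		n := &node{}
		fmt.Println(!(hasNextOld(n) != 0)) // before: true (n has no next node)
		fmt.Println(!hasNext(n))           // after: same result, plainer code
	}
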
diff --git a/src/cmd/internal/gc/unsafe.go b/src/cmd/internal/gc/unsafe.go
index e50ea19..3970468 100644
--- a/src/cmd/internal/gc/unsafe.go
+++ b/src/cmd/internal/gc/unsafe.go
@@ -161,18 +161,18 @@
 	return n
 }
 
-func isunsafebuiltin(n *Node) int {
+func isunsafebuiltin(n *Node) bool {
 	if n == nil || n.Op != ONAME || n.Sym == nil || n.Sym.Pkg != unsafepkg {
-		return 0
+		return false
 	}
 	if n.Sym.Name == "Sizeof" {
-		return 1
+		return true
 	}
 	if n.Sym.Name == "Offsetof" {
-		return 1
+		return true
 	}
 	if n.Sym.Name == "Alignof" {
-		return 1
+		return true
 	}
-	return 0
+	return false
 }
diff --git a/src/cmd/internal/gc/walk.go b/src/cmd/internal/gc/walk.go
index 37299ca..dfb965e 100644
--- a/src/cmd/internal/gc/walk.go
+++ b/src/cmd/internal/gc/walk.go
@@ -86,13 +86,13 @@
 	}
 }
 
-func samelist(a *NodeList, b *NodeList) int {
+func samelist(a *NodeList, b *NodeList) bool {
 	for ; a != nil && b != nil; (func() { a = a.Next; b = b.Next })() {
 		if a.N != b.N {
-			return 0
+			return false
 		}
 	}
-	return bool2int(a == b)
+	return a == b
 }
 
 func paramoutheap(fn *Node) int {
@@ -314,7 +314,7 @@
 				}
 			}
 
-			if samelist(rl, n.List) != 0 {
+			if samelist(rl, n.List) {
 				// special return in disguise
 				n.List = nil
 
@@ -520,7 +520,7 @@
 		if Isptr[t.Etype] != 0 {
 			t = t.Type
 		}
-		if Isfixedarray(t) != 0 {
+		if Isfixedarray(t) {
 			safeexpr(n.Left, init)
 			Nodconst(n, n.Type, t.Bound)
 			n.Typecheck = 1
@@ -533,8 +533,8 @@
 		walkexpr(&n.Left, init)
 		walkexpr(&n.Right, init)
 		t = n.Left.Type
-		n.Bounded = uint8(bounded(n.Right, 8*t.Width))
-		if Debug['m'] != 0 && n.Etype != 0 && !(Isconst(n.Right, CTINT) != 0) {
+		n.Bounded = bounded(n.Right, 8*t.Width)
+		if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
 			Warn("shift bounds check elided")
 		}
 		goto ret
@@ -620,7 +620,7 @@
 		goto ret
 
 	case ONAME:
-		if !(n.Class&PHEAP != 0) && n.Class != PPARAMREF {
+		if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
 			n.Addable = 1
 		}
 		goto ret
@@ -696,11 +696,11 @@
 		walkexpr(&n.Left, init)
 		n.Left = safeexpr(n.Left, init)
 
-		if oaslit(n, init) != 0 {
+		if oaslit(n, init) {
 			goto ret
 		}
 
-		if n.Right == nil || iszero(n.Right) != 0 && !(flag_race != 0) {
+		if n.Right == nil || iszero(n.Right) && flag_race == 0 {
 			goto ret
 		}
 
@@ -719,12 +719,12 @@
 			from = "I"
 
 			to = "T"
-			if isnilinter(r.Left.Type) != 0 {
+			if isnilinter(r.Left.Type) {
 				from = "E"
 			}
-			if isnilinter(r.Type) != 0 {
+			if isnilinter(r.Type) {
 				to = "E"
-			} else if Isinter(r.Type) != 0 {
+			} else if Isinter(r.Type) {
 				to = "I"
 			}
 
@@ -920,12 +920,12 @@
 		from = "I"
 
 		to = "T"
-		if isnilinter(r.Left.Type) != 0 {
+		if isnilinter(r.Left.Type) {
 			from = "E"
 		}
-		if isnilinter(r.Type) != 0 {
+		if isnilinter(r.Type) {
 			to = "E"
-		} else if Isinter(r.Type) != 0 {
+		} else if Isinter(r.Type) {
 			to = "I"
 		}
 		buf = fmt.Sprintf("assert%s2%s2", from, to)
@@ -947,13 +947,12 @@
 	case ODOTTYPE,
 		ODOTTYPE2:
 		Fatal("walkexpr ODOTTYPE") // should see inside OAS or OAS2 only
-		fallthrough
 
 	case OCONVIFACE:
 		walkexpr(&n.Left, init)
 
 		// Optimize convT2E as a two-word copy when T is pointer-shaped.
-		if isnilinter(n.Type) != 0 && isdirectiface(n.Left.Type) != 0 {
+		if isnilinter(n.Type) && isdirectiface(n.Left.Type) {
 			l = Nod(OEFACE, typename(n.Left.Type), n.Left)
 			l.Type = n.Type
 			l.Typecheck = n.Typecheck
@@ -967,25 +966,25 @@
 		from = "T"
 
 		to = "I"
-		if isnilinter(n.Left.Type) != 0 {
+		if isnilinter(n.Left.Type) {
 			from = "E"
-		} else if Isinter(n.Left.Type) != 0 {
+		} else if Isinter(n.Left.Type) {
 			from = "I"
 		}
-		if isnilinter(n.Type) != 0 {
+		if isnilinter(n.Type) {
 			to = "E"
 		}
 		buf = fmt.Sprintf("conv%s2%s", from, to)
 
 		fn = syslook(buf, 1)
 		ll = nil
-		if !(Isinter(n.Left.Type) != 0) {
+		if !Isinter(n.Left.Type) {
 			ll = list(ll, typename(n.Left.Type))
 		}
-		if !(isnilinter(n.Type) != 0) {
+		if !isnilinter(n.Type) {
 			ll = list(ll, typename(n.Type))
 		}
-		if !(Isinter(n.Left.Type) != 0) && !(isnilinter(n.Type) != 0) {
+		if !Isinter(n.Left.Type) && !isnilinter(n.Type) {
 			sym = Pkglookup(fmt.Sprintf("%v.%v", Tconv(n.Left.Type, obj.FmtLeft), Tconv(n.Type, obj.FmtLeft)), itabpkg)
 			if sym.Def == nil {
 				l = Nod(ONAME, nil, nil)
@@ -1002,7 +1001,7 @@
 			l.Addable = 1
 			ll = list(ll, l)
 
-			if isdirectiface(n.Left.Type) != 0 {
+			if isdirectiface(n.Left.Type) {
 				/* For pointer types, we can make a special form of optimization
 				 *
 				 * These statements are put onto the expression init list:
@@ -1040,7 +1039,7 @@
 			}
 		}
 
-		if Isinter(n.Left.Type) != 0 {
+		if Isinter(n.Left.Type) {
 			ll = list(ll, n.Left)
 		} else {
 			// regular types are passed by reference to avoid C vararg calls
@@ -1049,7 +1048,7 @@
 			// with a non-interface, especially in a switch on interface value
 			// with non-interface cases, is not visible to orderstmt, so we
 			// have to fall back on allocating a temp here.
-			if islvalue(n.Left) != 0 {
+			if islvalue(n.Left) {
 				ll = list(ll, Nod(OADDR, n.Left, nil))
 			} else {
 				ll = list(ll, Nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil))
@@ -1174,28 +1173,28 @@
 
 		// if range of type cannot exceed static array bound,
 		// disable bounds check.
-		if n.Bounded != 0 {
+		if n.Bounded {
 			goto ret
 		}
 		t = n.Left.Type
 		if t != nil && Isptr[t.Etype] != 0 {
 			t = t.Type
 		}
-		if Isfixedarray(t) != 0 {
-			n.Bounded = uint8(bounded(r, t.Bound))
-			if Debug['m'] != 0 && n.Bounded != 0 && !(Isconst(n.Right, CTINT) != 0) {
+		if Isfixedarray(t) {
+			n.Bounded = bounded(r, t.Bound)
+			if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
 				Warn("index bounds check elided")
 			}
-			if Smallintconst(n.Right) != 0 && !(n.Bounded != 0) {
+			if Smallintconst(n.Right) && !n.Bounded {
 				Yyerror("index out of bounds")
 			}
-		} else if Isconst(n.Left, CTSTR) != 0 {
-			n.Bounded = uint8(bounded(r, int64(len(n.Left.Val.U.Sval.S))))
-			if Debug['m'] != 0 && n.Bounded != 0 && !(Isconst(n.Right, CTINT) != 0) {
+		} else if Isconst(n.Left, CTSTR) {
+			n.Bounded = bounded(r, int64(len(n.Left.Val.U.Sval.S)))
+			if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
 				Warn("index bounds check elided")
 			}
-			if Smallintconst(n.Right) != 0 {
-				if !(n.Bounded != 0) {
+			if Smallintconst(n.Right) {
+				if !n.Bounded {
 					Yyerror("index out of bounds")
 				} else {
 					// replace "abc"[1] with 'b'.
@@ -1209,7 +1208,7 @@
 			}
 		}
 
-		if Isconst(n.Right, CTINT) != 0 {
+		if Isconst(n.Right, CTINT) {
 			if Mpcmpfixfix(n.Right.Val.U.Xval, &mpzero) < 0 || Mpcmpfixfix(n.Right.Val.U.Xval, Maxintval[TINT]) > 0 {
 				Yyerror("index out of bounds")
 			}
@@ -1264,7 +1263,6 @@
 
 	case ORECV:
 		Fatal("walkexpr ORECV") // should see inside OAS only
-		fallthrough
 
 	case OSLICE:
 		if n.Right != nil && n.Right.Left == nil && n.Right.Right == nil { // noop
@@ -1344,7 +1342,7 @@
 	// comparing the lengths instead will yield the same result
 	// without the function call.
 	case OCMPSTR:
-		if (Isconst(n.Left, CTSTR) != 0 && len(n.Left.Val.U.Sval.S) == 0) || (Isconst(n.Right, CTSTR) != 0 && len(n.Right.Val.U.Sval.S) == 0) {
+		if (Isconst(n.Left, CTSTR) && len(n.Left.Val.U.Sval.S) == 0) || (Isconst(n.Right, CTSTR) && len(n.Right.Val.U.Sval.S) == 0) {
 			r = Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
 			typecheck(&r, Erv)
 			walkexpr(&r, init)
@@ -1354,7 +1352,7 @@
 		}
 
 		// s + "badgerbadgerbadger" == "badgerbadgerbadger"
-		if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) != 0 && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) != 0 && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
+		if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
 			r = Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
 			typecheck(&r, Erv)
 			walkexpr(&r, init)
@@ -1470,7 +1468,7 @@
 			l = r
 		}
 		t = n.Type
-		if n.Esc == EscNone && Smallintconst(l) != 0 && Smallintconst(r) != 0 && (t.Type.Width == 0 || Mpgetfix(r.Val.U.Xval) < (1<<16)/t.Type.Width) {
+		if n.Esc == EscNone && Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || Mpgetfix(r.Val.U.Xval) < (1<<16)/t.Type.Width) {
 			// var arr [r]T
 			// n = arr[:l]
 			t = aindex(r, t.Type) // [r]T
@@ -1579,7 +1577,7 @@
 		if !Eqtype(n.Left.Type, n.Right.Type) {
 			Fatal("ifaceeq %v %v %v", Oconv(int(n.Op), 0), Tconv(n.Left.Type, 0), Tconv(n.Right.Type, 0))
 		}
-		if isnilinter(n.Left.Type) != 0 {
+		if isnilinter(n.Left.Type) {
 			fn = syslook("efaceeq", 1)
 		} else {
 			fn = syslook("ifaceeq", 1)
@@ -1715,20 +1713,20 @@
  * evaluating the lv or a function call
  * in the conversion of the types
  */
-func fncall(l *Node, rt *Type) int {
+func fncall(l *Node, rt *Type) bool {
 	var r Node
 
 	if l.Ullman >= UINF || l.Op == OINDEXMAP {
-		return 1
+		return true
 	}
 	r = Node{}
-	if needwritebarrier(l, &r) != 0 {
-		return 1
+	if needwritebarrier(l, &r) {
+		return true
 	}
 	if Eqtype(l.Type, rt) {
-		return 0
+		return false
 	}
-	return 1
+	return true
 }
 
 func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
@@ -1765,7 +1763,7 @@
 		// any lv that causes a fn call must be
 		// deferred until all the return arguments
 		// have been pulled from the output arguments
-		if fncall(l, r.Type) != 0 {
+		if fncall(l, r.Type) {
 			tmp = temp(r.Type)
 			typecheck(&tmp, Erv)
 			a = Nod(OAS, l, tmp)
@@ -1916,7 +1914,7 @@
 	// f(g()) where g has multiple return values
 	if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg != 0 {
 		// optimization - can do block copy
-		if eqtypenoname(r.Type, *nl) != 0 {
+		if eqtypenoname(r.Type, *nl) {
 			a = nodarg(*nl, fp)
 			r = Nod(OCONVNOP, r, nil)
 			r.Type = a.Type
@@ -2013,7 +2011,7 @@
 	var all *NodeList
 	var on *Node
 	var t *Type
-	var notfirst int
+	var notfirst bool
 	var et int
 	var op int
 	var calls *NodeList
@@ -2021,7 +2019,7 @@
 	op = int(nn.Op)
 	all = nn.List
 	calls = nil
-	notfirst = 0
+	notfirst = false
 
 	// Hoist all the argument evaluation up before the lock.
 	walkexprlistcheap(all, init)
@@ -2029,11 +2027,11 @@
 	calls = list(calls, mkcall("printlock", nil, init))
 
 	for l = all; l != nil; l = l.Next {
-		if notfirst != 0 {
+		if notfirst {
 			calls = list(calls, mkcall("printsp", nil, init))
 		}
 
-		notfirst = bool2int(op == OPRINTN)
+		notfirst = op == OPRINTN
 
 		n = l.N
 		if n.Op == OLITERAL {
@@ -2060,8 +2058,8 @@
 
 		t = n.Type
 		et = int(n.Type.Etype)
-		if Isinter(n.Type) != 0 {
-			if isnilinter(n.Type) != 0 {
+		if Isinter(n.Type) {
+			if isnilinter(n.Type) {
 				on = syslook("printeface", 1)
 			} else {
 				on = syslook("printiface", 1)
@@ -2070,7 +2068,7 @@
 		} else if Isptr[et] != 0 || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
 			on = syslook("printpointer", 1)
 			argtype(on, n.Type) // any-1
-		} else if Isslice(n.Type) != 0 {
+		} else if Isslice(n.Type) {
 			on = syslook("printslice", 1)
 			argtype(on, n.Type) // any-1
 		} else if Isint[et] != 0 {
@@ -2139,7 +2137,7 @@
 	return mkcall1(fn, Ptrto(t), nil, typename(t))
 }
 
-func isstack(n *Node) int {
+func isstack(n *Node) bool {
 	var defn *Node
 
 	n = outervalue(n)
@@ -2156,81 +2154,81 @@
 	switch n.Op {
 	// OINDREG only ends up in walk if it's indirect of SP.
 	case OINDREG:
-		return 1
+		return true
 
 	case ONAME:
 		switch n.Class {
 		case PAUTO,
 			PPARAM,
 			PPARAMOUT:
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
-func isglobal(n *Node) int {
+func isglobal(n *Node) bool {
 	n = outervalue(n)
 
 	switch n.Op {
 	case ONAME:
 		switch n.Class {
 		case PEXTERN:
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 // Do we need a write barrier for the assignment l = r?
-func needwritebarrier(l *Node, r *Node) int {
-	if !(use_writebarrier != 0) {
-		return 0
+func needwritebarrier(l *Node, r *Node) bool {
+	if use_writebarrier == 0 {
+		return false
 	}
 
 	if l == nil || isblank(l) {
-		return 0
+		return false
 	}
 
 	// No write barrier for write of non-pointers.
 	dowidth(l.Type)
 
 	if !haspointers(l.Type) {
-		return 0
+		return false
 	}
 
 	// No write barrier for write to stack.
-	if isstack(l) != 0 {
-		return 0
+	if isstack(l) {
+		return false
 	}
 
 	// No write barrier for implicit or explicit zeroing.
-	if r == nil || iszero(r) != 0 {
-		return 0
+	if r == nil || iszero(r) {
+		return false
 	}
 
 	// No write barrier for initialization to constant.
 	if r.Op == OLITERAL {
-		return 0
+		return false
 	}
 
 	// No write barrier for storing static (read-only) data.
 	if r.Op == ONAME && strings.HasPrefix(r.Sym.Name, "statictmp_") {
-		return 0
+		return false
 	}
 
 	// No write barrier for storing address of stack values,
 	// which are guaranteed only to be written to the stack.
-	if r.Op == OADDR && isstack(r.Left) != 0 {
-		return 0
+	if r.Op == OADDR && isstack(r.Left) {
+		return false
 	}
 
 	// No write barrier for storing address of global, which
 	// is live no matter what.
-	if r.Op == OADDR && isglobal(r.Left) != 0 {
-		return 0
+	if r.Op == OADDR && isglobal(r.Left) {
+		return false
 	}
 
 	// No write barrier for reslice: x = x[0:y] or x = append(x, ...).
@@ -2253,11 +2251,11 @@
 			Dump("bad reslice-r", r)
 		}
 
-		return 0
+		return false
 	}
 
 	// Otherwise, be conservative and use write barrier.
-	return 1
+	return true
 }
 
 // TODO(rsc): Perhaps componentgen should run before this.
@@ -2271,7 +2269,7 @@
 	var x int64
 	var name string
 
-	if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) != 0 {
+	if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
 		if Curfn != nil && Curfn.Nowritebarrier {
 			Yyerror("write barrier prohibited")
 		}
@@ -2282,9 +2280,9 @@
 			n = mkcall1(writebarrierfn("writebarrierptr", t, n.Right.Type), nil, init, l, n.Right)
 		} else if t.Etype == TSTRING {
 			n = mkcall1(writebarrierfn("writebarrierstring", t, n.Right.Type), nil, init, l, n.Right)
-		} else if Isslice(t) != 0 {
+		} else if Isslice(t) {
 			n = mkcall1(writebarrierfn("writebarrierslice", t, n.Right.Type), nil, init, l, n.Right)
-		} else if Isinter(t) != 0 {
+		} else if Isinter(t) {
 			n = mkcall1(writebarrierfn("writebarrieriface", t, n.Right.Type), nil, init, l, n.Right)
 		} else if t.Width <= int64(4*Widthptr) {
 			x = 0
@@ -2303,7 +2301,6 @@
 			switch t.Width / int64(Widthptr) {
 			default:
 				Fatal("found writebarrierfat for %d-byte object of type %v", int(t.Width), Tconv(t, 0))
-				fallthrough
 
 			case 2:
 				name = fmt.Sprintf("writebarrierfat%d%d", bvget(applywritebarrier_bv, PtrBit), bvget(applywritebarrier_bv, obj.BitsPerPointer+PtrBit))
@@ -2488,7 +2485,7 @@
 				continue
 			}
 
-			if l.Op == OINDEX && Isfixedarray(l.Left.Type) != 0 {
+			if l.Op == OINDEX && Isfixedarray(l.Left.Type) {
 				reorder3save(&l.Right, all, list, &early)
 				l = l.Left
 				continue
@@ -2500,7 +2497,6 @@
 		switch l.Op {
 		default:
 			Fatal("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
-			fallthrough
 
 		case ONAME:
 			break
@@ -2537,7 +2533,7 @@
 	var q *Node
 
 	n = *np
-	if !(aliased(n, all, stop) != 0) {
+	if !aliased(n, all, stop) {
 		return
 	}
 
@@ -2562,7 +2558,7 @@
 			continue
 		}
 
-		if n.Op == OINDEX && Isfixedarray(n.Left.Type) != 0 {
+		if n.Op == OINDEX && Isfixedarray(n.Left.Type) {
 			n = n.Left
 			continue
 		}
@@ -2577,14 +2573,14 @@
  * Is it possible that the computation of n might be
  * affected by writes in as up to but not including stop?
  */
-func aliased(n *Node, all *NodeList, stop *NodeList) int {
+func aliased(n *Node, all *NodeList, stop *NodeList) bool {
 	var memwrite int
 	var varwrite int
 	var a *Node
 	var l *NodeList
 
 	if n == nil {
-		return 0
+		return false
 	}
 
 	// Look for obvious aliasing: a variable being assigned
@@ -2615,9 +2611,9 @@
 				continue
 			}
 
-			if vmatch2(a, n) != 0 {
+			if vmatch2(a, n) {
 				// Direct hit.
-				return 1
+				return true
 			}
 		}
 	}
@@ -2627,20 +2623,20 @@
 	// that are being written.
 
 	// If no computed addresses are affected by the writes, no aliasing.
-	if !(memwrite != 0) && !(varwrite != 0) {
-		return 0
+	if memwrite == 0 && varwrite == 0 {
+		return false
 	}
 
 	// If n does not refer to computed addresses
 	// (that is, if n only refers to variables whose addresses
 	// have not been taken), no aliasing.
-	if varexpr(n) != 0 {
-		return 0
+	if varexpr(n) {
+		return false
 	}
 
 	// Otherwise, both the writes and n refer to computed memory addresses.
 	// Assume that they might conflict.
-	return 1
+	return true
 }
 
 /*
@@ -2648,26 +2644,26 @@
  * whose addresses have not been taken?
  * (and no other memory)
  */
-func varexpr(n *Node) int {
+func varexpr(n *Node) bool {
 	if n == nil {
-		return 1
+		return true
 	}
 
 	switch n.Op {
 	case OLITERAL:
-		return 1
+		return true
 
 	case ONAME:
 		switch n.Class {
 		case PAUTO,
 			PPARAM,
 			PPARAMOUT:
-			if !(n.Addrtaken != 0) {
-				return 1
+			if n.Addrtaken == 0 {
+				return true
 			}
 		}
 
-		return 0
+		return false
 
 	case OADD,
 		OSUB,
@@ -2691,57 +2687,57 @@
 		OCONVNOP,
 		OCONVIFACE,
 		ODOTTYPE:
-		return bool2int(varexpr(n.Left) != 0 && varexpr(n.Right) != 0)
+		return varexpr(n.Left) && varexpr(n.Right)
 	}
 
 	// Be conservative.
-	return 0
+	return false
 }
 
 /*
  * is the name l mentioned in r?
  */
-func vmatch2(l *Node, r *Node) int {
+func vmatch2(l *Node, r *Node) bool {
 	var ll *NodeList
 
 	if r == nil {
-		return 0
+		return false
 	}
 	switch r.Op {
 	// match each right given left
 	case ONAME:
-		return bool2int(l == r)
+		return l == r
 
 	case OLITERAL:
-		return 0
+		return false
 	}
 
-	if vmatch2(l, r.Left) != 0 {
-		return 1
+	if vmatch2(l, r.Left) {
+		return true
 	}
-	if vmatch2(l, r.Right) != 0 {
-		return 1
+	if vmatch2(l, r.Right) {
+		return true
 	}
 	for ll = r.List; ll != nil; ll = ll.Next {
-		if vmatch2(l, ll.N) != 0 {
-			return 1
+		if vmatch2(l, ll.N) {
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 /*
  * is any name mentioned in l also mentioned in r?
  * called by sinit.c
  */
-func vmatch1(l *Node, r *Node) int {
+func vmatch1(l *Node, r *Node) bool {
 	var ll *NodeList
 
 	/*
 	 * isolate all left sides
 	 */
 	if l == nil || r == nil {
-		return 0
+		return false
 	}
 	switch l.Op {
 	case ONAME:
@@ -2755,28 +2751,28 @@
 		// must be delayed if right has function calls.
 		default:
 			if r.Ullman >= UINF {
-				return 1
+				return true
 			}
 		}
 
 		return vmatch2(l, r)
 
 	case OLITERAL:
-		return 0
+		return false
 	}
 
-	if vmatch1(l.Left, r) != 0 {
-		return 1
+	if vmatch1(l.Left, r) {
+		return true
 	}
-	if vmatch1(l.Right, r) != 0 {
-		return 1
+	if vmatch1(l.Right, r) {
+		return true
 	}
 	for ll = l.List; ll != nil; ll = ll.Next {
-		if vmatch1(ll.N, r) != 0 {
-			return 1
+		if vmatch1(ll.N, r) {
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 /*
@@ -2807,7 +2803,7 @@
 			nn = list(nn, Nod(OAS, nodarg(t, 1), nil))
 		}
 
-		if v == nil || !(v.Class&PHEAP != 0) {
+		if v == nil || v.Class&PHEAP == 0 {
 			continue
 		}
 
@@ -3131,7 +3127,7 @@
 		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
 		nptr1 = Nod(OINDEX, s, Nod(OLEN, l1, nil))
 
-		nptr1.Bounded = 1
+		nptr1.Bounded = true
 		nptr1 = Nod(OADDR, nptr1, nil)
 
 		nptr2 = Nod(OSPTR, l2, nil)
@@ -3198,7 +3194,7 @@
 	nsrc = n.List.N
 
 	// Resolve slice type of multi-valued return.
-	if Istype(nsrc.Type, TSTRUCT) != 0 {
+	if Istype(nsrc.Type, TSTRUCT) {
 		nsrc.Type = nsrc.Type.Type.Type
 	}
 	argc = count(n.List) - 1
@@ -3232,7 +3228,7 @@
 
 	for a = n.List.Next; a != nil; a = a.Next {
 		nx = Nod(OINDEX, ns, nn) // s[n] ...
-		nx.Bounded = 1
+		nx.Bounded = true
 		l = list(l, Nod(OAS, nx, a.N)) // s[n] = arg
 		if a.Next != nil {
 			l = list(l, Nod(OAS, nn, Nod(OADD, nn, Nodintconst(1)))) // n = n + 1
@@ -3326,7 +3322,7 @@
 //
 func sliceany(n *Node, init **NodeList) *Node {
 	var bounded int
-	var slice3 int
+	var slice3 bool
 	var src *Node
 	var lb *Node
 	var hb *Node
@@ -3348,8 +3344,8 @@
 	src = n.Left
 
 	lb = n.Right.Left
-	slice3 = bool2int(n.Op == OSLICE3 || n.Op == OSLICE3ARR)
-	if slice3 != 0 {
+	slice3 = n.Op == OSLICE3 || n.Op == OSLICE3ARR
+	if slice3 {
 		hb = n.Right.Right.Left
 		cb = n.Right.Right.Right
 	} else {
@@ -3371,29 +3367,29 @@
 	// static checks if possible
 	bv = 1 << 50
 
-	if Isconst(bound, CTINT) != 0 {
-		if !(Smallintconst(bound) != 0) {
+	if Isconst(bound, CTINT) {
+		if !Smallintconst(bound) {
 			Yyerror("array len too large")
 		} else {
 			bv = Mpgetfix(bound.Val.U.Xval)
 		}
 	}
 
-	if Isconst(cb, CTINT) != 0 {
+	if Isconst(cb, CTINT) {
 		cbv = Mpgetfix(cb.Val.U.Xval)
 		if cbv < 0 || cbv > bv {
 			Yyerror("slice index out of bounds")
 		}
 	}
 
-	if Isconst(hb, CTINT) != 0 {
+	if Isconst(hb, CTINT) {
 		hbv = Mpgetfix(hb.Val.U.Xval)
 		if hbv < 0 || hbv > bv {
 			Yyerror("slice index out of bounds")
 		}
 	}
 
-	if Isconst(lb, CTINT) != 0 {
+	if Isconst(lb, CTINT) {
 		lbv = Mpgetfix(lb.Val.U.Xval)
 		if lbv < 0 || lbv > bv {
 			Yyerror("slice index out of bounds")
@@ -3430,24 +3426,24 @@
 
 	if cb != nil {
 		cb = cheapexpr(conv(cb, bt), init)
-		if !(bounded != 0) {
+		if bounded == 0 {
 			chk0 = Nod(OLT, bound, cb)
 		}
-	} else if slice3 != 0 {
+	} else if slice3 {
 		// When we figure out what this means, implement it.
 		Fatal("slice3 with cb == N") // rejected by parser
 	}
 
 	if hb != nil {
 		hb = cheapexpr(conv(hb, bt), init)
-		if !(bounded != 0) {
+		if bounded == 0 {
 			if cb != nil {
 				chk1 = Nod(OLT, cb, hb)
 			} else {
 				chk1 = Nod(OLT, bound, hb)
 			}
 		}
-	} else if slice3 != 0 {
+	} else if slice3 {
 		// When we figure out what this means, implement it.
 		Fatal("slice3 with hb == N") // rejected by parser
 	} else if n.Op == OSLICEARR {
@@ -3461,7 +3457,7 @@
 
 	if lb != nil {
 		lb = cheapexpr(conv(lb, bt), init)
-		if !(bounded != 0) {
+		if bounded == 0 {
 			chk2 = Nod(OLT, hb, lb)
 		}
 	}
@@ -3501,7 +3497,7 @@
 	n.Right = nil
 
 	n.List = nil
-	if !(slice3 != 0) {
+	if !slice3 {
 		cb = bound
 	}
 	if lb == nil {
@@ -3623,10 +3619,10 @@
 	l = nil
 
 	r = nil
-	if Isinter(n.Left.Type) != 0 && !(Isinter(n.Right.Type) != 0) {
+	if Isinter(n.Left.Type) && !Isinter(n.Right.Type) {
 		l = n.Left
 		r = n.Right
-	} else if !(Isinter(n.Left.Type) != 0) && Isinter(n.Right.Type) != 0 {
+	} else if !Isinter(n.Left.Type) && Isinter(n.Right.Type) {
 		l = n.Right
 		r = n.Left
 	}
@@ -3667,7 +3663,7 @@
 		return
 
 	case TARRAY:
-		if Isslice(t) != 0 {
+		if Isslice(t) {
 			return
 		}
 
@@ -3684,7 +3680,7 @@
 		cmpr = cmpr.Left
 	}
 
-	if !(islvalue(cmpl) != 0) || !(islvalue(cmpr) != 0) {
+	if !islvalue(cmpl) || !islvalue(cmpr) {
 		Fatal("arguments of comparison must be lvalues - %v %v", Nconv(cmpl, 0), Nconv(cmpr, 0))
 	}
 
@@ -3721,7 +3717,7 @@
 		}
 
 		if expr == nil {
-			expr = Nodbool(bool2int(n.Op == OEQ))
+			expr = Nodbool(n.Op == OEQ)
 		}
 		r = expr
 		goto ret
@@ -3745,7 +3741,7 @@
 		}
 
 		if expr == nil {
-			expr = Nodbool(bool2int(n.Op == OEQ))
+			expr = Nodbool(n.Op == OEQ)
 		}
 		r = expr
 		goto ret
@@ -3778,30 +3774,30 @@
 	return
 }
 
-func samecheap(a *Node, b *Node) int {
+func samecheap(a *Node, b *Node) bool {
 	var ar *Node
 	var br *Node
 	for a != nil && b != nil && a.Op == b.Op {
 		switch a.Op {
 		default:
-			return 0
+			return false
 
 		case ONAME:
-			return bool2int(a == b)
+			return a == b
 
 		case ODOT,
 			ODOTPTR:
 			ar = a.Right
 			br = b.Right
 			if ar.Op != ONAME || br.Op != ONAME || ar.Sym != br.Sym {
-				return 0
+				return false
 			}
 
 		case OINDEX:
 			ar = a.Right
 			br = b.Right
-			if !(Isconst(ar, CTINT) != 0) || !(Isconst(br, CTINT) != 0) || Mpcmpfixfix(ar.Val.U.Xval, br.Val.U.Xval) != 0 {
-				return 0
+			if !Isconst(ar, CTINT) || !Isconst(br, CTINT) || Mpcmpfixfix(ar.Val.U.Xval, br.Val.U.Xval) != 0 {
+				return false
 			}
 		}
 
@@ -3809,7 +3805,7 @@
 		b = b.Left
 	}
 
-	return 0
+	return false
 }
 
 func walkrotate(np **Node) {
@@ -3836,14 +3832,14 @@
 	}
 
 	// Want same, side effect-free expression on lhs of both shifts.
-	if !(samecheap(l.Left, r.Left) != 0) {
+	if !samecheap(l.Left, r.Left) {
 		return
 	}
 
 	// Constants adding to width?
 	w = int(l.Type.Width * 8)
 
-	if Smallintconst(l.Right) != 0 && Smallintconst(r.Right) != 0 {
+	if Smallintconst(l.Right) && Smallintconst(r.Right) {
 		sl = int(Mpgetfix(l.Right.Val.U.Xval))
 		if sl >= 0 {
 			sr = int(Mpgetfix(r.Right.Val.U.Xval))
@@ -3889,7 +3885,7 @@
 	var w int
 
 	n = *np
-	if !(Isint[n.Type.Etype] != 0) {
+	if Isint[n.Type.Etype] == 0 {
 		return
 	}
 
@@ -4240,46 +4236,46 @@
 }
 
 // return 1 if integer n must be in range [0, max), 0 otherwise
-func bounded(n *Node, max int64) int {
+func bounded(n *Node, max int64) bool {
 	var v int64
 	var bits int32
 	var sign int
 
-	if n.Type == nil || !(Isint[n.Type.Etype] != 0) {
-		return 0
+	if n.Type == nil || Isint[n.Type.Etype] == 0 {
+		return false
 	}
 
 	sign = int(Issigned[n.Type.Etype])
 	bits = int32(8 * n.Type.Width)
 
-	if Smallintconst(n) != 0 {
+	if Smallintconst(n) {
 		v = Mpgetfix(n.Val.U.Xval)
-		return bool2int(0 <= v && v < max)
+		return 0 <= v && v < max
 	}
 
 	switch n.Op {
 	case OAND:
 		v = -1
-		if Smallintconst(n.Left) != 0 {
+		if Smallintconst(n.Left) {
 			v = Mpgetfix(n.Left.Val.U.Xval)
-		} else if Smallintconst(n.Right) != 0 {
+		} else if Smallintconst(n.Right) {
 			v = Mpgetfix(n.Right.Val.U.Xval)
 		}
 
 		if 0 <= v && v < max {
-			return 1
+			return true
 		}
 
 	case OMOD:
-		if !(sign != 0) && Smallintconst(n.Right) != 0 {
+		if sign == 0 && Smallintconst(n.Right) {
 			v = Mpgetfix(n.Right.Val.U.Xval)
 			if 0 <= v && v <= max {
-				return 1
+				return true
 			}
 		}
 
 	case ODIV:
-		if !(sign != 0) && Smallintconst(n.Right) != 0 {
+		if sign == 0 && Smallintconst(n.Right) {
 			v = Mpgetfix(n.Right.Val.U.Xval)
 			for bits > 0 && v >= 2 {
 				bits--
@@ -4288,34 +4284,33 @@
 		}
 
 	case ORSH:
-		if !(sign != 0) && Smallintconst(n.Right) != 0 {
+		if sign == 0 && Smallintconst(n.Right) {
 			v = Mpgetfix(n.Right.Val.U.Xval)
 			if v > int64(bits) {
-				return 1
+				return true
 			}
 			bits -= int32(v)
 		}
 	}
 
-	if !(sign != 0) && bits <= 62 && 1<<uint(bits) <= max {
-		return 1
+	if sign == 0 && bits <= 62 && 1<<uint(bits) <= max {
+		return true
 	}
 
-	return 0
+	return false
 }
 
 func usefield(n *Node) {
 	var field *Type
 	var l *Type
 
-	if !(obj.Fieldtrack_enabled != 0) {
+	if obj.Fieldtrack_enabled == 0 {
 		return
 	}
 
 	switch n.Op {
 	default:
 		Fatal("usefield %v", Oconv(int(n.Op), 0))
-		fallthrough
 
 	case ODOT,
 		ODOTPTR:
@@ -4352,23 +4347,23 @@
 	Curfn.Paramfld = l
 }
 
-func candiscardlist(l *NodeList) int {
+func candiscardlist(l *NodeList) bool {
 	for ; l != nil; l = l.Next {
-		if !(candiscard(l.N) != 0) {
-			return 0
+		if !candiscard(l.N) {
+			return false
 		}
 	}
-	return 1
+	return true
 }
 
-func candiscard(n *Node) int {
+func candiscard(n *Node) bool {
 	if n == nil {
-		return 1
+		return true
 	}
 
 	switch n.Op {
 	default:
-		return 0
+		return false
 
 		// Discardable as long as the subpieces are.
 	case ONAME,
@@ -4428,32 +4423,32 @@
 		// Discardable as long as we know it's not division by zero.
 	case ODIV,
 		OMOD:
-		if Isconst(n.Right, CTINT) != 0 && mpcmpfixc(n.Right.Val.U.Xval, 0) != 0 {
+		if Isconst(n.Right, CTINT) && mpcmpfixc(n.Right.Val.U.Xval, 0) != 0 {
 			break
 		}
-		if Isconst(n.Right, CTFLT) != 0 && mpcmpfltc(n.Right.Val.U.Fval, 0) != 0 {
+		if Isconst(n.Right, CTFLT) && mpcmpfltc(n.Right.Val.U.Fval, 0) != 0 {
 			break
 		}
-		return 0
+		return false
 
 		// Discardable as long as we know it won't fail because of a bad size.
 	case OMAKECHAN,
 		OMAKEMAP:
-		if Isconst(n.Left, CTINT) != 0 && mpcmpfixc(n.Left.Val.U.Xval, 0) == 0 {
+		if Isconst(n.Left, CTINT) && mpcmpfixc(n.Left.Val.U.Xval, 0) == 0 {
 			break
 		}
-		return 0
+		return false
 
 		// Difficult to tell what sizes are okay.
 	case OMAKESLICE:
-		return 0
+		return false
 	}
 
-	if !(candiscard(n.Left) != 0) || !(candiscard(n.Right) != 0) || !(candiscard(n.Ntest) != 0) || !(candiscard(n.Nincr) != 0) || !(candiscardlist(n.Ninit) != 0) || !(candiscardlist(n.Nbody) != 0) || !(candiscardlist(n.Nelse) != 0) || !(candiscardlist(n.List) != 0) || !(candiscardlist(n.Rlist) != 0) {
-		return 0
+	if !candiscard(n.Left) || !candiscard(n.Right) || !candiscard(n.Ntest) || !candiscard(n.Nincr) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.Nelse) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+		return false
 	}
 
-	return 1
+	return true
 }
 
 // rewrite
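
Illustration only: besides changing return types, the walk.go hunks drop `bool2int(...)` wrappers (`samelist`, `samecheap`, `notfirst`, `slice3`, the `Nodbool` arguments) and assign bools directly to fields such as `n.Bounded`. The helper only survives where an int really is required, e.g. as an array index in obj5.go further down (`ctxt.Symmorestack[bool2int(noctxt)]`). A hedged sketch of the two kinds of call sites, with the obvious shape assumed for the helper:

	// bool2int's definition is not shown in this diff; this is the obvious shape.
	package main

	import "fmt"

	func bool2int(b bool) int {
		if b {
			return 1
		}
		return 0
	}

	func main() {
		x, y := 3, 3

		// Before: a bool expression forced through an int just to store it.
		sameOld := bool2int(x == y) // 1
		// After: store the bool directly.
		same := x == y // true

		// Still needed where an int is required, e.g. indexing a 2-entry table.
		table := [2]string{"with ctxt", "no ctxt"}
		noctxt := true
		fmt.Println(sameOld, same, table[bool2int(noctxt)])
	}
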
diff --git a/src/cmd/internal/gc/y.go b/src/cmd/internal/gc/y.go
index 0e9157c..5b79856 100644
--- a/src/cmd/internal/gc/y.go
+++ b/src/cmd/internal/gc/y.go
@@ -851,6 +851,7 @@
 }
 
 type yyParser interface {
+	Parse(yyLexer) int
 	Lookahead() int
 }
 
@@ -862,6 +863,13 @@
 	return p.lookahead()
 }
 
+func yyNewParser() yyParser {
+	p := &yyParserImpl{
+		lookahead: func() int { return -1 },
+	}
+	return p
+}
+
 const yyFlag = -1000
 
 func yyTokname(c int) string {
@@ -919,6 +927,10 @@
 }
 
 func yyParse(yylex yyLexer) int {
+	return yyNewParser().Parse(yylex)
+}
+
+func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
 	var yyn int
 	var yylval yySymType
 	var yyVAL yySymType
@@ -930,19 +942,12 @@
 	yystate := 0
 	yychar := -1
 	yytoken := -1 // yychar translated into internal numbering
-	if lx, ok := yylex.(interface {
-		SetParser(yyParser)
-	}); ok {
-		p := &yyParserImpl{
-			lookahead: func() int { return yychar },
-		}
-		lx.SetParser(p)
-		defer func() {
-			// Make sure we report no lookahead when not parsing.
-			yychar = -1
-			yytoken = -1
-		}()
-	}
+	yyrcvr.lookahead = func() int { return yychar }
+	defer func() {
+		// Make sure we report no lookahead when not parsing.
+		yychar = -1
+		yytoken = -1
+	}()
 	yyp := -1
 	goto yystack
 
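Illustration only: the regenerated y.go changes how callers reach the parser's lookahead token. Instead of yyParse probing the lexer for a SetParser hook, the parser is now an explicit object (yyNewParser, with Parse added to the yyParser interface), and yyParse survives as a thin wrapper that delegates to it. The sketch below is a standalone mirror of that shape with simplified, invented names, not the generated code itself:

	// Simplified mirror of the new y.go structure; names are invented.
	package main

	import "fmt"

	type lexer interface{ Lex() int }

	type parser interface {
		Parse(lexer) int
		Lookahead() int
	}

	type parserImpl struct{ lookahead func() int }

	func newParser() parser {
		return &parserImpl{lookahead: func() int { return -1 }}
	}

	func (p *parserImpl) Lookahead() int { return p.lookahead() }

	func (p *parserImpl) Parse(lx lexer) int {
		tok := lx.Lex()
		p.lookahead = func() int { return tok } // visible while parsing
		defer func() {
			// report no lookahead once parsing is done, as the generated code does
			p.lookahead = func() int { return -1 }
		}()
		return 0
	}

	// The old entry point remains as a wrapper over the new method.
	func parse(lx lexer) int { return newParser().Parse(lx) }

	type dummyLex struct{}

	func (dummyLex) Lex() int { return 42 }

	func main() { fmt.Println(parse(dummyLex{})) }
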
diff --git a/src/cmd/internal/obj/ar.go b/src/cmd/internal/obj/ar.go
index 3991e73..7cbeafd 100644
--- a/src/cmd/internal/obj/ar.go
+++ b/src/cmd/internal/obj/ar.go
@@ -35,11 +35,11 @@
 // THE SOFTWARE.
 
 type ar_hdr struct {
-	Name string
-	Date string
-	Uid  string
-	Gid  string
-	Mode string
-	Size string
-	Fmag string
+	name string
+	date string
+	uid  string
+	gid  string
+	mode string
+	size string
+	fmag string
 }
diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go
index aff9a17..8e42d6b 100644
--- a/src/cmd/internal/obj/arm/asm5.go
+++ b/src/cmd/internal/obj/arm/asm5.go
@@ -437,7 +437,7 @@
 			// split it into two instructions:
 			// 	ADD $-100004, R13
 			// 	MOVW R14, 0(R13)
-			q = new(obj.Prog)
+			q = ctxt.NewProg()
 
 			p.Scond &^= C_WBIT
 			*q = *p
@@ -462,14 +462,14 @@
 			p.From = *a
 			p.From.Reg = 0
 			p.From.Type = obj.TYPE_CONST
-			p.To = obj.Zprog.To
+			p.To = obj.Addr{}
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = REG_R13
 
 			// make q into p but load/store from 0(R13)
 			q.Spadj = 0
 
-			*a2 = obj.Zprog.From
+			*a2 = obj.Addr{}
 			a2.Type = obj.TYPE_MEM
 			a2.Reg = REG_R13
 			a2.Sym = nil
@@ -514,7 +514,7 @@
 				if p.Scond&(C_PBIT|C_WBIT) != 0 {
 					ctxt.Diag("unsupported instruction (.P/.W): %v", p)
 				}
-				q = new(obj.Prog)
+				q = ctxt.NewProg()
 				*q = *p
 				if p.To.Type == obj.TYPE_MEM {
 					a2 = &q.To
@@ -535,12 +535,12 @@
 
 				p.From = *a
 				p.From.Type = obj.TYPE_ADDR
-				p.To = obj.Zprog.To
+				p.To = obj.Addr{}
 				p.To.Type = obj.TYPE_REG
 				p.To.Reg = REG_R11
 
 				// make q into p but load/store from 0(R11)
-				*a2 = obj.Zprog.From
+				*a2 = obj.Addr{}
 
 				a2.Type = obj.TYPE_MEM
 				a2.Reg = REG_R11
@@ -606,7 +606,7 @@
 	p = p.Link
 	for ; p != nil || ctxt.Blitrl != nil; (func() { op = p; p = p.Link })() {
 		if p == nil {
-			if checkpool(ctxt, op, 0) != 0 {
+			if checkpool(ctxt, op, 0) {
 				p = op
 				continue
 			}
@@ -638,7 +638,7 @@
 			if p.As == ACASE {
 				i = int(casesz(ctxt, p))
 			}
-			if checkpool(ctxt, op, i) != 0 {
+			if checkpool(ctxt, op, i) {
 				p = op
 				continue
 			}
@@ -749,7 +749,7 @@
 		}
 
 		cursym.Size = int64(c)
-		if !(bflag != 0) {
+		if bflag == 0 {
 			break
 		}
 	}
@@ -834,16 +834,16 @@
  * drop the pool now, and branch round it.
  * this happens only in extended basic blocks that exceed 4k.
  */
-func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) int {
+func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) bool {
 	if pool.size >= 0xff0 || immaddr(int32((p.Pc+int64(sz)+4)+4+int64(12+pool.size)-int64(pool.start+8))) == 0 {
 		return flushpool(ctxt, p, 1, 0)
 	} else if p.Link == nil {
 		return flushpool(ctxt, p, 2, 0)
 	}
-	return 0
+	return false
 }
 
-func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) int {
+func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
 	var q *obj.Prog
 
 	if ctxt.Blitrl != nil {
@@ -851,23 +851,21 @@
 			if false && skip == 1 {
 				fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
 			}
-			q = new(obj.Prog)
+			q = ctxt.NewProg()
 			q.As = AB
 			q.To.Type = obj.TYPE_BRANCH
 			q.Pcond = p.Link
 			q.Link = ctxt.Blitrl
 			q.Lineno = p.Lineno
-			q.Ctxt = p.Ctxt
 			ctxt.Blitrl = q
-		} else if !(force != 0) && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
-			return 0
+		} else if force == 0 && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
+			return false
 		}
 		if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
 			// if pool is not multiple of 16 bytes, add an alignment marker
-			q = new(obj.Prog)
+			q = ctxt.NewProg()
 
 			q.As = ADATABUNDLEEND
-			q.Ctxt = p.Ctxt
 			ctxt.Elitrl.Link = q
 			ctxt.Elitrl = q
 		}
@@ -888,10 +886,10 @@
 		pool.size = 0
 		pool.start = 0
 		pool.extra = 0
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
@@ -901,9 +899,8 @@
 
 	c = aclass(ctxt, a)
 
-	t = obj.Zprog
+	t.Ctxt = ctxt
 	t.As = AWORD
-	t.Ctxt = p.Ctxt
 
 	switch c {
 	default:
@@ -941,12 +938,9 @@
 
 	if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
 		// start a new data bundle
-		q = new(obj.Prog)
-
-		*q = obj.Zprog
+		q = ctxt.NewProg()
 		q.As = ADATABUNDLE
 		q.Pc = int64(pool.size)
-		q.Ctxt = p.Ctxt
 		pool.size += 4
 		if ctxt.Blitrl == nil {
 			ctxt.Blitrl = q
@@ -958,7 +952,7 @@
 		ctxt.Elitrl = q
 	}
 
-	q = new(obj.Prog)
+	q = ctxt.NewProg()
 	*q = t
 	q.Pc = int64(pool.size)
 
@@ -1740,7 +1734,7 @@
 	case 13: /* op $lcon, [R], R */
 		o1 = omvl(ctxt, p, &p.From, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		o2 = oprrr(ctxt, int(p.As), int(p.Scond))
@@ -1836,7 +1830,7 @@
 	case 30: /* mov/movb/movbu R,L(R) */
 		o1 = omvl(ctxt, p, &p.To, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		r = int(p.To.Reg)
@@ -1851,7 +1845,7 @@
 	case 31: /* mov/movbu L(R),R -> lr[b] */
 		o1 = omvl(ctxt, p, &p.From, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		r = int(p.From.Reg)
@@ -1866,7 +1860,7 @@
 	case 34: /* mov $lacon,R */
 		o1 = omvl(ctxt, p, &p.From, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 
@@ -1984,7 +1978,7 @@
 	case 52: /* floating point store, int32 offset UGLY */
 		o1 = omvl(ctxt, p, &p.To, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		r = int(p.To.Reg)
@@ -1997,7 +1991,7 @@
 	case 53: /* floating point load, int32 offset UGLY */
 		o1 = omvl(ctxt, p, &p.From, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		r = int(p.From.Reg)
@@ -2122,7 +2116,7 @@
 	case 64: /* mov/movb/movbu R,addr */
 		o1 = omvl(ctxt, p, &p.To, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		o2 = osr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond))
@@ -2134,7 +2128,7 @@
 	case 65: /* mov/movbu addr,R */
 		o1 = omvl(ctxt, p, &p.From, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		o2 = olr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
@@ -2149,7 +2143,7 @@
 	case 68: /* floating point store -> ADDR */
 		o1 = omvl(ctxt, p, &p.To, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		o2 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
@@ -2161,7 +2155,7 @@
 	case 69: /* floating point load <- ADDR */
 		o1 = omvl(ctxt, p, &p.From, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		o2 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
@@ -2197,7 +2191,7 @@
 	case 72: /* movh/movhu R,L(R) -> strh */
 		o1 = omvl(ctxt, p, &p.To, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		r = int(p.To.Reg)
@@ -2209,7 +2203,7 @@
 	case 73: /* movb/movh/movhu L(R),R -> ldrsb/ldrsh/ldrh */
 		o1 = omvl(ctxt, p, &p.From, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		r = int(p.From.Reg)
@@ -2394,7 +2388,7 @@
 	case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */
 		o1 = omvl(ctxt, p, &p.From, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		o2 = olhr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
@@ -2411,7 +2405,7 @@
 	case 94: /* movh/movhu R,addr -> strh */
 		o1 = omvl(ctxt, p, &p.To, REGTMP)
 
-		if !(o1 != 0) {
+		if o1 == 0 {
 			break
 		}
 		o2 = oshr(ctxt, int(p.From.Reg), 0, REGTMP, int(p.Scond))
@@ -2725,10 +2719,10 @@
 		ctxt.Diag(".nil on LDR/STR instruction")
 	}
 	o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
-	if !(sc&C_PBIT != 0) {
+	if sc&C_PBIT == 0 {
 		o |= 1 << 24
 	}
-	if !(sc&C_UBIT != 0) {
+	if sc&C_UBIT == 0 {
 		o |= 1 << 23
 	}
 	if sc&C_WBIT != 0 {
@@ -2759,7 +2753,7 @@
 		ctxt.Diag(".nil on LDRH/STRH instruction")
 	}
 	o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
-	if !(sc&C_PBIT != 0) {
+	if sc&C_PBIT == 0 {
 		o |= 1 << 24
 	}
 	if sc&C_WBIT != 0 {
@@ -2820,7 +2814,7 @@
 		ctxt.Diag(".nil on FLDR/FSTR instruction")
 	}
 	o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
-	if !(sc&C_PBIT != 0) {
+	if sc&C_PBIT == 0 {
 		o |= 1 << 24
 	}
 	if sc&C_WBIT != 0 {
@@ -2860,7 +2854,7 @@
 func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
 	var v int32
 	var o1 uint32
-	if !(p.Pcond != nil) {
+	if p.Pcond == nil {
 		aclass(ctxt, a)
 		v = immrot(^uint32(ctxt.Instoffset))
 		if v == 0 {
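
Sketch only: a second theme in asm5.go (and obj5.go below) is replacing `new(obj.Prog)` plus an explicit `q.Ctxt = p.Ctxt` with `ctxt.NewProg()`, and replacing `obj.Zprog` copies with zero-value literals (`obj.Addr{}`, `obj.Prog{}`). NewProg itself is not shown in this diff; presumably it allocates a Prog with Ctxt already set, which is why the explicit assignments disappear, though the real method may do more bookkeeping. A plausible shape, with heavily trimmed types:

	// Assumed shape of Link.NewProg, inferred from the call sites above;
	// the real obj.Link and obj.Prog carry many more fields.
	package main

	import "fmt"

	type Link struct{ name string }

	type Prog struct {
		Ctxt *Link
		As   int16
		Link *Prog
	}

	func (ctxt *Link) NewProg() *Prog {
		return &Prog{Ctxt: ctxt} // zero value otherwise, like new(Prog)
	}

	func main() {
		ctxt := &Link{name: "arm"}

		// Before: allocate, then remember to wire up the context by hand.
		q1 := new(Prog)
		q1.Ctxt = ctxt

		// After: one call, context already set.
		q2 := ctxt.NewProg()

		fmt.Println(q1.Ctxt == q2.Ctxt) // true
	}
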
diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go
index 406939e..ce75ab1 100644
--- a/src/cmd/internal/obj/arm/obj5.go
+++ b/src/cmd/internal/obj/arm/obj5.go
@@ -225,7 +225,7 @@
 	cursym.Args = p.To.U.Argsize
 
 	if ctxt.Debugzerostack != 0 {
-		if autoffset != 0 && !(p.From3.Offset&obj.NOSPLIT != 0) {
+		if autoffset != 0 && p.From3.Offset&obj.NOSPLIT == 0 {
 			// MOVW $4(R13), R1
 			p = obj.Appendp(ctxt, p)
 
@@ -370,7 +370,7 @@
 				}
 			}
 
-			if !(autosize != 0) && !(cursym.Text.Mark&LEAF != 0) {
+			if autosize == 0 && cursym.Text.Mark&LEAF == 0 {
 				if ctxt.Debugvlog != 0 {
 					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
 					obj.Bflush(ctxt.Bso)
@@ -381,13 +381,13 @@
 
 			if cursym.Text.Mark&LEAF != 0 {
 				cursym.Leaf = 1
-				if !(autosize != 0) {
+				if autosize == 0 {
 					break
 				}
 			}
 
-			if !(p.From3.Offset&obj.NOSPLIT != 0) {
-				p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0))) // emit split check
+			if p.From3.Offset&obj.NOSPLIT == 0 {
+				p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
 			}
 
 			// MOVW.W		R14,$-autosize(SP)
@@ -493,9 +493,9 @@
 		case obj.ARET:
 			obj.Nocache(p)
 			if cursym.Text.Mark&LEAF != 0 {
-				if !(autosize != 0) {
+				if autosize == 0 {
 					p.As = AB
-					p.From = obj.Zprog.From
+					p.From = obj.Addr{}
 					if p.To.Sym != nil { // retjmp
 						p.To.Type = obj.TYPE_BRANCH
 					} else {
@@ -662,8 +662,8 @@
 	}
 }
 
-func isfloatreg(a *obj.Addr) int {
-	return bool2int(a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15)
+func isfloatreg(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15
 }
 
 func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
@@ -687,7 +687,7 @@
 	for p = cursym.Text; p != nil; p = p.Link {
 		switch p.As {
 		case AMOVW:
-			if isfloatreg(&p.To) != 0 || isfloatreg(&p.From) != 0 {
+			if isfloatreg(&p.To) || isfloatreg(&p.From) {
 				goto soft
 			}
 			goto notsoft
@@ -721,13 +721,13 @@
 		}
 
 	soft:
-		if !(wasfloat != 0) || (p.Mark&LABEL != 0) {
-			next = new(obj.Prog)
+		if wasfloat == 0 || (p.Mark&LABEL != 0) {
+			next = ctxt.NewProg()
 			*next = *p
 
 			// BL _sfloat(SB)
-			*p = obj.Zprog
-
+			*p = obj.Prog{}
+			p.Ctxt = ctxt
 			p.Link = next
 			p.As = ABL
 			p.To.Type = obj.TYPE_BRANCH
@@ -745,7 +745,7 @@
 	}
 }
 
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.Prog {
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
 	// MOVW			g_stackguard(g), R1
 	p = obj.Appendp(ctxt, p)
 
@@ -856,7 +856,7 @@
 	if ctxt.Cursym.Cfunc != 0 {
 		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
 	} else {
-		p.To.Sym = ctxt.Symmorestack[noctxt]
+		p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
 	}
 
 	// BLS	start
@@ -885,7 +885,7 @@
 
 	ctxt.Cursym = s
 
-	firstp = new(obj.Prog)
+	firstp = ctxt.NewProg()
 	lastp = firstp
 	xfol(ctxt, s.Text, &lastp)
 	lastp.Link = nil
@@ -948,7 +948,7 @@
 		if q != nil && q.As != obj.ATEXT {
 			p.Mark |= FOLL
 			p = q
-			if !(p.Mark&FOLL != 0) {
+			if p.Mark&FOLL == 0 {
 				goto loop
 			}
 		}
@@ -979,9 +979,9 @@
 
 		copy:
 			for {
-				r = new(obj.Prog)
+				r = ctxt.NewProg()
 				*r = *p
-				if !(r.Mark&FOLL != 0) {
+				if r.Mark&FOLL == 0 {
 					fmt.Printf("can't happen 1\n")
 				}
 				r.Mark |= FOLL
@@ -1003,10 +1003,10 @@
 				}
 				r.Pcond = p.Link
 				r.Link = p.Pcond
-				if !(r.Link.Mark&FOLL != 0) {
+				if r.Link.Mark&FOLL == 0 {
 					xfol(ctxt, r.Link, last)
 				}
-				if !(r.Pcond.Mark&FOLL != 0) {
+				if r.Pcond.Mark&FOLL == 0 {
 					fmt.Printf("can't happen 2\n")
 				}
 				return
@@ -1014,13 +1014,12 @@
 		}
 
 		a = AB
-		q = new(obj.Prog)
+		q = ctxt.NewProg()
 		q.As = int16(a)
 		q.Lineno = p.Lineno
 		q.To.Type = obj.TYPE_BRANCH
 		q.To.Offset = p.Pc
 		q.Pcond = p
-		q.Ctxt = p.Ctxt
 		p = q
 	}
 
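Note on the Symmorestack indexing above: noctxt is now computed as a bool at the call
sites, but ctxt.Symmorestack is still an array indexed by 0/1, so the value goes through
bool2int. That helper is not part of this diff; a minimal sketch of the assumed
definition, for readers following the indexing:

	func bool2int(b bool) int {
		// Assumed definition: map the flag back onto the 0/1 index
		// that ctxt.Symmorestack expects.
		if b {
			return 1
		}
		return 0
	}
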
diff --git a/src/cmd/internal/obj/data.go b/src/cmd/internal/obj/data.go
index 265e609..66995a3 100644
--- a/src/cmd/internal/obj/data.go
+++ b/src/cmd/internal/obj/data.go
@@ -142,7 +142,7 @@
 	return off + wid
 }
 
-func Adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
+func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
 	var off int64
 
 	off = s.Size
@@ -150,23 +150,23 @@
 	return off
 }
 
-func Adduint8(ctxt *Link, s *LSym, v uint8) int64 {
-	return Adduintxx(ctxt, s, uint64(v), 1)
+func adduint8(ctxt *Link, s *LSym, v uint8) int64 {
+	return adduintxx(ctxt, s, uint64(v), 1)
 }
 
-func Adduint16(ctxt *Link, s *LSym, v uint16) int64 {
-	return Adduintxx(ctxt, s, uint64(v), 2)
+func adduint16(ctxt *Link, s *LSym, v uint16) int64 {
+	return adduintxx(ctxt, s, uint64(v), 2)
 }
 
 func Adduint32(ctxt *Link, s *LSym, v uint32) int64 {
-	return Adduintxx(ctxt, s, uint64(v), 4)
+	return adduintxx(ctxt, s, uint64(v), 4)
 }
 
 func Adduint64(ctxt *Link, s *LSym, v uint64) int64 {
-	return Adduintxx(ctxt, s, v, 8)
+	return adduintxx(ctxt, s, v, 8)
 }
 
-func Setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
+func setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
 	return Setuintxx(ctxt, s, r, uint64(v), 1)
 }
 
@@ -174,7 +174,7 @@
 	return Setuintxx(ctxt, s, r, uint64(v), 2)
 }
 
-func Setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
+func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
 	return Setuintxx(ctxt, s, r, uint64(v), 4)
 }
 
@@ -182,7 +182,7 @@
 	return Setuintxx(ctxt, s, r, v, 8)
 }
 
-func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
 	var i int64
 	var r *Reloc
 
@@ -222,11 +222,11 @@
 	return i + int64(r.Siz)
 }
 
-func Addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
-	return Addaddrplus(ctxt, s, t, 0)
+func addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
+	return addaddrplus(ctxt, s, t, 0)
 }
 
-func Setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
+func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
 	var r *Reloc
 
 	if s.Type == 0 {
@@ -247,11 +247,11 @@
 	return off + int64(r.Siz)
 }
 
-func Setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
-	return Setaddrplus(ctxt, s, off, t, 0)
+func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
+	return setaddrplus(ctxt, s, off, t, 0)
 }
 
-func Addsize(ctxt *Link, s *LSym, t *LSym) int64 {
+func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
 	var i int64
 	var r *Reloc
 
@@ -270,7 +270,7 @@
 	return i + int64(r.Siz)
 }
 
-func Addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
 	var i int64
 	var r *Reloc
 
diff --git a/src/cmd/internal/obj/go.go b/src/cmd/internal/obj/go.go
index 3c17634..1bd10fc 100644
--- a/src/cmd/internal/obj/go.go
+++ b/src/cmd/internal/obj/go.go
@@ -17,8 +17,6 @@
 
 var Fieldtrack_enabled int
 
-var Zprog Prog
-
 // Toolchain experiments.
 // These are controlled by the GOEXPERIMENT environment
 // variable recorded when the toolchain is built.
@@ -72,11 +70,11 @@
 
 func Nopout(p *Prog) {
 	p.As = ANOP
-	p.Scond = Zprog.Scond
-	p.From = Zprog.From
-	p.From3 = Zprog.From3
-	p.Reg = Zprog.Reg
-	p.To = Zprog.To
+	p.Scond = 0
+	p.From = Addr{}
+	p.From3 = Addr{}
+	p.Reg = 0
+	p.To = Addr{}
 }
 
 func Nocache(p *Prog) {
diff --git a/src/cmd/internal/obj/i386/asm8.go b/src/cmd/internal/obj/i386/asm8.go
index b176d50..97d4012 100644
--- a/src/cmd/internal/obj/i386/asm8.go
+++ b/src/cmd/internal/obj/i386/asm8.go
@@ -1853,7 +1853,7 @@
 			ctxt.Diag("span must be looping")
 			log.Fatalf("bad code")
 		}
-		if !(loop != 0) {
+		if loop == 0 {
 			break
 		}
 	}
@@ -3355,7 +3355,7 @@
 	case Zlit:
 		for ; ; z++ {
 			op = int(o.op[z])
-			if !(op != 0) {
+			if op == 0 {
 				break
 			}
 			ctxt.Andptr[0] = byte(op)
@@ -3365,7 +3365,7 @@
 	case Zlitm_r:
 		for ; ; z++ {
 			op = int(o.op[z])
-			if !(op != 0) {
+			if op == 0 {
 				break
 			}
 			ctxt.Andptr[0] = byte(op)
@@ -3400,7 +3400,7 @@
 			tmp2 := z
 			z++
 			op = int(o.op[tmp2])
-			if !(op != 0) {
+			if op == 0 {
 				break
 			}
 			ctxt.Andptr[0] = byte(op)
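
The Zlit and Zlitm_r cases above copy opcode bytes out of o.op until they reach a zero
entry, so the rewritten exit condition tests the terminator directly. A sketch of the
same convention in isolation; emit is only a stand-in for the ctxt.Andptr bookkeeping:

	// o.op holds literal opcode bytes; the first zero byte ends the sequence.
	for _, b := range o.op {
		if b == 0 {
			break
		}
		emit(b) // stand-in for ctxt.Andptr[0] = byte(b); ctxt.Andptr = ctxt.Andptr[1:]
	}
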
diff --git a/src/cmd/internal/obj/i386/obj8.go b/src/cmd/internal/obj/i386/obj8.go
index f833a9a..ffb7524 100644
--- a/src/cmd/internal/obj/i386/obj8.go
+++ b/src/cmd/internal/obj/i386/obj8.go
@@ -38,16 +38,16 @@
 	"math"
 )
 
-func canuselocaltls(ctxt *obj.Link) int {
+func canuselocaltls(ctxt *obj.Link) bool {
 	switch ctxt.Headtype {
 	case obj.Hlinux,
 		obj.Hnacl,
 		obj.Hplan9,
 		obj.Hwindows:
-		return 0
+		return false
 	}
 
-	return 1
+	return true
 }
 
 func progedit(ctxt *obj.Link, p *obj.Prog) {
@@ -56,7 +56,7 @@
 	var q *obj.Prog
 
 	// See obj6.c for discussion of TLS.
-	if canuselocaltls(ctxt) != 0 {
+	if canuselocaltls(ctxt) {
 		// Reduce TLS initial exec model to TLS local exec model.
 		// Sequences like
 		//	MOVL TLS, BX
@@ -261,13 +261,13 @@
 
 	q = nil
 
-	if !(p.From3.Offset&obj.NOSPLIT != 0) || (p.From3.Offset&obj.WRAPPER != 0) {
+	if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
 		p = obj.Appendp(ctxt, p)
 		p = load_g_cx(ctxt, p) // load g into CX
 	}
 
-	if !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
-		p = stacksplit(ctxt, p, autoffset, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0)), &q) // emit split check
+	if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
+		p = stacksplit(ctxt, p, autoffset, cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
 	}
 
 	if autoffset != 0 {
@@ -367,7 +367,7 @@
 		p2.Pcond = p
 	}
 
-	if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
+	if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
 		// 8l -Z means zero the stack frame on entry.
 		// This slows down function calls but can help avoid
 		// false positives in garbage collection.
@@ -507,7 +507,7 @@
 // Returns last new instruction.
 // On return, *jmpok is the instruction that should jump
 // to the stack frame allocation if no split is needed.
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int, jmpok **obj.Prog) *obj.Prog {
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
 	var q *obj.Prog
 	var q1 *obj.Prog
 
@@ -659,7 +659,7 @@
 	if ctxt.Cursym.Cfunc != 0 {
 		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
 	} else {
-		p.To.Sym = ctxt.Symmorestack[noctxt]
+		p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
 	}
 
 	p = obj.Appendp(ctxt, p)
@@ -684,27 +684,27 @@
 
 	ctxt.Cursym = s
 
-	firstp = new(obj.Prog)
+	firstp = ctxt.NewProg()
 	lastp = firstp
 	xfol(ctxt, s.Text, &lastp)
 	lastp.Link = nil
 	s.Text = firstp.Link
 }
 
-func nofollow(a int) int {
+func nofollow(a int) bool {
 	switch a {
 	case obj.AJMP,
 		obj.ARET,
 		AIRETL,
 		AIRETW,
 		obj.AUNDEF:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
-func pushpop(a int) int {
+func pushpop(a int) bool {
 	switch a {
 	case APUSHL,
 		APUSHFL,
@@ -714,10 +714,10 @@
 		APOPFL,
 		APOPW,
 		APOPFW:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 func relinv(a int) int {
@@ -802,7 +802,7 @@
 				continue
 			}
 
-			if nofollow(a) != 0 || pushpop(a) != 0 {
+			if nofollow(a) || pushpop(a) {
 				break // NOTE(rsc): arm does goto copy
 			}
 			if q.Pcond == nil || q.Pcond.Mark != 0 {
@@ -839,7 +839,7 @@
 				/* */
 			}
 		}
-		q = new(obj.Prog)
+		q = ctxt.NewProg()
 		q.As = obj.AJMP
 		q.Lineno = p.Lineno
 		q.To.Type = obj.TYPE_BRANCH
@@ -856,7 +856,7 @@
 	a = int(p.As)
 
 	/* continue loop with what comes after p */
-	if nofollow(a) != 0 {
+	if nofollow(a) {
 		return
 	}
 	if p.Pcond != nil && a != obj.ACALL {
diff --git a/src/cmd/internal/obj/ld.go b/src/cmd/internal/obj/ld.go
index a53c867..99cf64a 100644
--- a/src/cmd/internal/obj/ld.go
+++ b/src/cmd/internal/obj/ld.go
@@ -31,50 +31,6 @@
 
 package obj
 
-import (
-	"fmt"
-	"os"
-	"path"
-	"strings"
-)
-
-func addlib(ctxt *Link, src, obj, pathname string) {
-	name := path.Clean(pathname)
-
-	// runtime.a -> runtime
-	short := strings.TrimSuffix(name, ".a")
-
-	// already loaded?
-	for i := range ctxt.Library {
-		if ctxt.Library[i].Pkg == short {
-			return
-		}
-	}
-
-	var pname string
-	// runtime -> runtime.a for search
-	if (!(ctxt.Windows != 0) && name[0] == '/') || (ctxt.Windows != 0 && name[1] == ':') {
-		pname = name
-	} else {
-		// try dot, -L "libdir", and then goroot.
-		for _, dir := range ctxt.Libdir {
-			pname = dir + "/" + name
-			if _, err := os.Stat(pname); !os.IsNotExist(err) {
-				break
-			}
-		}
-	}
-	pname = path.Clean(pname)
-
-	// runtime.a -> runtime
-	pname = strings.TrimSuffix(pname, ".a")
-
-	if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
-		fmt.Fprintf(ctxt.Bso, "%5.2f addlib: %s %s pulls in %s\n", Cputime(), obj, src, pname)
-	}
-	Addlibpath(ctxt, src, obj, pname, name)
-}
-
 /*
  * add library to library list.
  *	srcref: src file referring to package
@@ -82,24 +38,6 @@
  *	file: object file, e.g., /home/rsc/go/pkg/container/vector.a
  *	pkg: package import path, e.g. container/vector
  */
-func Addlibpath(ctxt *Link, srcref, objref, file, pkg string) {
-	for _, lib := range ctxt.Library {
-		if lib.File == file {
-			return
-		}
-	}
-
-	if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
-		fmt.Fprintf(ctxt.Bso, "%5.2f addlibpath: srcref: %s objref: %s file: %s pkg: %s\n", Cputime(), srcref, objref, file, pkg)
-	}
-
-	ctxt.Library = append(ctxt.Library, Library{
-		Objref: objref,
-		Srcref: srcref,
-		File:   file,
-		Pkg:    pkg,
-	})
-}
 
 const (
 	LOG = 5
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index d256fbb..7daf12e 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -280,14 +280,14 @@
 }
 
 type Pciter struct {
-	D       Pcdata
-	P       []byte
-	Pc      uint32
-	Nextpc  uint32
-	Pcscale uint32
-	Value   int32
-	Start   int
-	Done    int
+	d       Pcdata
+	p       []byte
+	pc      uint32
+	nextpc  uint32
+	pcscale uint32
+	value   int32
+	start   int
+	done    int
 }
 
 // An Addr is an argument to an instruction.
diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go
index 7e4482c..b15bd00 100644
--- a/src/cmd/internal/obj/objfile.go
+++ b/src/cmd/internal/obj/objfile.go
@@ -188,7 +188,7 @@
 			}
 		}
 
-		if !(found != 0) {
+		if found == 0 {
 			p = Appendp(ctxt, s.Text)
 			p.As = AFUNCDATA
 			p.From.Type = TYPE_CONST
diff --git a/src/cmd/internal/obj/pcln.go b/src/cmd/internal/obj/pcln.go
index 746ca2d..f5cdd3a 100644
--- a/src/cmd/internal/obj/pcln.go
+++ b/src/cmd/internal/obj/pcln.go
@@ -307,7 +307,7 @@
 		v |= uint32(p[0]&0x7F) << uint(shift)
 		tmp7 := p
 		p = p[1:]
-		if !(tmp7[0]&0x80 != 0) {
+		if tmp7[0]&0x80 == 0 {
 			break
 		}
 	}
@@ -316,45 +316,45 @@
 	return v
 }
 
-func Pciternext(it *Pciter) {
+func pciternext(it *Pciter) {
 	var v uint32
 	var dv int32
 
-	it.Pc = it.Nextpc
-	if it.Done != 0 {
+	it.pc = it.nextpc
+	if it.done != 0 {
 		return
 	}
-	if -cap(it.P) >= -cap(it.D.P[len(it.D.P):]) {
-		it.Done = 1
+	if -cap(it.p) >= -cap(it.d.P[len(it.d.P):]) {
+		it.done = 1
 		return
 	}
 
 	// value delta
-	v = getvarint(&it.P)
+	v = getvarint(&it.p)
 
-	if v == 0 && !(it.Start != 0) {
-		it.Done = 1
+	if v == 0 && it.start == 0 {
+		it.done = 1
 		return
 	}
 
-	it.Start = 0
+	it.start = 0
 	dv = int32(v>>1) ^ (int32(v<<31) >> 31)
-	it.Value += dv
+	it.value += dv
 
 	// pc delta
-	v = getvarint(&it.P)
+	v = getvarint(&it.p)
 
-	it.Nextpc = it.Pc + v*it.Pcscale
+	it.nextpc = it.pc + v*it.pcscale
 }
 
-func Pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
-	it.D = *d
-	it.P = it.D.P
-	it.Pc = 0
-	it.Nextpc = 0
-	it.Value = -1
-	it.Start = 1
-	it.Done = 0
-	it.Pcscale = uint32(ctxt.Arch.Minlc)
-	Pciternext(it)
+func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
+	it.d = *d
+	it.p = it.d.P
+	it.pc = 0
+	it.nextpc = 0
+	it.value = -1
+	it.start = 1
+	it.done = 0
+	it.pcscale = uint32(ctxt.Arch.Minlc)
+	pciternext(it)
 }
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 1433710..a4e46be 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -470,16 +470,14 @@
 			if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
 				otxt = p.Pcond.Pc - c
 				if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
-					q = new(obj.Prog)
-					q.Ctxt = p.Ctxt
+					q = ctxt.NewProg()
 					q.Link = p.Link
 					p.Link = q
 					q.As = ABR
 					q.To.Type = obj.TYPE_BRANCH
 					q.Pcond = p.Pcond
 					p.Pcond = q
-					q = new(obj.Prog)
-					q.Ctxt = p.Ctxt
+					q = ctxt.NewProg()
 					q.Link = p.Link
 					p.Link = q
 					q.As = ABR
@@ -534,12 +532,12 @@
 	}
 }
 
-func isint32(v int64) int {
-	return bool2int(int64(int32(v)) == v)
+func isint32(v int64) bool {
+	return int64(int32(v)) == v
 }
 
-func isuint32(v uint64) int {
-	return bool2int(uint64(uint32(v)) == v)
+func isuint32(v uint64) bool {
+	return uint64(uint32(v)) == v
 }
 
 func aclass(ctxt *obj.Link, a *obj.Addr) int {
@@ -637,7 +635,7 @@
 				if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
 					return C_SACON
 				}
-				if isint32(ctxt.Instoffset) != 0 {
+				if isint32(ctxt.Instoffset) {
 					return C_LACON
 				}
 				return C_DACON
@@ -689,10 +687,10 @@
 			if ctxt.Instoffset <= 0xffff {
 				return C_ANDCON
 			}
-			if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) != 0 { /* && (instoffset & (1<<31)) == 0) */
+			if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
 				return C_UCON
 			}
-			if isint32(ctxt.Instoffset) != 0 || isuint32(uint64(ctxt.Instoffset)) != 0 {
+			if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
 				return C_LCON
 			}
 			return C_DCON
@@ -701,10 +699,10 @@
 		if ctxt.Instoffset >= -0x8000 {
 			return C_ADDCON
 		}
-		if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) != 0 {
+		if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
 			return C_UCON
 		}
-		if isint32(ctxt.Instoffset) != 0 {
+		if isint32(ctxt.Instoffset) {
 			return C_LCON
 		}
 		return C_DCON
@@ -1407,20 +1405,20 @@
 /*
  * 32-bit masks
  */
-func getmask(m []byte, v uint32) int {
+func getmask(m []byte, v uint32) bool {
 	var i int
 
 	m[1] = 0
 	m[0] = m[1]
 	if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
-		if getmask(m, ^v) != 0 {
+		if getmask(m, ^v) {
 			i = int(m[0])
 			m[0] = m[1] + 1
 			m[1] = byte(i - 1)
-			return 1
+			return true
 		}
 
-		return 0
+		return false
 	}
 
 	for i = 0; i < 32; i++ {
@@ -1429,25 +1427,25 @@
 			for {
 				m[1] = byte(i)
 				i++
-				if !(i < 32 && v&(1<<uint(31-i)) != 0) {
+				if i >= 32 || v&(1<<uint(31-i)) == 0 {
 					break
 				}
 			}
 
 			for ; i < 32; i++ {
 				if v&(1<<uint(31-i)) != 0 {
-					return 0
+					return false
 				}
 			}
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 func maskgen(ctxt *obj.Link, p *obj.Prog, m []byte, v uint32) {
-	if !(getmask(m, v) != 0) {
+	if !getmask(m, v) {
 		ctxt.Diag("cannot generate mask #%x\n%v", v, p)
 	}
 }
@@ -1455,7 +1453,7 @@
 /*
  * 64-bit masks (rldic etc)
  */
-func getmask64(m []byte, v uint64) int {
+func getmask64(m []byte, v uint64) bool {
 	var i int
 
 	m[1] = 0
@@ -1466,25 +1464,25 @@
 			for {
 				m[1] = byte(i)
 				i++
-				if !(i < 64 && v&(uint64(1)<<uint(63-i)) != 0) {
+				if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
 					break
 				}
 			}
 
 			for ; i < 64; i++ {
 				if v&(uint64(1)<<uint(63-i)) != 0 {
-					return 0
+					return false
 				}
 			}
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 func maskgen64(ctxt *obj.Link, p *obj.Prog, m []byte, v uint64) {
-	if !(getmask64(m, v) != 0) {
+	if !getmask64(m, v) {
 		ctxt.Diag("cannot generate mask #%x\n%v", v, p)
 	}
 }
@@ -1493,7 +1491,7 @@
 	var v int32
 
 	v = int32(d >> 16)
-	if isuint32(uint64(d)) != 0 {
+	if isuint32(uint64(d)) {
 		return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
 	}
 	return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
@@ -1574,7 +1572,7 @@
 				log.Fatalf("invalid handling of %v", p)
 			}
 			v >>= 16
-			if r == REGZERO && isuint32(uint64(d)) != 0 {
+			if r == REGZERO && isuint32(uint64(d)) {
 				o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
 				break
 			}
@@ -1862,7 +1860,7 @@
 		if r == 0 {
 			r = int(p.To.Reg)
 		}
-		if p.As == AADD && (!(r0iszero != 0 /*TypeKind(100016)*/) && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
+		if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
 			ctxt.Diag("literal operation on R0\n%v", p)
 		}
 		o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
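
getmask and getmask64 now report success as a bool; the mask begin/end bytes written to
m are unchanged. For readers of maskgen, a worked example of what getmask computes,
using the same MSB-first bit numbering as the 31-i indexing in the loop (the snippet is
illustrative only):

	// 0x00ff0000 has bits 8..15 set, counting from the most significant
	// bit, so getmask succeeds with m[0]=8, m[1]=15.  0x00ff00ff is not
	// a single run of bits (even allowing wraparound), so getmask
	// returns false and maskgen diagnoses it.
	var m [2]byte
	if getmask(m[:], 0x00ff0000) {
		// m[0] == 8 (mask begin), m[1] == 15 (mask end)
	}
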
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index 4675a9e..7524674 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -289,7 +289,7 @@
 					p.Pcond = q1
 				}
 
-				if !(q1.Mark&LEAF != 0) {
+				if q1.Mark&LEAF == 0 {
 					q1.Mark |= LABEL
 				}
 			} else {
@@ -341,15 +341,15 @@
 			}
 			p.To.Offset = int64(autosize) - 8
 
-			if !(p.From3.Offset&obj.NOSPLIT != 0) {
-				p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0))) // emit split check
+			if p.From3.Offset&obj.NOSPLIT == 0 {
+				p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
 			}
 
 			q = p
 
 			if autosize != 0 {
 				/* use MOVDU to adjust R1 when saving R31, if autosize is small */
-				if !(cursym.Text.Mark&LEAF != 0) && autosize >= -BIG && autosize <= BIG {
+				if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG {
 					mov = AMOVDU
 					aoffset = int(-autosize)
 				} else {
@@ -362,7 +362,7 @@
 					q.To.Reg = REGSP
 					q.Spadj = +autosize
 				}
-			} else if !(cursym.Text.Mark&LEAF != 0) {
+			} else if cursym.Text.Mark&LEAF == 0 {
 				if ctxt.Debugvlog != 0 {
 					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
 					obj.Bflush(ctxt.Bso)
@@ -499,9 +499,9 @@
 			}
 
 			if cursym.Text.Mark&LEAF != 0 {
-				if !(autosize != 0) {
+				if autosize == 0 {
 					p.As = ABR
-					p.From = obj.Zprog.From
+					p.From = obj.Addr{}
 					p.To.Type = obj.TYPE_REG
 					p.To.Reg = REG_LR
 					p.Mark |= BRANCH
@@ -515,7 +515,7 @@
 				p.To.Reg = REGSP
 				p.Spadj = -autosize
 
-				q = p.Ctxt.NewProg()
+				q = ctxt.NewProg()
 				q.As = ABR
 				q.Lineno = p.Lineno
 				q.To.Type = obj.TYPE_REG
@@ -535,7 +535,7 @@
 			p.To.Type = obj.TYPE_REG
 			p.To.Reg = REGTMP
 
-			q = p.Ctxt.NewProg()
+			q = ctxt.NewProg()
 			q.As = AMOVD
 			q.Lineno = p.Lineno
 			q.From.Type = obj.TYPE_REG
@@ -549,7 +549,7 @@
 
 			if false {
 				// Debug bad returns
-				q = p.Ctxt.NewProg()
+				q = ctxt.NewProg()
 				q.As = AMOVD
 				q.Lineno = p.Lineno
 				q.From.Type = obj.TYPE_MEM
@@ -564,7 +564,7 @@
 			}
 
 			if autosize != 0 {
-				q = p.Ctxt.NewProg()
+				q = ctxt.NewProg()
 				q.As = AADD
 				q.Lineno = p.Lineno
 				q.From.Type = obj.TYPE_CONST
@@ -577,7 +577,7 @@
 				p.Link = q
 			}
 
-			q1 = p.Ctxt.NewProg()
+			q1 = ctxt.NewProg()
 			q1.As = ABR
 			q1.Lineno = p.Lineno
 			q1.To.Type = obj.TYPE_REG
@@ -641,7 +641,7 @@
 		q = p;
 	}
 */
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.Prog {
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
 	var q *obj.Prog
 	var q1 *obj.Prog
 
@@ -774,7 +774,7 @@
 	if ctxt.Cursym.Cfunc != 0 {
 		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
 	} else {
-		p.To.Sym = ctxt.Symmorestack[noctxt]
+		p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
 	}
 
 	// BR	start
@@ -799,7 +799,7 @@
 
 	ctxt.Cursym = s
 
-	firstp = new(obj.Prog)
+	firstp = ctxt.NewProg()
 	lastp = firstp
 	xfol(ctxt, s.Text, &lastp)
 	lastp.Link = nil
@@ -853,7 +853,7 @@
 			p = p.Link
 			xfol(ctxt, p, last)
 			p = q
-			if p != nil && !(p.Mark&FOLL != 0) {
+			if p != nil && p.Mark&FOLL == 0 {
 				goto loop
 			}
 			return
@@ -862,7 +862,7 @@
 		if q != nil {
 			p.Mark |= FOLL
 			p = q
-			if !(p.Mark&FOLL != 0) {
+			if p.Mark&FOLL == 0 {
 				goto loop
 			}
 		}
@@ -885,19 +885,19 @@
 			if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
 				goto copy
 			}
-			if !(q.Pcond != nil) || (q.Pcond.Mark&FOLL != 0) {
+			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
 				continue
 			}
 			b = relinv(a)
-			if !(b != 0) {
+			if b == 0 {
 				continue
 			}
 
 		copy:
 			for {
-				r = new(obj.Prog)
+				r = ctxt.NewProg()
 				*r = *p
-				if !(r.Mark&FOLL != 0) {
+				if r.Mark&FOLL == 0 {
 					fmt.Printf("cant happen 1\n")
 				}
 				r.Mark |= FOLL
@@ -916,10 +916,10 @@
 				r.As = int16(b)
 				r.Pcond = p.Link
 				r.Link = p.Pcond
-				if !(r.Link.Mark&FOLL != 0) {
+				if r.Link.Mark&FOLL == 0 {
 					xfol(ctxt, r.Link, last)
 				}
-				if !(r.Pcond.Mark&FOLL != 0) {
+				if r.Pcond.Mark&FOLL == 0 {
 					fmt.Printf("cant happen 2\n")
 				}
 				return
@@ -927,7 +927,7 @@
 		}
 
 		a = ABR
-		q = p.Ctxt.NewProg()
+		q = ctxt.NewProg()
 		q.As = int16(a)
 		q.Lineno = p.Lineno
 		q.To.Type = obj.TYPE_BRANCH
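
The new(obj.Prog) allocations here and in the other backends now go through
ctxt.NewProg, which is also why the explicit q.Ctxt assignments disappear above. The
method itself (defined in package obj) is not in this diff; the sketch below shows only
the behaviour the call sites rely on, and its body is an assumption:

	// Assumed shape of the constructor: every Prog records the Link
	// that created it, so callers no longer set q.Ctxt by hand.
	func (ctxt *Link) NewProg() *Prog {
		p := new(Prog)
		p.Ctxt = ctxt
		return p
	}
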
diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go
index e1ba5de..30d0eb1 100644
--- a/src/cmd/internal/obj/sym.go
+++ b/src/cmd/internal/obj/sym.go
@@ -100,7 +100,7 @@
 	}{"windowsgui", Hwindows},
 }
 
-func Headtype(name string) int {
+func headtype(name string) int {
 	var i int
 
 	for i = 0; i < len(headers); i++ {
@@ -146,7 +146,7 @@
 
 	ctxt.Pathname = buf
 
-	ctxt.Headtype = Headtype(Getgoos())
+	ctxt.Headtype = headtype(Getgoos())
 	if ctxt.Headtype < 0 {
 		log.Fatalf("unknown goos %s", Getgoos())
 	}
@@ -222,7 +222,7 @@
 	return ctxt
 }
 
-func Linknewsym(ctxt *Link, symb string, v int) *LSym {
+func linknewsym(ctxt *Link, symb string, v int) *LSym {
 	var s *LSym
 
 	s = new(LSym)
@@ -261,11 +261,11 @@
 			return s
 		}
 	}
-	if !(creat != 0) {
+	if creat == 0 {
 		return nil
 	}
 
-	s = Linknewsym(ctxt, symb, v)
+	s = linknewsym(ctxt, symb, v)
 	s.Extname = s.Name
 	s.Hash = ctxt.Hash[h]
 	ctxt.Hash[h] = s
@@ -278,7 +278,7 @@
 }
 
 // read-only lookup
-func Linkrlookup(ctxt *Link, name string, v int) *LSym {
+func linkrlookup(ctxt *Link, name string, v int) *LSym {
 	return _lookup(ctxt, name, v, 0)
 }
 
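Only the creating lookup stays exported here; linknewsym and linkrlookup become
package-internal. A sketch of the distinction from inside package obj; isdefined is a
hypothetical helper:

	// linkrlookup never creates a symbol, so a nil result means the
	// name has not been defined; Linklookup with the same arguments
	// would insert it into ctxt.Hash and return the new *LSym.
	func isdefined(ctxt *Link, name string) bool {
		return linkrlookup(ctxt, name, 0) != nil
	}
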
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 1d86d46..3c1213e 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -2118,9 +2118,9 @@
 // It does not seem to be necessary for any other systems. This is probably working
 // around a Solaris-specific bug that should be fixed differently, but we don't know
 // what that bug is. And this does fix it.
-func isextern(s *obj.LSym) int {
+func isextern(s *obj.LSym) bool {
 	// All the Solaris dynamic imports from libc.so begin with "libc_".
-	return bool2int(strings.HasPrefix(s.Name, "libc_"))
+	return strings.HasPrefix(s.Name, "libc_")
 }
 
 // single-instruction no-ops of various lengths.
@@ -2348,7 +2348,7 @@
 			ctxt.Diag("span must be looping")
 			log.Fatalf("loop")
 		}
-		if !(loop != 0) {
+		if loop == 0 {
 			break
 		}
 	}
@@ -2589,7 +2589,7 @@
 		switch a.Name {
 		case obj.NAME_EXTERN,
 			obj.NAME_STATIC:
-			if a.Sym != nil && isextern(a.Sym) != 0 {
+			if a.Sym != nil && isextern(a.Sym) {
 				return Yi32
 			}
 			return Yiauto // use pc-relative addressing
@@ -2997,7 +2997,7 @@
 			log.Fatalf("reloc")
 		}
 
-		if isextern(s) != 0 {
+		if isextern(s) {
 			r.Siz = 4
 			r.Type = obj.R_ADDR
 		} else {
@@ -3074,7 +3074,7 @@
 		switch a.Name {
 		case obj.NAME_EXTERN,
 			obj.NAME_STATIC:
-			if !(isextern(a.Sym) != 0) {
+			if !isextern(a.Sym) {
 				goto bad
 			}
 			base = REG_NONE
@@ -3136,7 +3136,7 @@
 
 	ctxt.Rexflag |= regrex[base]&Rxb | rex
 	if base == REG_NONE || (REG_CS <= base && base <= REG_GS) || base == REG_TLS {
-		if (a.Sym == nil || !(isextern(a.Sym) != 0)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN) || ctxt.Asmode != 64 {
+		if (a.Sym == nil || !isextern(a.Sym)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN) || ctxt.Asmode != 64 {
 			ctxt.Andptr[0] = byte(0<<6 | 5<<0 | r<<3)
 			ctxt.Andptr = ctxt.Andptr[1:]
 			goto putrelv
@@ -3370,18 +3370,18 @@
 	Movtab{0, 0, 0, 0, [4]uint8{}},
 }
 
-func isax(a *obj.Addr) int {
+func isax(a *obj.Addr) bool {
 	switch a.Reg {
 	case REG_AX,
 		REG_AL,
 		REG_AH:
-		return 1
+		return true
 	}
 
 	if a.Index == REG_AX {
-		return 1
+		return true
 	}
-	return 0
+	return false
 }
 
 func subreg(p *obj.Prog, from int, to int) {
@@ -3587,7 +3587,7 @@
 	case Zlit:
 		for ; ; z++ {
 			op = int(o.op[z])
-			if !(op != 0) {
+			if op == 0 {
 				break
 			}
 			ctxt.Andptr[0] = byte(op)
@@ -3597,7 +3597,7 @@
 	case Zlitm_r:
 		for ; ; z++ {
 			op = int(o.op[z])
-			if !(op != 0) {
+			if op == 0 {
 				break
 			}
 			ctxt.Andptr[0] = byte(op)
@@ -3652,7 +3652,7 @@
 			tmp1 := z
 			z++
 			op = int(o.op[tmp1])
-			if !(op != 0) {
+			if op == 0 {
 				break
 			}
 			ctxt.Andptr[0] = byte(op)
@@ -4097,7 +4097,7 @@
 
 		z = int(p.From.Reg)
 		if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
-			if isax(&p.To) != 0 || p.To.Type == obj.TYPE_NONE {
+			if isax(&p.To) || p.To.Type == obj.TYPE_NONE {
 				// We certainly don't want to exchange
 				// with AX if the op is MUL or DIV.
 				ctxt.Andptr[0] = 0x87
@@ -4122,7 +4122,7 @@
 
 		z = int(p.To.Reg)
 		if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
-			if isax(&p.From) != 0 {
+			if isax(&p.From) {
 				ctxt.Andptr[0] = 0x87
 				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
 				asmando(ctxt, p, &p.To, reg[REG_BX])
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index c1ad6b7..0dc4c3a 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -38,14 +38,14 @@
 	"math"
 )
 
-func canuselocaltls(ctxt *obj.Link) int {
+func canuselocaltls(ctxt *obj.Link) bool {
 	switch ctxt.Headtype {
 	case obj.Hplan9,
 		obj.Hwindows:
-		return 0
+		return false
 	}
 
-	return 1
+	return true
 }
 
 func progedit(ctxt *obj.Link, p *obj.Prog) {
@@ -86,7 +86,7 @@
 	// access TLS, and they are rewritten appropriately first here in
 	// liblink and then finally using relocations in the linker.
 
-	if canuselocaltls(ctxt) != 0 {
+	if canuselocaltls(ctxt) {
 		// Reduce TLS initial exec model to TLS local exec model.
 		// Sequences like
 		//	MOVQ TLS, BX
@@ -366,7 +366,7 @@
 	cursym.Args = int32(textarg)
 	cursym.Locals = int32(p.To.Offset)
 
-	if autoffset < obj.StackSmall && !(p.From3.Offset&obj.NOSPLIT != 0) {
+	if autoffset < obj.StackSmall && p.From3.Offset&obj.NOSPLIT == 0 {
 		for q = p; q != nil; q = q.Link {
 			if q.As == obj.ACALL {
 				goto noleaf
@@ -381,13 +381,13 @@
 	}
 
 	q = nil
-	if !(p.From3.Offset&obj.NOSPLIT != 0) || (p.From3.Offset&obj.WRAPPER != 0) {
+	if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
 		p = obj.Appendp(ctxt, p)
 		p = load_g_cx(ctxt, p) // load g into CX
 	}
 
-	if !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
-		p = stacksplit(ctxt, p, autoffset, int32(textarg), bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0)), &q) // emit split check
+	if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
+		p = stacksplit(ctxt, p, autoffset, int32(textarg), cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
 	}
 
 	if autoffset != 0 {
@@ -540,7 +540,7 @@
 		p2.Pcond = p
 	}
 
-	if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
+	if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
 		// 6l -Z means zero the stack frame on entry.
 		// This slows down function calls but can help avoid
 		// false positives in garbage collection.
@@ -722,7 +722,7 @@
 // Returns last new instruction.
 // On return, *jmpok is the instruction that should jump
 // to the stack frame allocation if no split is needed.
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt int, jmpok **obj.Prog) *obj.Prog {
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
 	var q *obj.Prog
 	var q1 *obj.Prog
 	var cmp int
@@ -853,7 +853,7 @@
 	if ctxt.Cursym.Cfunc != 0 {
 		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
 	} else {
-		p.To.Sym = ctxt.Symmorestack[noctxt]
+		p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
 	}
 
 	p = obj.Appendp(ctxt, p)
@@ -878,14 +878,14 @@
 
 	ctxt.Cursym = s
 
-	firstp = new(obj.Prog)
+	firstp = ctxt.NewProg()
 	lastp = firstp
 	xfol(ctxt, s.Text, &lastp)
 	lastp.Link = nil
 	s.Text = firstp.Link
 }
 
-func nofollow(a int) int {
+func nofollow(a int) bool {
 	switch a {
 	case obj.AJMP,
 		obj.ARET,
@@ -896,13 +896,13 @@
 		ARETFQ,
 		ARETFW,
 		obj.AUNDEF:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
-func pushpop(a int) int {
+func pushpop(a int) bool {
 	switch a {
 	case APUSHL,
 		APUSHFL,
@@ -916,10 +916,10 @@
 		APOPFQ,
 		APOPW,
 		APOPFW:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 func relinv(a int) int {
@@ -1004,7 +1004,7 @@
 				continue
 			}
 
-			if nofollow(a) != 0 || pushpop(a) != 0 {
+			if nofollow(a) || pushpop(a) {
 				break // NOTE(rsc): arm does goto copy
 			}
 			if q.Pcond == nil || q.Pcond.Mark != 0 {
@@ -1041,7 +1041,7 @@
 				/* */
 			}
 		}
-		q = new(obj.Prog)
+		q = ctxt.NewProg()
 		q.As = obj.AJMP
 		q.Lineno = p.Lineno
 		q.To.Type = obj.TYPE_BRANCH
@@ -1058,7 +1058,7 @@
 	a = int(p.As)
 
 	/* continue loop with what comes after p */
-	if nofollow(a) != 0 {
+	if nofollow(a) {
 		return
 	}
 	if p.Pcond != nil && a != obj.ACALL {
diff --git a/src/cmd/new5g/cgen.go b/src/cmd/new5g/cgen.go
index f1a42c1..bdee52a 100644
--- a/src/cmd/new5g/cgen.go
+++ b/src/cmd/new5g/cgen.go
@@ -54,7 +54,7 @@
 		gc.OSLICESTR,
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
-		if res.Op != gc.ONAME || !(res.Addable != 0) {
+		if res.Op != gc.ONAME || res.Addable == 0 {
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_slice(n, &n1)
 			cgen(&n1, res)
@@ -64,7 +64,7 @@
 		return
 
 	case gc.OEFACE:
-		if res.Op != gc.ONAME || !(res.Addable != 0) {
+		if res.Op != gc.ONAME || res.Addable == 0 {
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_eface(n, &n1)
 			cgen(&n1, res)
@@ -90,7 +90,7 @@
 		}
 	}
 
-	if gc.Isfat(n.Type) != 0 {
+	if gc.Isfat(n.Type) {
 		if n.Type.Width < 0 {
 			gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
 		}
@@ -104,12 +104,12 @@
 	switch n.Op {
 	case gc.OSPTR,
 		gc.OLEN:
-		if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+		if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
 			n.Addable = n.Left.Addable
 		}
 
 	case gc.OCAP:
-		if gc.Isslice(n.Left.Type) != 0 {
+		if gc.Isslice(n.Left.Type) {
 			n.Addable = n.Left.Addable
 		}
 
@@ -119,7 +119,7 @@
 
 	// if both are addressable, move
 	if n.Addable != 0 && res.Addable != 0 {
-		if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
+		if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
 			gmove(n, res)
 		} else {
 			regalloc(&n1, n.Type, nil)
@@ -132,7 +132,7 @@
 	}
 
 	// if both are not addressable, use a temporary.
-	if !(n.Addable != 0) && !(res.Addable != 0) {
+	if n.Addable == 0 && res.Addable == 0 {
 		// could use regalloc here sometimes,
 		// but have to check for ullman >= UINF.
 		gc.Tempname(&n1, n.Type)
@@ -144,22 +144,22 @@
 
 	// if result is not addressable directly but n is,
 	// compute its address and then store via the address.
-	if !(res.Addable != 0) {
+	if res.Addable == 0 {
 		igen(res, &n1, nil)
 		cgen(n, &n1)
 		regfree(&n1)
 		return
 	}
 
-	if gc.Complexop(n, res) != 0 {
+	if gc.Complexop(n, res) {
 		gc.Complexgen(n, res)
 		return
 	}
 
 	// if n is sudoaddable generate addr and move
-	if !(gc.Is64(n.Type) != 0) && !(gc.Is64(res.Type) != 0) && !(gc.Iscomplex[n.Type.Etype] != 0) && !(gc.Iscomplex[res.Type.Etype] != 0) {
+	if !gc.Is64(n.Type) && !gc.Is64(res.Type) && gc.Iscomplex[n.Type.Etype] == 0 && gc.Iscomplex[res.Type.Etype] == 0 {
 		a = optoas(gc.OAS, n.Type)
-		if sudoaddable(a, n, &addr, &w) != 0 {
+		if sudoaddable(a, n, &addr, &w) {
 			if res.Op != gc.OREGISTER {
 				regalloc(&n2, res.Type, nil)
 				p1 = gins(a, nil, &n2)
@@ -201,7 +201,7 @@
 	}
 
 	// 64-bit ops are hard on 32-bit machine.
-	if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Left != nil && gc.Is64(n.Left.Type) != 0 {
+	if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
 		switch n.Op {
 		// math goes to cgen64.
 		case gc.OMINUS,
@@ -247,11 +247,11 @@
 		p1 = gc.Gbranch(arm.AB, nil, 0)
 
 		p2 = gc.Pc
-		gmove(gc.Nodbool(1), res)
+		gmove(gc.Nodbool(true), res)
 		p3 = gc.Gbranch(arm.AB, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
-		gmove(gc.Nodbool(0), res)
+		gmove(gc.Nodbool(false), res)
 		gc.Patch(p3, gc.Pc)
 		goto ret
 
@@ -298,19 +298,19 @@
 	case gc.OLROT,
 		gc.OLSH,
 		gc.ORSH:
-		cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+		cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
 
 	case gc.OCONV:
-		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
 			cgen(nl, res)
 			break
 		}
 
-		if nl.Addable != 0 && !(gc.Is64(nl.Type) != 0) {
+		if nl.Addable != 0 && !gc.Is64(nl.Type) {
 			regalloc(&n1, nl.Type, res)
 			gmove(nl, &n1)
 		} else {
-			if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) != 0 || gc.Isfloat[nl.Type.Etype] != 0 {
+			if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) || gc.Isfloat[nl.Type.Etype] != 0 {
 				gc.Tempname(&n1, nl.Type)
 			} else {
 				regalloc(&n1, nl.Type, res)
@@ -318,7 +318,7 @@
 			cgen(nl, &n1)
 		}
 
-		if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) != 0 || gc.Isfloat[n.Type.Etype] != 0 {
+		if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] != 0 {
 			gc.Tempname(&n2, n.Type)
 		} else {
 			regalloc(&n2, n.Type, nil)
@@ -352,7 +352,7 @@
 
 		// pointer is the first word of string or slice.
 	case gc.OSPTR:
-		if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 			p1 = gins(arm.AMOVW, nil, &n1)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@@ -367,7 +367,7 @@
 		regfree(&n1)
 
 	case gc.OLEN:
-		if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
 			// map has len in the first 32-bit word.
 			// a zero pointer means zero length
 			regalloc(&n1, gc.Types[gc.Tptr], res)
@@ -390,7 +390,7 @@
 			break
 		}
 
-		if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
 			// both slice and string have len one pointer into the struct.
 			igen(nl, &n1, res)
 
@@ -404,7 +404,7 @@
 		gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
 
 	case gc.OCAP:
-		if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+		if gc.Istype(nl.Type, gc.TCHAN) {
 			// chan has cap in the second 32-bit word.
 			// a zero pointer means zero length
 			regalloc(&n1, gc.Types[gc.Tptr], res)
@@ -428,7 +428,7 @@
 			break
 		}
 
-		if gc.Isslice(nl.Type) != 0 {
+		if gc.Isslice(nl.Type) {
 			igen(nl, &n1, res)
 			n1.Type = gc.Types[gc.TUINT32]
 			n1.Xoffset += int64(gc.Array_cap)
@@ -495,7 +495,7 @@
 			gc.OAND,
 			gc.OOR,
 			gc.OXOR:
-			if gc.Smallintconst(nr) != 0 {
+			if gc.Smallintconst(nr) {
 				n2 = *nr
 				break
 			}
@@ -512,7 +512,7 @@
 			gc.OAND,
 			gc.OOR,
 			gc.OXOR:
-			if gc.Smallintconst(nr) != 0 {
+			if gc.Smallintconst(nr) {
 				n2 = *nr
 				break
 			}
@@ -600,7 +600,7 @@
  * n might be any size; res is 32-bit.
  * returns Prog* to patch to panic call.
  */
-func cgenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
+func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
 	var tmp gc.Node
 	var lo gc.Node
 	var hi gc.Node
@@ -608,7 +608,7 @@
 	var n1 gc.Node
 	var n2 gc.Node
 
-	if !(gc.Is64(n.Type) != 0) {
+	if !gc.Is64(n.Type) {
 		cgen(n, res)
 		return nil
 	}
@@ -617,7 +617,7 @@
 	cgen(n, &tmp)
 	split64(&tmp, &lo, &hi)
 	gmove(&lo, res)
-	if bounded != 0 {
+	if bounded {
 		splitclean()
 		return nil
 	}
@@ -659,7 +659,7 @@
 		n = n.Left
 	}
 
-	if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
 		// Use of a nil interface or nil slice.
 		// Create a temporary we can take the address of and read.
 		// The generated code is just going to panic, so it need not
@@ -746,7 +746,7 @@
 		}
 
 		// should only get here for heap vars or paramref
-		if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+		if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
 			gc.Dump("bad agen", n)
 			gc.Fatal("agen: bad ONAME class %#x", n.Class)
 		}
@@ -912,7 +912,7 @@
 		gc.Dump("cgenr-n", n)
 	}
 
-	if gc.Isfat(n.Type) != 0 {
+	if gc.Isfat(n.Type) {
 		gc.Fatal("cgenr on fat node")
 	}
 
@@ -960,7 +960,7 @@
 	var p2 *obj.Prog
 	var w uint32
 	var v uint64
-	var bounded int
+	var bounded bool
 
 	if gc.Debug['g'] != 0 {
 		gc.Dump("agenr-n", n)
@@ -987,35 +987,35 @@
 	case gc.OINDEX:
 		p2 = nil // to be patched to panicindex.
 		w = uint32(n.Type.Width)
-		bounded = bool2int(gc.Debug['B'] != 0 || n.Bounded != 0)
+		bounded = gc.Debug['B'] != 0 || n.Bounded
 		if nr.Addable != 0 {
-			if !(gc.Isconst(nr, gc.CTINT) != 0) {
+			if !gc.Isconst(nr, gc.CTINT) {
 				gc.Tempname(&tmp, gc.Types[gc.TINT32])
 			}
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				agenr(nl, &n3, res)
 			}
-			if !(gc.Isconst(nr, gc.CTINT) != 0) {
+			if !gc.Isconst(nr, gc.CTINT) {
 				p2 = cgenindex(nr, &tmp, bounded)
 				regalloc(&n1, tmp.Type, nil)
 				gmove(&tmp, &n1)
 			}
 		} else if nl.Addable != 0 {
-			if !(gc.Isconst(nr, gc.CTINT) != 0) {
+			if !gc.Isconst(nr, gc.CTINT) {
 				gc.Tempname(&tmp, gc.Types[gc.TINT32])
 				p2 = cgenindex(nr, &tmp, bounded)
 				regalloc(&n1, tmp.Type, nil)
 				gmove(&tmp, &n1)
 			}
 
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				agenr(nl, &n3, res)
 			}
 		} else {
 			gc.Tempname(&tmp, gc.Types[gc.TINT32])
 			p2 = cgenindex(nr, &tmp, bounded)
 			nr = &tmp
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				agenr(nl, &n3, res)
 			}
 			regalloc(&n1, tmp.Type, nil)
@@ -1027,13 +1027,13 @@
 		// w is width
 
 		// constant index
-		if gc.Isconst(nr, gc.CTINT) != 0 {
-			if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nr, gc.CTINT) {
+			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Fatal("constant string constant index")
 			}
 			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
-			if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
-				if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				if gc.Debug['B'] == 0 && !n.Bounded {
 					n1 = n3
 					n1.Op = gc.OINDREG
 					n1.Type = gc.Types[gc.Tptr]
@@ -1065,11 +1065,11 @@
 		gmove(&n1, &n2)
 		regfree(&n1)
 
-		if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+		if gc.Debug['B'] == 0 && !n.Bounded {
 			// check bounds
-			if gc.Isconst(nl, gc.CTSTR) != 0 {
+			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Nodconst(&n4, gc.Types[gc.TUINT32], int64(len(nl.Val.U.Sval.S)))
-			} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 				n1 = n3
 				n1.Op = gc.OINDREG
 				n1.Type = gc.Types[gc.Tptr]
@@ -1092,12 +1092,12 @@
 			gc.Patch(p1, gc.Pc)
 		}
 
-		if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n3, gc.Types[gc.Tptr], res)
 			p1 = gins(arm.AMOVW, nil, &n3)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			p1.From.Type = obj.TYPE_ADDR
-		} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+		} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 			n1 = n3
 			n1.Op = gc.OINDREG
 			n1.Type = gc.Types[gc.Tptr]
@@ -1185,7 +1185,7 @@
 	}
 
 	if n == nil {
-		n = gc.Nodbool(1)
+		n = gc.Nodbool(true)
 	}
 
 	if n.Ninit != nil {
@@ -1219,7 +1219,7 @@
 
 		// need to ask if it is bool?
 	case gc.OLITERAL:
-		if !true_ == !(n.Val.U.Bval != 0) {
+		if !true_ == (n.Val.U.Bval == 0) {
 			gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
 		}
 		goto ret
@@ -1302,7 +1302,7 @@
 			nr = r
 		}
 
-		if gc.Isslice(nl.Type) != 0 {
+		if gc.Isslice(nl.Type) {
 			// only valid to cmp darray to literal nil
 			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
 				gc.Yyerror("illegal array comparison")
@@ -1317,7 +1317,7 @@
 			break
 		}
 
-		if gc.Isinter(nl.Type) != 0 {
+		if gc.Isinter(nl.Type) {
 			// front end should only leave cmp to literal nil
 			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
 				gc.Yyerror("illegal interface comparison")
@@ -1337,14 +1337,14 @@
 			break
 		}
 
-		if gc.Is64(nr.Type) != 0 {
-			if !(nl.Addable != 0) {
+		if gc.Is64(nr.Type) {
+			if nl.Addable == 0 {
 				gc.Tempname(&n1, nl.Type)
 				cgen(nl, &n1)
 				nl = &n1
 			}
 
-			if !(nr.Addable != 0) {
+			if nr.Addable == 0 {
 				gc.Tempname(&n2, nr.Type)
 				cgen(nr, &n2)
 				nr = &n2
@@ -1355,7 +1355,7 @@
 		}
 
 		if nr.Op == gc.OLITERAL {
-			if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
+			if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
 				gencmp0(nl, nl.Type, a, likely, to)
 				break
 			}
@@ -1453,14 +1453,14 @@
 
 	case gc.OINDEX:
 		t = n.Left.Type
-		if !(gc.Isfixedarray(t) != 0) {
+		if !gc.Isfixedarray(t) {
 			break
 		}
 		off = stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
-		if gc.Isconst(n.Right, gc.CTINT) != 0 {
+		if gc.Isconst(n.Right, gc.CTINT) {
 			return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
 		}
 		return 1000
@@ -1547,7 +1547,7 @@
 	}
 
 	// Avoid taking the address for simple enough types.
-	if componentgen(n, res) != 0 {
+	if componentgen(n, res) {
 		return
 	}
 
@@ -1560,7 +1560,6 @@
 	switch align {
 	default:
 		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
-		fallthrough
 
 	case 1:
 		op = arm.AMOVB
@@ -1712,7 +1711,7 @@
 		for {
 			tmp14 := c
 			c--
-			if !(tmp14 > 0) {
+			if tmp14 <= 0 {
 				break
 			}
 			p = gins(op, &src, &tmp)
@@ -1732,19 +1731,19 @@
 	regfree(&tmp)
 }
 
-func cadable(n *gc.Node) int {
-	if !(n.Addable != 0) {
+func cadable(n *gc.Node) bool {
+	if n.Addable == 0 {
 		// don't know how it happens,
 		// but it does
-		return 0
+		return false
 	}
 
 	switch n.Op {
 	case gc.ONAME:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -1755,7 +1754,7 @@
  * nr is N when assigning a zero value.
- * return 1 if can do, 0 if cant.
+ * return true if can do, false if cannot.
  */
-func componentgen(nr *gc.Node, nl *gc.Node) int {
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
 	var nodl gc.Node
 	var nodr gc.Node
 	var tmp gc.Node
@@ -1777,12 +1776,12 @@
 		t = nl.Type
 
 		// Slices are ok.
-		if gc.Isslice(t) != 0 {
+		if gc.Isslice(t) {
 			break
 		}
 
 		// Small arrays are ok.
-		if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+		if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
 			break
 		}
 
@@ -1794,7 +1793,7 @@
 		fldcount = 0
 
 		for t = nl.Type.Type; t != nil; t = t.Down {
-			if gc.Isfat(t.Type) != 0 {
+			if gc.Isfat(t.Type) {
 				goto no
 			}
 			if t.Etype != gc.TFIELD {
@@ -1813,8 +1812,8 @@
 	}
 
 	nodl = *nl
-	if !(cadable(nl) != 0) {
-		if nr != nil && !(cadable(nr) != 0) {
+	if !cadable(nl) {
+		if nr != nil && !cadable(nr) {
 			goto no
 		}
 		igen(nl, &nodl, nil)
@@ -1823,7 +1822,7 @@
 
 	if nr != nil {
 		nodr = *nr
-		if !(cadable(nr) != 0) {
+		if !cadable(nr) {
 			igen(nr, &nodr, nil)
 			freer = 1
 		}
@@ -1851,7 +1850,7 @@
 			gc.Gvardef(nl)
 		}
 		t = nl.Type
-		if !(gc.Isslice(t) != 0) {
+		if !gc.Isslice(t) {
 			nodl.Type = t.Type
 			nodr.Type = nodl.Type
 			for fldcount = 0; fldcount < t.Bound; fldcount++ {
@@ -1991,7 +1990,7 @@
 	if freel != 0 {
 		regfree(&nodl)
 	}
-	return 0
+	return false
 
 yes:
 	if freer != 0 {
@@ -2000,5 +1999,5 @@
 	if freel != 0 {
 		regfree(&nodl)
 	}
-	return 1
+	return true
 }
diff --git a/src/cmd/new5g/cgen64.go b/src/cmd/new5g/cgen64.go
index e5fefe3..f89c21c 100644
--- a/src/cmd/new5g/cgen64.go
+++ b/src/cmd/new5g/cgen64.go
@@ -48,7 +48,7 @@
 	}
 
 	l = n.Left
-	if !(l.Addable != 0) {
+	if l.Addable == 0 {
 		gc.Tempname(&t1, l.Type)
 		cgen(l, &t1)
 		l = &t1
@@ -58,7 +58,6 @@
 	switch n.Op {
 	default:
 		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
-		fallthrough
 
 	case gc.OMINUS:
 		split64(res, &lo2, &hi2)
@@ -124,13 +123,13 @@
 	// setup for binary operators
 	r = n.Right
 
-	if r != nil && !(r.Addable != 0) {
+	if r != nil && r.Addable == 0 {
 		gc.Tempname(&t2, r.Type)
 		cgen(r, &t2)
 		r = &t2
 	}
 
-	if gc.Is64(r.Type) != 0 {
+	if gc.Is64(r.Type) {
 		split64(r, &lo2, &hi2)
 	}
 
@@ -141,7 +140,6 @@
 	switch n.Op {
 	default:
 		gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0))
-		fallthrough
 
 		// TODO: Constants
 	case gc.OADD:
@@ -316,7 +314,7 @@
 
 		regalloc(&s, gc.Types[gc.TUINT32], nil)
 		regalloc(&creg, gc.Types[gc.TUINT32], nil)
-		if gc.Is64(r.Type) != 0 {
+		if gc.Is64(r.Type) {
 			// shift is >= 1<<32
 			split64(r, &cl, &ch)
 
@@ -487,7 +485,7 @@
 
 		regalloc(&s, gc.Types[gc.TUINT32], nil)
 		regalloc(&creg, gc.Types[gc.TUINT32], nil)
-		if gc.Is64(r.Type) != 0 {
+		if gc.Is64(r.Type) {
 			// shift is >= 1<<32
 			split64(r, &cl, &ch)
 
@@ -721,7 +719,7 @@
 		regfree(&n1)
 	}
 
-	if gc.Is64(r.Type) != 0 {
+	if gc.Is64(r.Type) {
 		splitclean()
 	}
 	splitclean()
@@ -770,7 +768,6 @@
 	switch op {
 	default:
 		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
-		fallthrough
 
 		// cmp hi
 	// bne L
diff --git a/src/cmd/new5g/ggen.go b/src/cmd/new5g/ggen.go
index d81f405..3b007d8 100644
--- a/src/cmd/new5g/ggen.go
+++ b/src/cmd/new5g/ggen.go
@@ -36,7 +36,7 @@
 	r0 = 0
 	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
-		if !(n.Needzero != 0) {
+		if n.Needzero == 0 {
 			continue
 		}
 		if n.Class != gc.PAUTO {
@@ -176,7 +176,7 @@
 
 			p = gins(arm.ABL, nil, f)
 			gc.Afunclit(&p.To, f)
-			if proc == -1 || gc.Noreturn(p) != 0 {
+			if proc == -1 || gc.Noreturn(p) {
 				gins(obj.AUNDEF, nil, nil)
 			}
 			break
@@ -265,7 +265,7 @@
 		reg[r]--
 	}
 
-	if !(i.Addable != 0) {
+	if i.Addable == 0 {
 		gc.Tempname(&tmpi, i.Type)
 		cgen(i, &tmpi)
 		i = &tmpi
@@ -529,7 +529,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	var n1 gc.Node
 	var n2 gc.Node
 	var n3 gc.Node
@@ -709,7 +709,7 @@
 	w = uint32(nl.Type.Width)
 
 	// Avoid taking the address for simple enough types.
-	if componentgen(nil, nl) != 0 {
+	if componentgen(nil, nl) {
 		return
 	}
 
diff --git a/src/cmd/new5g/gsubr.go b/src/cmd/new5g/gsubr.go
index c1ca679..857bafa 100644
--- a/src/cmd/new5g/gsubr.go
+++ b/src/cmd/new5g/gsubr.go
@@ -73,7 +73,7 @@
 	}
 }
 
-func anyregalloc() int {
+func anyregalloc() bool {
 	var i int
 	var j int
 
@@ -86,11 +86,11 @@
 				goto ok
 			}
 		}
-		return 1
+		return true
 	ok:
 	}
 
-	return 0
+	return false
 }
 
 var regpc [REGALLOC_FMAX + 1]uint32
@@ -126,7 +126,7 @@
 		gc.Fatal("regalloc: t nil")
 	}
 	et = int(gc.Simtype[t.Etype])
-	if gc.Is64(t) != 0 {
+	if gc.Is64(t) {
 		gc.Fatal("regalloc: 64 bit type %v")
 	}
 
@@ -263,7 +263,7 @@
 	var n1 gc.Node
 	var i int64
 
-	if !(gc.Is64(n.Type) != 0) {
+	if !gc.Is64(n.Type) {
 		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
 	}
 
@@ -276,7 +276,7 @@
 	default:
 		switch n.Op {
 		default:
-			if !(dotaddable(n, &n1) != 0) {
+			if !dotaddable(n, &n1) {
 				igen(n, &n1, nil)
 				sclean[nsclean-1] = n1
 			}
@@ -359,7 +359,7 @@
 
 	// cannot have two memory operands;
 	// except 64-bit, which always copies via registers anyway.
-	if !(gc.Is64(f.Type) != 0) && !(gc.Is64(t.Type) != 0) && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
@@ -392,7 +392,7 @@
 		ft = gc.Simsimtype(con.Type)
 
 		// constants can't move directly to memory
-		if gc.Ismem(t) != 0 && !(gc.Is64(t.Type) != 0) {
+		if gc.Ismem(t) && !gc.Is64(t.Type) {
 			goto hard
 		}
 	}
@@ -412,7 +412,7 @@
 		 * integer copy and truncate
 		 */
 	case gc.TINT8<<16 | gc.TINT8: // same size
-		if !(gc.Ismem(f) != 0) {
+		if !gc.Ismem(f) {
 			a = arm.AMOVB
 			break
 		}
@@ -426,7 +426,7 @@
 		a = arm.AMOVBS
 
 	case gc.TUINT8<<16 | gc.TUINT8:
-		if !(gc.Ismem(f) != 0) {
+		if !gc.Ismem(f) {
 			a = arm.AMOVB
 			break
 		}
@@ -451,7 +451,7 @@
 		goto trunc64
 
 	case gc.TINT16<<16 | gc.TINT16: // same size
-		if !(gc.Ismem(f) != 0) {
+		if !gc.Ismem(f) {
 			a = arm.AMOVH
 			break
 		}
@@ -463,7 +463,7 @@
 		a = arm.AMOVHS
 
 	case gc.TUINT16<<16 | gc.TUINT16:
-		if !(gc.Ismem(f) != 0) {
+		if !gc.Ismem(f) {
 			a = arm.AMOVH
 			break
 		}
@@ -795,9 +795,9 @@
 	gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
 }
 
-func samaddr(f *gc.Node, t *gc.Node) int {
+func samaddr(f *gc.Node, t *gc.Node) bool {
 	if f.Op != t.Op {
-		return 0
+		return false
 	}
 
 	switch f.Op {
@@ -805,10 +805,10 @@
 		if f.Val.U.Reg != t.Val.U.Reg {
 			break
 		}
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -1245,13 +1245,13 @@
 	cleani -= 2
 }
 
-func dotaddable(n *gc.Node, n1 *gc.Node) int {
+func dotaddable(n *gc.Node, n1 *gc.Node) bool {
 	var o int
 	var oary [10]int64
 	var nn *gc.Node
 
 	if n.Op != gc.ODOT {
-		return 0
+		return false
 	}
 
 	o = gc.Dotoffset(n, oary[:], &nn)
@@ -1259,10 +1259,10 @@
 		*n1 = *nn
 		n1.Type = n.Type
 		n1.Xoffset += oary[0]
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -1276,7 +1276,7 @@
  * after successful sudoaddable,
  * to release the register used for a.
  */
-func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) int {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
 	var o int
 	var i int
 	var oary [10]int64
@@ -1295,14 +1295,14 @@
 	var t *gc.Type
 
 	if n.Type == nil {
-		return 0
+		return false
 	}
 
 	*a = obj.Addr{}
 
 	switch n.Op {
 	case gc.OLITERAL:
-		if !(gc.Isconst(n, gc.CTINT) != 0) {
+		if !gc.Isconst(n, gc.CTINT) {
 			break
 		}
 		v = gc.Mpgetfix(n.Val.U.Xval)
@@ -1321,12 +1321,12 @@
 		goto odot
 
 	case gc.OINDEX:
-		return 0
+		return false
 
 		// disabled: OINDEX case is now covered by agenr
 		// for a more suitable register allocation pattern.
 		if n.Left.Type.Etype == gc.TSTRING {
-			return 0
+			return false
 		}
 		cleani += 2
 		reg = &clean[cleani-1]
@@ -1336,12 +1336,12 @@
 		goto oindex
 	}
 
-	return 0
+	return false
 
 lit:
 	switch as {
 	default:
-		return 0
+		return false
 
 	case arm.AADD,
 		arm.ASUB,
@@ -1437,7 +1437,7 @@
 	}
 
 	*w = int(n.Type.Width)
-	if gc.Isconst(r, gc.CTINT) != 0 {
+	if gc.Isconst(r, gc.CTINT) {
 		goto oindex_const
 	}
 
@@ -1471,7 +1471,7 @@
 	}
 	regalloc(reg1, t, nil)
 	regalloc(&n3, gc.Types[gc.TINT32], reg1)
-	p2 = cgenindex(r, &n3, bool2int(gc.Debug['B'] != 0 || n.Bounded != 0))
+	p2 = cgenindex(r, &n3, gc.Debug['B'] != 0 || n.Bounded)
 	gmove(&n3, reg1)
 	regfree(&n3)
 
@@ -1487,7 +1487,7 @@
 	}
 
 	// check bounds
-	if !(gc.Debug['B'] != 0) {
+	if gc.Debug['B'] == 0 {
 		if o&ODynam != 0 {
 			n2 = *reg
 			n2.Op = gc.OINDREG
@@ -1557,7 +1557,7 @@
 
 	v = gc.Mpgetfix(r.Val.U.Xval)
 	if o&ODynam != 0 {
-		if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+		if gc.Debug['B'] == 0 && !n.Bounded {
 			n1 = *reg
 			n1.Op = gc.OINDREG
 			n1.Type = gc.Types[gc.Tptr]
@@ -1591,9 +1591,9 @@
 	goto yes
 
 yes:
-	return 1
+	return true
 
 no:
 	sudoclean()
-	return 0
+	return false
 }
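
sudoaddable now reports success as a bool; its contract, per the comment above it, is
unchanged: on success the caller emits through the returned address and then calls
sudoclean to release any register the addressing took. A sketch of that sequence, with
a, n and res assumed to be set up by the caller as in cgen:

	var addr obj.Addr
	var w int
	if sudoaddable(a, n, &addr, &w) {
		p1 := gins(a, nil, res)
		p1.From = addr
		sudoclean() // release the register sudoaddable may have allocated
	}
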
diff --git a/src/cmd/new5g/peep.go b/src/cmd/new5g/peep.go
index 7534aae..2fbb1e5 100644
--- a/src/cmd/new5g/peep.go
+++ b/src/cmd/new5g/peep.go
@@ -79,16 +79,16 @@
 			arm.AMOVW,
 			arm.AMOVF,
 			arm.AMOVD:
-			if regtyp(&p.From) != 0 {
+			if regtyp(&p.From) {
 				if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
 					if p.Scond == arm.C_SCOND_NONE {
-						if copyprop(g, r) != 0 {
+						if copyprop(g, r) {
 							excise(r)
 							t++
 							break
 						}
 
-						if subprop(r) != 0 && copyprop(g, r) != 0 {
+						if subprop(r) && copyprop(g, r) {
 							excise(r)
 							t++
 							break
@@ -102,7 +102,7 @@
 			arm.AMOVBS,
 			arm.AMOVBU:
 			if p.From.Type == obj.TYPE_REG {
-				if shortprop(r) != 0 {
+				if shortprop(r) {
 					t++
 				}
 			}
@@ -128,7 +128,7 @@
 		 * EOR -1,x,y => MVN x,y
 		 */
 		case arm.AEOR:
-			if isdconst(&p.From) != 0 && p.From.Offset == -1 {
+			if isdconst(&p.From) && p.From.Offset == -1 {
 				p.As = arm.AMVN
 				p.From.Type = obj.TYPE_REG
 				if p.Reg != 0 {
@@ -231,8 +231,8 @@
 	gc.Flowend(g)
 }
 
-func regtyp(a *obj.Addr) int {
-	return bool2int(a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15))
+func regtyp(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
 }
 
 /*
@@ -249,7 +249,7 @@
  * hopefully, then the former or latter MOV
  * will be eliminated by copy propagation.
  */
-func subprop(r0 *gc.Flow) int {
+func subprop(r0 *gc.Flow) bool {
 	var p *obj.Prog
 	var v1 *obj.Addr
 	var v2 *obj.Addr
@@ -259,12 +259,12 @@
 
 	p = r0.Prog
 	v1 = &p.From
-	if !(regtyp(v1) != 0) {
-		return 0
+	if !regtyp(v1) {
+		return false
 	}
 	v2 = &p.To
-	if !(regtyp(v2) != 0) {
-		return 0
+	if !regtyp(v2) {
+		return false
 	}
 	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
 		if gc.Uniqs(r) == nil {
@@ -276,7 +276,7 @@
 		}
 		proginfo(&info, p)
 		if info.Flags&gc.Call != 0 {
-			return 0
+			return false
 		}
 
 		if (info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
@@ -289,7 +289,7 @@
 		case arm.AMULLU,
 			arm.AMULA,
 			arm.AMVN:
-			return 0
+			return false
 		}
 
 		if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
@@ -302,7 +302,7 @@
 			}
 		}
 
-		if copyau(&p.From, v2) != 0 || copyau1(p, v2) != 0 || copyau(&p.To, v2) != 0 {
+		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
 			break
 		}
 		if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
@@ -310,7 +310,7 @@
 		}
 	}
 
-	return 0
+	return false
 
 gotit:
 	copysub(&p.To, v1, v2, 1)
@@ -338,7 +338,7 @@
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("%v last\n", r.Prog)
 	}
-	return 1
+	return true
 }
 
 /*
@@ -353,7 +353,7 @@
  *	set v1	F=1
  *	set v2	return success
  */
-func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
 	var p *obj.Prog
 	var v1 *obj.Addr
 	var v2 *obj.Addr
@@ -361,14 +361,14 @@
 	p = r0.Prog
 	v1 = &p.From
 	v2 = &p.To
-	if copyas(v1, v2) != 0 {
-		return 1
+	if copyas(v1, v2) {
+		return true
 	}
 	gactive++
 	return copy1(v1, v2, r0.S1, 0)
 }
 
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
 	var t int
 	var p *obj.Prog
 
@@ -376,7 +376,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("act set; return 1\n")
 		}
-		return 1
+		return true
 	}
 
 	r.Active = int32(gactive)
@@ -388,7 +388,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("%v", p)
 		}
-		if !(f != 0) && gc.Uniqp(r) == nil {
+		if f == 0 && gc.Uniqp(r) == nil {
 			f = 1
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; merge; f=%d", f)
@@ -401,33 +401,33 @@
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; %vrar; return 0\n", gc.Ctxt.Dconv(v2))
 			}
-			return 0
+			return false
 
 		case 3: /* set */
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; %vset; return 1\n", gc.Ctxt.Dconv(v2))
 			}
-			return 1
+			return true
 
 		case 1, /* used, substitute */
 			4: /* use and set */
 			if f != 0 {
-				if !(gc.Debug['P'] != 0) {
-					return 0
+				if gc.Debug['P'] == 0 {
+					return false
 				}
 				if t == 4 {
 					fmt.Printf("; %vused+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
 				} else {
 					fmt.Printf("; %vused and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
 				}
-				return 0
+				return false
 			}
 
 			if copyu(p, v2, v1) != 0 {
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; sub fail; return 0\n")
 				}
-				return 0
+				return false
 			}
 
 			if gc.Debug['P'] != 0 {
@@ -437,13 +437,13 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; %vused+set; return 1\n", gc.Ctxt.Dconv(v2))
 				}
-				return 1
+				return true
 			}
 		}
 
-		if !(f != 0) {
+		if f == 0 {
 			t = copyu(p, v1, nil)
-			if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
 				f = 1
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; %vset and !f; f=%d", gc.Ctxt.Dconv(v1), f)
@@ -455,13 +455,13 @@
 			fmt.Printf("\n")
 		}
 		if r.S2 != nil {
-			if !(copy1(v1, v2, r.S2, f) != 0) {
-				return 0
+			if !copy1(v1, v2, r.S2, f) {
+				return false
 			}
 		}
 	}
 
-	return 1
+	return true
 }
 
 // UNUSED
@@ -490,7 +490,7 @@
 			return
 		}
 
-		if p.As == arm.AMOVW && copyas(&p.From, c1) != 0 {
+		if p.As == arm.AMOVW && copyas(&p.From, c1) {
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
 			}
@@ -526,7 +526,7 @@
  *
  * MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
  */
-func shortprop(r *gc.Flow) int {
+func shortprop(r *gc.Flow) bool {
 	var p *obj.Prog
 	var p1 *obj.Prog
 	var r1 *gc.Flow
@@ -534,7 +534,7 @@
 	p = r.Prog
 	r1 = findpre(r, &p.From)
 	if r1 == nil {
-		return 0
+		return false
 	}
 
 	p1 = r1.Prog
@@ -543,12 +543,12 @@
 		goto gotit
 	}
 
-	if p1.As == arm.AMOVW && isdconst(&p1.From) != 0 && p1.From.Offset >= 0 && p1.From.Offset < 128 {
+	if p1.As == arm.AMOVW && isdconst(&p1.From) && p1.From.Offset >= 0 && p1.From.Offset < 128 {
 		// Loaded an immediate.
 		goto gotit
 	}
 
-	return 0
+	return false
 
 gotit:
 	if gc.Debug['P'] != 0 {
@@ -567,7 +567,7 @@
 	if gc.Debug['P'] != 0 {
 		fmt.Printf(" => %v\n", arm.Aconv(int(p.As)))
 	}
-	return 1
+	return true
 }
 
 // UNUSED
@@ -582,7 +582,7 @@
  * AXXX (x<<y),a,b
  * ..
  */
-func shiftprop(r *gc.Flow) int {
+func shiftprop(r *gc.Flow) bool {
 	var r1 *gc.Flow
 	var p *obj.Prog
 	var p1 *obj.Prog
@@ -596,11 +596,11 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
 		}
-		return 0
+		return false
 	}
 
 	n = int(p.To.Reg)
-	a = obj.Zprog.From
+	a = obj.Addr{}
 	if p.Reg != 0 && p.Reg != p.To.Reg {
 		a.Type = obj.TYPE_REG
 		a.Reg = p.Reg
@@ -618,14 +618,14 @@
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("\tbranch; FAILURE\n")
 			}
-			return 0
+			return false
 		}
 
 		if gc.Uniqp(r1) == nil {
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("\tmerge; FAILURE\n")
 			}
-			return 0
+			return false
 		}
 
 		p1 = r1.Prog
@@ -638,7 +638,7 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("\targs modified; FAILURE\n")
 				}
-				return 0
+				return false
 			}
 
 			continue
@@ -647,7 +647,7 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("\tBOTCH: noref; FAILURE\n")
 				}
-				return 0
+				return false
 			}
 		}
 
@@ -660,7 +660,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("\tnon-dpi; FAILURE\n")
 		}
-		return 0
+		return false
 
 	case arm.AAND,
 		arm.AEOR,
@@ -676,7 +676,7 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("\tcan't swap; FAILURE\n")
 				}
-				return 0
+				return false
 			}
 
 			p1.Reg = p1.From.Reg
@@ -709,14 +709,14 @@
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("\tcan't swap; FAILURE\n")
 			}
-			return 0
+			return false
 		}
 
 		if p1.Reg == 0 && int(p1.To.Reg) == n {
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("\tshift result used twice; FAILURE\n")
 			}
-			return 0
+			return false
 		}
 
 		//	case AMVN:
@@ -724,14 +724,14 @@
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("\tshift result used in shift; FAILURE\n")
 			}
-			return 0
+			return false
 		}
 
 		if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
 			}
-			return 0
+			return false
 		}
 	}
 
@@ -745,7 +745,7 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("\tinconclusive; FAILURE\n")
 				}
-				return 0
+				return false
 			}
 
 			p1 = r1.Prog
@@ -763,7 +763,7 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("\treused; FAILURE\n")
 				}
-				return 0
+				return false
 			}
 
 			break
@@ -798,13 +798,13 @@
 		o |= 2 << 5
 	}
 
-	p2.From = obj.Zprog.From
+	p2.From = obj.Addr{}
 	p2.From.Type = obj.TYPE_SHIFT
 	p2.From.Offset = int64(o)
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("\t=>%v\tSUCCEED\n", p2)
 	}
-	return 1
+	return true
 }
 
 /*
@@ -853,7 +853,7 @@
 			p = r1.Prog
 
 			if p.As == arm.AADD {
-				if isdconst(&p.From) != 0 {
+				if isdconst(&p.From) {
 					if p.From.Offset > -4096 && p.From.Offset < 4096 {
 						return r1
 					}
@@ -869,13 +869,13 @@
 	return nil
 }
 
-func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) int {
+func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
 	var a [3]obj.Addr
 	var i int
 	var n int
 
 	if r == r2 {
-		return 1
+		return true
 	}
 	n = 0
 	if p.Reg != 0 && p.Reg != p.To.Reg {
@@ -898,47 +898,47 @@
 	}
 
 	if n == 0 {
-		return 1
+		return true
 	}
 	for ; r != nil && r != r2; r = gc.Uniqs(r) {
 		p = r.Prog
 		for i = 0; i < n; i++ {
 			if copyu(p, &a[i], nil) > 1 {
-				return 0
+				return false
 			}
 		}
 	}
 
-	return 1
+	return true
 }
 
-func findu1(r *gc.Flow, v *obj.Addr) int {
+func findu1(r *gc.Flow, v *obj.Addr) bool {
 	for ; r != nil; r = r.S1 {
 		if r.Active != 0 {
-			return 0
+			return false
 		}
 		r.Active = 1
 		switch copyu(r.Prog, v, nil) {
 		case 1, /* used */
 			2, /* read-alter-rewrite */
 			4: /* set and used */
-			return 1
+			return true
 
 		case 3: /* set */
-			return 0
+			return false
 		}
 
 		if r.S2 != nil {
-			if findu1(r.S2, v) != 0 {
-				return 1
+			if findu1(r.S2, v) {
+				return true
 			}
 		}
 	}
 
-	return 0
+	return false
 }
 
-func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) int {
+func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
 	var r1 *gc.Flow
 
 	for r1 = g.Start; r1 != nil; r1 = r1.Link {
@@ -960,7 +960,7 @@
  * into
  *   MOVBU  R0<<0(R1),R0
  */
-func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
+func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
 	var r1 *gc.Flow
 	var r2 *gc.Flow
 	var r3 *gc.Flow
@@ -983,14 +983,14 @@
 				}
 
 				if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
-					if nochange(gc.Uniqs(r1), r, p1) != 0 {
+					if nochange(gc.Uniqs(r1), r, p1) {
 						if a != &p.From || v.Reg != p.To.Reg {
-							if finduse(g, r.S1, &v) != 0 {
+							if finduse(g, r.S1, &v) {
 								if p1.Reg == 0 || p1.Reg == v.Reg {
 									/* pre-indexing */
 									p.Scond |= arm.C_WBIT
 								} else {
-									return 0
+									return false
 								}
 							}
 						}
@@ -999,18 +999,18 @@
 						/* register offset */
 						case obj.TYPE_REG:
 							if gc.Nacl {
-								return 0
+								return false
 							}
-							*a = obj.Zprog.From
+							*a = obj.Addr{}
 							a.Type = obj.TYPE_SHIFT
 							a.Offset = int64(p1.From.Reg) & 15
 
 							/* scaled register offset */
 						case obj.TYPE_SHIFT:
 							if gc.Nacl {
-								return 0
+								return false
 							}
-							*a = obj.Zprog.From
+							*a = obj.Addr{}
 							a.Type = obj.TYPE_SHIFT
 							fallthrough
 
@@ -1024,7 +1024,7 @@
 							a.Reg = p1.Reg
 						}
 						excise(r1)
-						return 1
+						return true
 					}
 				}
 
@@ -1041,11 +1041,11 @@
 							a.Reg = p1.To.Reg
 							a.Offset = p1.From.Offset
 							p.Scond |= arm.C_PBIT
-							if !(finduse(g, r, &r1.Prog.To) != 0) {
+							if !finduse(g, r, &r1.Prog.To) {
 								excise(r1)
 							}
 							excise(r2)
-							return 1
+							return true
 						}
 					}
 				}
@@ -1062,11 +1062,11 @@
 			a.Offset = p1.From.Offset
 			p.Scond |= arm.C_PBIT
 			excise(r1)
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -1098,7 +1098,7 @@
 				return 0
 			}
 
-			if copyau(&p.To, v) != 0 {
+			if copyau(&p.To, v) {
 				if p.Scond&arm.C_WBIT != 0 {
 					return 2
 				}
@@ -1119,7 +1119,7 @@
 				return 0
 			}
 
-			if copyau(&p.From, v) != 0 {
+			if copyau(&p.From, v) {
 				if p.Scond&arm.C_WBIT != 0 {
 					return 2
 				}
@@ -1170,7 +1170,7 @@
 			if copysub(&p.From, v, s, 1) != 0 {
 				return 1
 			}
-			if !(copyas(&p.To, v) != 0) {
+			if !copyas(&p.To, v) {
 				if copysub(&p.To, v, s, 1) != 0 {
 					return 1
 				}
@@ -1178,20 +1178,20 @@
 			return 0
 		}
 
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			if p.Scond != arm.C_SCOND_NONE {
 				return 2
 			}
-			if copyau(&p.From, v) != 0 {
+			if copyau(&p.From, v) {
 				return 4
 			}
 			return 3
 		}
 
-		if copyau(&p.From, v) != 0 {
+		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 		return 0
@@ -1243,7 +1243,7 @@
 			if copysub1(p, v, s, 1) != 0 {
 				return 1
 			}
-			if !(copyas(&p.To, v) != 0) {
+			if !copyas(&p.To, v) {
 				if copysub(&p.To, v, s, 1) != 0 {
 					return 1
 				}
@@ -1251,29 +1251,29 @@
 			return 0
 		}
 
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			if p.Scond != arm.C_SCOND_NONE {
 				return 2
 			}
 			if p.Reg == 0 {
 				p.Reg = p.To.Reg
 			}
-			if copyau(&p.From, v) != 0 {
+			if copyau(&p.From, v) {
 				return 4
 			}
-			if copyau1(p, v) != 0 {
+			if copyau1(p, v) {
 				return 4
 			}
 			return 3
 		}
 
-		if copyau(&p.From, v) != 0 {
+		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau1(p, v) != 0 {
+		if copyau1(p, v) {
 			return 1
 		}
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 		return 0
@@ -1301,10 +1301,10 @@
 			return copysub1(p, v, s, 1)
 		}
 
-		if copyau(&p.From, v) != 0 {
+		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau1(p, v) != 0 {
+		if copyau1(p, v) {
 			return 1
 		}
 		return 0
@@ -1317,7 +1317,7 @@
 			return 0
 		}
 
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 		return 0
@@ -1357,7 +1357,7 @@
 			return 0
 		}
 
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 4
 		}
 		return 3
@@ -1411,11 +1411,11 @@
  * could be set/use depending on
  * semantics
  */
-func copyas(a *obj.Addr, v *obj.Addr) int {
-	if regtyp(v) != 0 {
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if regtyp(v) {
 		if a.Type == v.Type {
 			if a.Reg == v.Reg {
-				return 1
+				return true
 			}
 		}
 	} else if v.Type == obj.TYPE_CONST { /* for constprop */
@@ -1424,7 +1424,7 @@
 				if a.Sym == v.Sym {
 					if a.Reg == v.Reg {
 						if a.Offset == v.Offset {
-							return 1
+							return true
 						}
 					}
 				}
@@ -1432,15 +1432,15 @@
 		}
 	}
 
-	return 0
+	return false
 }
 
-func sameaddr(a *obj.Addr, v *obj.Addr) int {
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
 	if a.Type != v.Type {
-		return 0
+		return false
 	}
-	if regtyp(v) != 0 && a.Reg == v.Reg {
-		return 1
+	if regtyp(v) && a.Reg == v.Reg {
+		return true
 	}
 
 	// TODO(rsc): Change v->type to v->name and enable.
@@ -1448,54 +1448,54 @@
 	//	if(v->offset == a->offset)
 	//		return 1;
 	//}
-	return 0
+	return false
 }
 
 /*
  * either direct or indirect
  */
-func copyau(a *obj.Addr, v *obj.Addr) int {
-	if copyas(a, v) != 0 {
-		return 1
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
 	}
 	if v.Type == obj.TYPE_REG {
 		if a.Type == obj.TYPE_ADDR && a.Reg != 0 {
 			if a.Reg == v.Reg {
-				return 1
+				return true
 			}
 		} else if a.Type == obj.TYPE_MEM {
 			if a.Reg == v.Reg {
-				return 1
+				return true
 			}
 		} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
 			if a.Reg == v.Reg {
-				return 1
+				return true
 			}
 			if a.Offset == int64(v.Reg) {
-				return 1
+				return true
 			}
 		} else if a.Type == obj.TYPE_SHIFT {
 			if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
-				return 1
+				return true
 			}
 			if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
-				return 1
+				return true
 			}
 		}
 	}
 
-	return 0
+	return false
 }
 
 /*
  * compare v to the center
  * register in p (p->reg)
  */
-func copyau1(p *obj.Prog, v *obj.Addr) int {
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
 	if v.Type == obj.TYPE_REG && v.Reg == 0 {
-		return 0
+		return false
 	}
-	return bool2int(p.Reg == v.Reg)
+	return p.Reg == v.Reg
 }
 
 /*
@@ -1504,7 +1504,7 @@
  */
 func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 	if f != 0 {
-		if copyau(a, v) != 0 {
+		if copyau(a, v) {
 			if a.Type == obj.TYPE_SHIFT {
 				if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
 					a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
@@ -1530,7 +1530,7 @@
 
 func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
 	if f != 0 {
-		if copyau1(p1, v) != 0 {
+		if copyau1(p1, v) {
 			p1.Reg = s.Reg
 		}
 	}
@@ -1664,11 +1664,11 @@
 	Keepbranch
 )
 
-func isbranch(p *obj.Prog) int {
-	return bool2int((arm.ABEQ <= p.As) && (p.As <= arm.ABLE))
+func isbranch(p *obj.Prog) bool {
+	return (arm.ABEQ <= p.As) && (p.As <= arm.ABLE)
 }
 
-func predicable(p *obj.Prog) int {
+func predicable(p *obj.Prog) bool {
 	switch p.As {
 	case obj.ANOP,
 		obj.AXXX,
@@ -1678,13 +1678,13 @@
 		arm.AWORD,
 		arm.ABCASE,
 		arm.ACASE:
-		return 0
+		return false
 	}
 
-	if isbranch(p) != 0 {
-		return 0
+	if isbranch(p) {
+		return false
 	}
-	return 1
+	return true
 }
 
 /*
@@ -1694,7 +1694,7 @@
  *
  * C_SBIT may also have been set explicitly in p->scond.
  */
-func modifiescpsr(p *obj.Prog) int {
+func modifiescpsr(p *obj.Prog) bool {
 	switch p.As {
 	case arm.AMULLU,
 		arm.AMULA,
@@ -1709,13 +1709,13 @@
 		arm.AMOD,
 		arm.AMODU,
 		arm.ABL:
-		return 1
+		return true
 	}
 
 	if p.Scond&arm.C_SBIT != 0 {
-		return 1
+		return true
 	}
-	return 0
+	return false
 }
 
 /*
@@ -1741,7 +1741,7 @@
 		if r.Prog.As != obj.ANOP {
 			j.len++
 		}
-		if !(r.S1 != nil) && !(r.S2 != nil) {
+		if r.S1 == nil && r.S2 == nil {
 			j.end = r.Link
 			return End
 		}
@@ -1751,13 +1751,13 @@
 			return Branch
 		}
 
-		if modifiescpsr(r.Prog) != 0 {
+		if modifiescpsr(r.Prog) {
 			j.end = r.S1
 			return Setcond
 		}
 
 		r = r.S1
-		if !(j.len < 4) {
+		if j.len >= 4 {
 			break
 		}
 	}
@@ -1798,7 +1798,7 @@
 					r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
 				}
 			}
-		} else if predicable(r.Prog) != 0 {
+		} else if predicable(r.Prog) {
 			r.Prog.Scond = uint8(int(r.Prog.Scond&^arm.C_SCOND) | pred)
 		}
 		if r.S1 != r.Link {
@@ -1820,7 +1820,7 @@
 	var j2 Joininfo
 
 	for r = g.Start; r != nil; r = r.Link {
-		if isbranch(r.Prog) != 0 {
+		if isbranch(r.Prog) {
 			t1 = joinsplit(r.S1, &j1)
 			t2 = joinsplit(r.S2, &j2)
 			if j1.last.Link != j2.start {
@@ -1844,20 +1844,20 @@
 	}
 }
 
-func isdconst(a *obj.Addr) int {
-	return bool2int(a.Type == obj.TYPE_CONST)
+func isdconst(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_CONST
 }
 
-func isfloatreg(a *obj.Addr) int {
-	return bool2int(arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
+func isfloatreg(a *obj.Addr) bool {
+	return arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15
 }
 
-func stackaddr(a *obj.Addr) int {
-	return bool2int(regtyp(a) != 0 && a.Reg == arm.REGSP)
+func stackaddr(a *obj.Addr) bool {
+	return regtyp(a) && a.Reg == arm.REGSP
 }
 
-func smallindir(a *obj.Addr, reg *obj.Addr) int {
-	return bool2int(reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096)
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
 }
 
 func excise(r *gc.Flow) {
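
Note that the conversion in peep.go is selective: copyu and copysub keep their small int result codes (1 used, 2 read-alter-rewrite, 3 set, 4 set and used), and only helpers whose results are pure truth values, such as copyas, copyau, copyprop and copy1, move to bool. A compilable sketch of that split, using hypothetical names:

package main

import "fmt"

// Result codes kept as int, mirroring the copyu convention in the patch.
const (
	miss = 0 // not mentioned
	used = 1
	rar  = 2 // read-alter-rewrite
	set  = 3
	both = 4 // set and used
)

// classify is a stand-in for copyu: its result is a multi-way code,
// so it stays an int.
func classify(op string) int {
	switch op {
	case "MOVW x,v":
		return set
	case "ADD v,v":
		return both
	case "CMP v,x":
		return used
	}
	return miss
}

// refersTo is a pure truth value, so it becomes a bool, like the
// copyas/copyau family.
func refersTo(op string) bool {
	t := classify(op)
	return t == used || t == rar || t == both
}

func main() {
	fmt.Println(classify("ADD v,v"), refersTo("ADD v,v"))
}
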
diff --git a/src/cmd/new6g/cgen.go b/src/cmd/new6g/cgen.go
index 83c063e..36fa62c 100644
--- a/src/cmd/new6g/cgen.go
+++ b/src/cmd/new6g/cgen.go
@@ -59,7 +59,7 @@
 		gc.OSLICESTR,
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
-		if res.Op != gc.ONAME || !(res.Addable != 0) {
+		if res.Op != gc.ONAME || res.Addable == 0 {
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_slice(n, &n1)
 			cgen(&n1, res)
@@ -69,7 +69,7 @@
 		goto ret
 
 	case gc.OEFACE:
-		if res.Op != gc.ONAME || !(res.Addable != 0) {
+		if res.Op != gc.ONAME || res.Addable == 0 {
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_eface(n, &n1)
 			cgen(&n1, res)
@@ -91,7 +91,7 @@
 		}
 	}
 
-	if gc.Isfat(n.Type) != 0 {
+	if gc.Isfat(n.Type) {
 		if n.Type.Width < 0 {
 			gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
 		}
@@ -99,7 +99,7 @@
 		goto ret
 	}
 
-	if !(res.Addable != 0) {
+	if res.Addable == 0 {
 		if n.Ullman > res.Ullman {
 			regalloc(&n1, n.Type, res)
 			cgen(n, &n1)
@@ -118,7 +118,7 @@
 			goto gen
 		}
 
-		if gc.Complexop(n, res) != 0 {
+		if gc.Complexop(n, res) {
 			gc.Complexgen(n, res)
 			goto ret
 		}
@@ -126,7 +126,7 @@
 		f = 1 // gen thru register
 		switch n.Op {
 		case gc.OLITERAL:
-			if gc.Smallintconst(n) != 0 {
+			if gc.Smallintconst(n) {
 				f = 0
 			}
 
@@ -134,9 +134,9 @@
 			f = 0
 		}
 
-		if !(gc.Iscomplex[n.Type.Etype] != 0) {
+		if gc.Iscomplex[n.Type.Etype] == 0 {
 			a = optoas(gc.OAS, res.Type)
-			if sudoaddable(a, res, &addr) != 0 {
+			if sudoaddable(a, res, &addr) {
 				if f != 0 {
 					regalloc(&n2, res.Type, nil)
 					cgen(n, &n2)
@@ -167,12 +167,12 @@
 	switch n.Op {
 	case gc.OSPTR,
 		gc.OLEN:
-		if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+		if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
 			n.Addable = n.Left.Addable
 		}
 
 	case gc.OCAP:
-		if gc.Isslice(n.Left.Type) != 0 {
+		if gc.Isslice(n.Left.Type) {
 			n.Addable = n.Left.Addable
 		}
 
@@ -180,7 +180,7 @@
 		n.Addable = n.Left.Addable
 	}
 
-	if gc.Complexop(n, res) != 0 {
+	if gc.Complexop(n, res) {
 		gc.Complexgen(n, res)
 		goto ret
 	}
@@ -204,9 +204,9 @@
 		}
 	}
 
-	if !(gc.Iscomplex[n.Type.Etype] != 0) {
+	if gc.Iscomplex[n.Type.Etype] == 0 {
 		a = optoas(gc.OAS, n.Type)
-		if sudoaddable(a, n, &addr) != 0 {
+		if sudoaddable(a, n, &addr) {
 			if res.Op == gc.OREGISTER {
 				p1 = gins(a, nil, res)
 				p1.From = addr
@@ -241,11 +241,11 @@
 		p1 = gc.Gbranch(obj.AJMP, nil, 0)
 
 		p2 = gc.Pc
-		gmove(gc.Nodbool(1), res)
+		gmove(gc.Nodbool(true), res)
 		p3 = gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
-		gmove(gc.Nodbool(0), res)
+		gmove(gc.Nodbool(false), res)
 		gc.Patch(p3, gc.Pc)
 		goto ret
 
@@ -353,7 +353,7 @@
 
 		// pointer is the first word of string or slice.
 	case gc.OSPTR:
-		if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 			p1 = gins(x86.ALEAQ, nil, &n1)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@@ -368,7 +368,7 @@
 		regfree(&n1)
 
 	case gc.OLEN:
-		if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
 			// map and chan have len in the first int-sized word.
 			// a zero pointer means zero length
 			regalloc(&n1, gc.Types[gc.Tptr], res)
@@ -391,7 +391,7 @@
 			break
 		}
 
-		if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
 			// both slice and string have len one pointer into the struct.
 			// a zero pointer means zero length
 			igen(nl, &n1, res)
@@ -406,7 +406,7 @@
 		gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
 
 	case gc.OCAP:
-		if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+		if gc.Istype(nl.Type, gc.TCHAN) {
 			// chan has cap in the second int-sized word.
 			// a zero pointer means zero length
 			regalloc(&n1, gc.Types[gc.Tptr], res)
@@ -430,7 +430,7 @@
 			break
 		}
 
-		if gc.Isslice(nl.Type) != 0 {
+		if gc.Isslice(nl.Type) {
 			igen(nl, &n1, res)
 			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
 			n1.Xoffset += int64(gc.Array_cap)
@@ -442,11 +442,11 @@
 		gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
 
 	case gc.OADDR:
-		if n.Bounded != 0 { // let race detector avoid nil checks
+		if n.Bounded { // let race detector avoid nil checks
 			gc.Disable_checknil++
 		}
 		agen(nl, res)
-		if n.Bounded != 0 {
+		if n.Bounded {
 			gc.Disable_checknil--
 		}
 
@@ -475,7 +475,7 @@
 			cgen_div(int(n.Op), &n1, nr, res)
 			regfree(&n1)
 		} else {
-			if !(gc.Smallintconst(nr) != 0) {
+			if !gc.Smallintconst(nr) {
 				regalloc(&n2, nr.Type, res)
 				cgen(nr, &n2)
 			} else {
@@ -491,7 +491,7 @@
 	case gc.OLSH,
 		gc.ORSH,
 		gc.OLROT:
-		cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+		cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
 	}
 
 	goto ret
@@ -513,7 +513,7 @@
 	 * register for the computation.
 	 */
 sbop: // symmetric binary
-	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) != 0 || (nr.Op == gc.OLITERAL && !(gc.Smallintconst(nr) != 0)))) {
+	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
 		r = nl
 		nl = nr
 		nr = r
@@ -541,14 +541,14 @@
 				}
 			 *
 		*/
-		if gc.Smallintconst(nr) != 0 {
+		if gc.Smallintconst(nr) {
 			n2 = *nr
 		} else {
 			regalloc(&n2, nr.Type, nil)
 			cgen(nr, &n2)
 		}
 	} else {
-		if gc.Smallintconst(nr) != 0 {
+		if gc.Smallintconst(nr) {
 			n2 = *nr
 		} else {
 			regalloc(&n2, nr.Type, res)
@@ -591,7 +591,7 @@
 		gc.Dump("cgenr-n", n)
 	}
 
-	if gc.Isfat(n.Type) != 0 {
+	if gc.Isfat(n.Type) {
 		gc.Fatal("cgenr on fat node")
 	}
 
@@ -674,8 +674,8 @@
 		}
 		if nl.Addable != 0 {
 			cgenr(nr, &n1, nil)
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
-				if gc.Isfixedarray(nl.Type) != 0 {
+			if !gc.Isconst(nl, gc.CTSTR) {
+				if gc.Isfixedarray(nl.Type) {
 					agenr(nl, &n3, res)
 				} else {
 					igen(nl, &nlen, res)
@@ -697,11 +697,11 @@
 		nr = &tmp
 
 	irad:
-		if !(gc.Isconst(nl, gc.CTSTR) != 0) {
-			if gc.Isfixedarray(nl.Type) != 0 {
+		if !gc.Isconst(nl, gc.CTSTR) {
+			if gc.Isfixedarray(nl.Type) {
 				agenr(nl, &n3, res)
 			} else {
-				if !(nl.Addable != 0) {
+				if nl.Addable == 0 {
 					// igen will need an addressable node.
 					gc.Tempname(&tmp2, nl.Type)
 
@@ -720,7 +720,7 @@
 			}
 		}
 
-		if !(gc.Isconst(nr, gc.CTINT) != 0) {
+		if !gc.Isconst(nr, gc.CTINT) {
 			cgenr(nr, &n1, nil)
 		}
 
@@ -733,15 +733,15 @@
 
 		// constant index
 	index:
-		if gc.Isconst(nr, gc.CTINT) != 0 {
-			if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nr, gc.CTINT) {
+			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Fatal("constant string constant index") // front end should handle
 			}
 			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
-			if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
-				if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				if gc.Debug['B'] == 0 && !n.Bounded {
 					gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
-					if gc.Smallintconst(nr) != 0 {
+					if gc.Smallintconst(nr) {
 						gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
 					} else {
 						regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil)
@@ -776,17 +776,17 @@
 		gmove(&n1, &n2)
 		regfree(&n1)
 
-		if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+		if gc.Debug['B'] == 0 && !n.Bounded {
 			// check bounds
 			t = gc.Types[gc.Simtype[gc.TUINT]]
 
-			if gc.Is64(nr.Type) != 0 {
+			if gc.Is64(nr.Type) {
 				t = gc.Types[gc.TUINT64]
 			}
-			if gc.Isconst(nl, gc.CTSTR) != 0 {
+			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
-			} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
-				if gc.Is64(nr.Type) != 0 {
+			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				if gc.Is64(nr.Type) {
 					regalloc(&n5, t, nil)
 					gmove(&nlen, &n5)
 					regfree(&nlen)
@@ -794,7 +794,7 @@
 				}
 			} else {
 				gc.Nodconst(&nlen, t, nl.Type.Bound)
-				if !(gc.Smallintconst(&nlen) != 0) {
+				if !gc.Smallintconst(&nlen) {
 					regalloc(&n5, t, nil)
 					gmove(&nlen, &n5)
 					nlen = n5
@@ -808,7 +808,7 @@
 			gc.Patch(p1, gc.Pc)
 		}
 
-		if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n3, gc.Types[gc.Tptr], res)
 			p1 = gins(x86.ALEAQ, nil, &n3)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@@ -865,7 +865,7 @@
 		n = n.Left
 	}
 
-	if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
 		// Use of a nil interface or nil slice.
 		// Create a temporary we can take the address of and read.
 		// The generated code is just going to panic, so it need not
@@ -934,7 +934,7 @@
 		}
 
 		// should only get here for heap vars or paramref
-		if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+		if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
 			gc.Dump("bad agen", n)
 			gc.Fatal("agen: bad ONAME class %#x", n.Class)
 		}
@@ -1044,10 +1044,10 @@
 	// Could do the same for slice except that we need
 	// to use the real index for the bounds checking.
 	case gc.OINDEX:
-		if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
-			if gc.Isconst(n.Right, gc.CTINT) != 0 {
+		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+			if gc.Isconst(n.Right, gc.CTINT) {
 				// Compute &a.
-				if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+				if gc.Isptr[n.Left.Type.Etype] == 0 {
 					igen(n.Left, a, res)
 				} else {
 					igen(n.Left, &n1, res)
@@ -1096,7 +1096,7 @@
 	}
 
 	if n == nil {
-		n = gc.Nodbool(1)
+		n = gc.Nodbool(true)
 	}
 
 	if n.Ninit != nil {
@@ -1132,7 +1132,7 @@
 
 		// need to ask if it is bool?
 	case gc.OLITERAL:
-		if !true_ == !(n.Val.U.Bval != 0) {
+		if !true_ == (n.Val.U.Bval == 0) {
 			gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
 		}
 		goto ret
@@ -1228,7 +1228,7 @@
 			nr = r
 		}
 
-		if gc.Isslice(nl.Type) != 0 {
+		if gc.Isslice(nl.Type) {
 			// front end should only leave cmp to literal nil
 			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
 				gc.Yyerror("illegal slice comparison")
@@ -1246,7 +1246,7 @@
 			break
 		}
 
-		if gc.Isinter(nl.Type) != 0 {
+		if gc.Isinter(nl.Type) {
 			// front end should only leave cmp to literal nil
 			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
 				gc.Yyerror("illegal interface comparison")
@@ -1288,7 +1288,7 @@
 		regalloc(&n1, nl.Type, nil)
 		cgen(nl, &n1)
 
-		if gc.Smallintconst(nr) != 0 {
+		if gc.Smallintconst(nr) {
 			gins(optoas(gc.OCMP, nr.Type), &n1, nr)
 			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
 			regfree(&n1)
@@ -1378,14 +1378,14 @@
 
 	case gc.OINDEX:
 		t = n.Left.Type
-		if !(gc.Isfixedarray(t) != 0) {
+		if !gc.Isfixedarray(t) {
 			break
 		}
 		off = stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
-		if gc.Isconst(n.Right, gc.CTINT) != 0 {
+		if gc.Isconst(n.Right, gc.CTINT) {
 			return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
 		}
 		return 1000
@@ -1453,7 +1453,7 @@
 	}
 
 	// Avoid taking the address for simple enough types.
-	if componentgen(n, ns) != 0 {
+	if componentgen(n, ns) {
 		return
 	}
 
@@ -1617,19 +1617,19 @@
 	restx(&cx, &oldcx)
 }
 
-func cadable(n *gc.Node) int {
-	if !(n.Addable != 0) {
+func cadable(n *gc.Node) bool {
+	if n.Addable == 0 {
 		// don't know how it happens,
 		// but it does
-		return 0
+		return false
 	}
 
 	switch n.Op {
 	case gc.ONAME:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -1640,7 +1640,7 @@
  * nr is N when assigning a zero value.
  * return 1 if can do, 0 if can't.
  */
-func componentgen(nr *gc.Node, nl *gc.Node) int {
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
 	var nodl gc.Node
 	var nodr gc.Node
 	var tmp gc.Node
@@ -1662,12 +1662,12 @@
 		t = nl.Type
 
 		// Slices are ok.
-		if gc.Isslice(t) != 0 {
+		if gc.Isslice(t) {
 			break
 		}
 
 		// Small arrays are ok.
-		if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+		if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
 			break
 		}
 
@@ -1679,7 +1679,7 @@
 		fldcount = 0
 
 		for t = nl.Type.Type; t != nil; t = t.Down {
-			if gc.Isfat(t.Type) != 0 {
+			if gc.Isfat(t.Type) {
 				goto no
 			}
 			if t.Etype != gc.TFIELD {
@@ -1698,8 +1698,8 @@
 	}
 
 	nodl = *nl
-	if !(cadable(nl) != 0) {
-		if nr != nil && !(cadable(nr) != 0) {
+	if !cadable(nl) {
+		if nr != nil && !cadable(nr) {
 			goto no
 		}
 		igen(nl, &nodl, nil)
@@ -1708,7 +1708,7 @@
 
 	if nr != nil {
 		nodr = *nr
-		if !(cadable(nr) != 0) {
+		if !cadable(nr) {
 			igen(nr, &nodr, nil)
 			freer = 1
 		}
@@ -1736,7 +1736,7 @@
 			gc.Gvardef(nl)
 		}
 		t = nl.Type
-		if !(gc.Isslice(t) != 0) {
+		if !gc.Isslice(t) {
 			nodl.Type = t.Type
 			nodr.Type = nodl.Type
 			for fldcount = 0; fldcount < t.Bound; fldcount++ {
@@ -1876,7 +1876,7 @@
 	if freel != 0 {
 		regfree(&nodl)
 	}
-	return 0
+	return false
 
 yes:
 	if freer != 0 {
@@ -1885,5 +1885,5 @@
 	if freel != 0 {
 		regfree(&nodl)
 	}
-	return 1
+	return true
 }
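
Many hunks in cgen.go are the same local rewrite: once the operand is known to be an integer flag, a converted test of the form !(x != 0) collapses to x == 0, and gc.Nodbool now takes a Go bool rather than 0/1. A small self-contained check of the boolean identity behind the rewrite:

package main

import "fmt"

func main() {
	// For an integer flag x, !(x != 0) and x == 0 agree on every value,
	// which is why the converted sources can use the shorter form.
	for _, x := range []int{-1, 0, 1, 42} {
		before := !(x != 0)
		after := x == 0
		fmt.Println(x, before == after) // true for every x
	}
}
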
diff --git a/src/cmd/new6g/ggen.go b/src/cmd/new6g/ggen.go
index 2e323c9..be6ff21 100644
--- a/src/cmd/new6g/ggen.go
+++ b/src/cmd/new6g/ggen.go
@@ -38,7 +38,7 @@
 	// iterate through declarations - they are sorted in decreasing xoffset order.
 	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
-		if !(n.Needzero != 0) {
+		if n.Needzero == 0 {
 			continue
 		}
 		if n.Class != gc.PAUTO {
@@ -174,7 +174,7 @@
 
 			p = gins(obj.ACALL, nil, f)
 			gc.Afunclit(&p.To, f)
-			if proc == -1 || gc.Noreturn(p) != 0 {
+			if proc == -1 || gc.Noreturn(p) {
 				gins(obj.AUNDEF, nil, nil)
 			}
 			break
@@ -224,7 +224,7 @@
 		if proc == 1 {
 			ginscall(gc.Newproc, 0)
 		} else {
-			if !(gc.Hasdefer != 0) {
+			if gc.Hasdefer == 0 {
 				gc.Fatal("hasdefer=0 but has defer")
 			}
 			ginscall(gc.Deferproc, 0)
@@ -265,7 +265,7 @@
 
 	i = i.Left // interface
 
-	if !(i.Addable != 0) {
+	if i.Addable == 0 {
 		gc.Tempname(&tmpi, i.Type)
 		cgen(i, &tmpi)
 		i = &tmpi
@@ -499,9 +499,9 @@
 	check = 0
 	if gc.Issigned[t.Etype] != 0 {
 		check = 1
-		if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
+		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
 			check = 0
-		} else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
 			check = 0
 		}
 	}
@@ -578,7 +578,7 @@
 	}
 
 	savex(x86.REG_DX, &dx, &olddx, res, t)
-	if !(gc.Issigned[t.Etype] != 0) {
+	if gc.Issigned[t.Etype] == 0 {
 		gc.Nodconst(&n4, t, 0)
 		gmove(&n4, &dx)
 	} else {
@@ -618,7 +618,7 @@
 	*oldx = gc.Node{}
 
 	gc.Nodreg(x, t, dr)
-	if r > 1 && !(gc.Samereg(x, res) != 0) {
+	if r > 1 && !gc.Samereg(x, res) {
 		regalloc(oldx, gc.Types[gc.TINT64], nil)
 		x.Type = gc.Types[gc.TINT64]
 		gmove(x, oldx)
@@ -760,7 +760,7 @@
 		a = x86.AIMULW
 	}
 
-	if !(gc.Smallintconst(nr) != 0) {
+	if !gc.Smallintconst(nr) {
 		regalloc(&n3, nl.Type, nil)
 		cgen(nr, &n3)
 		gins(a, &n3, &n2)
@@ -820,7 +820,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	var n1 gc.Node
 	var n2 gc.Node
 	var n3 gc.Node
@@ -884,14 +884,14 @@
 	gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
 
 	oldcx = gc.Node{}
-	if rcx > 0 && !(gc.Samereg(&cx, res) != 0) {
+	if rcx > 0 && !gc.Samereg(&cx, res) {
 		regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
 		gmove(&cx, &oldcx)
 	}
 
 	cx.Type = tcount
 
-	if gc.Samereg(&cx, res) != 0 {
+	if gc.Samereg(&cx, res) {
 		regalloc(&n2, nl.Type, nil)
 	} else {
 		regalloc(&n2, nl.Type, res)
@@ -909,7 +909,7 @@
 	regfree(&n3)
 
 	// test and fix up large shifts
-	if !(bounded != 0) {
+	if !bounded {
 		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
 		gins(optoas(gc.OCMP, tcount), &n1, &n3)
 		p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
@@ -1007,7 +1007,7 @@
 	w = nl.Type.Width
 
 	// Avoid taking the address for simple enough types.
-	if componentgen(nil, nl) != 0 {
+	if componentgen(nil, nl) {
 		return
 	}
 
@@ -1028,7 +1028,7 @@
 		for {
 			tmp14 := q
 			q--
-			if !(tmp14 > 0) {
+			if tmp14 <= 0 {
 				break
 			}
 			n1.Type = z.Type
@@ -1048,7 +1048,7 @@
 		for {
 			tmp15 := c
 			c--
-			if !(tmp15 > 0) {
+			if tmp15 <= 0 {
 				break
 			}
 			n1.Type = z.Type
@@ -1156,7 +1156,7 @@
 
 		p2.From.Type = obj.TYPE_REG
 		p2.From.Reg = x86.REG_AX
-		if regtyp(&p.From) != 0 {
+		if regtyp(&p.From) {
 			p2.To.Type = obj.TYPE_MEM
 			p2.To.Reg = p.From.Reg
 		} else {
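
In ggen.go the bounded flag of cgen_shift becomes a bool, so cgen.go can pass n.Bounded straight through instead of converting it to int at the call site. A toy version of the signature, with a hypothetical shift helper, showing how the unbounded case still has to guard large counts:

package main

import "fmt"

// shift is a hypothetical stand-in for cgen_shift's interface: the bounded
// flag is a bool end to end, so a caller can hand over a bool field directly.
func shift(op string, bounded bool, x uint32, s uint) uint32 {
	if !bounded && s >= 32 {
		// test and fix up large shifts, as the generated code must when the
		// front end could not prove the count is in range
		return 0
	}
	if op == "<<" {
		return x << s
	}
	return x >> s
}

func main() {
	fmt.Println(shift("<<", false, 1, 40)) // unbounded, clamped to 0
	fmt.Println(shift("<<", true, 1, 3))   // bounded, 8
}
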
diff --git a/src/cmd/new6g/gsubr.go b/src/cmd/new6g/gsubr.go
index 380c9e9..0f50723 100644
--- a/src/cmd/new6g/gsubr.go
+++ b/src/cmd/new6g/gsubr.go
@@ -103,7 +103,7 @@
 	}
 }
 
-func anyregalloc() int {
+func anyregalloc() bool {
 	var i int
 	var j int
 
@@ -116,11 +116,11 @@
 				goto ok
 			}
 		}
-		return 1
+		return true
 	ok:
 	}
 
-	return 0
+	return false
 }
 
 var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32
@@ -170,7 +170,6 @@
 			fmt.Printf("%d %p\n", i, regpc[i])
 		}
 		gc.Fatal("out of fixed registers")
-		fallthrough
 
 	case gc.TFLOAT32,
 		gc.TFLOAT64:
@@ -187,7 +186,6 @@
 			}
 		}
 		gc.Fatal("out of floating registers")
-		fallthrough
 
 	case gc.TCOMPLEX64,
 		gc.TCOMPLEX128:
@@ -339,7 +337,7 @@
 	}
 
 	// cannot have two memory operands
-	if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+	if gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
@@ -350,7 +348,7 @@
 		ft = tt // so big switch will choose a simple mov
 
 		// some constants can't move directly to memory.
-		if gc.Ismem(t) != 0 {
+		if gc.Ismem(t) {
 			// float constants come from memory.
 			if gc.Isfloat[tt] != 0 {
 				goto hard
@@ -379,7 +377,6 @@
 	switch uint32(ft)<<16 | uint32(tt) {
 	default:
 		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
-		fallthrough
 
 		/*
 		 * integer copy and truncate
@@ -699,9 +696,9 @@
 	return
 }
 
-func samaddr(f *gc.Node, t *gc.Node) int {
+func samaddr(f *gc.Node, t *gc.Node) bool {
 	if f.Op != t.Op {
-		return 0
+		return false
 	}
 
 	switch f.Op {
@@ -709,10 +706,10 @@
 		if f.Val.U.Reg != t.Val.U.Reg {
 			break
 		}
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -722,9 +719,9 @@
 func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
 	var w int32
 	var p *obj.Prog
+	var af obj.Addr
 	//	Node nod;
 
-	var af obj.Addr
 	var at obj.Addr
 
 	//	if(f != N && f->op == OINDEX) {
@@ -751,12 +748,12 @@
 		x86.AMOVQ,
 		x86.AMOVSS,
 		x86.AMOVSD:
-		if f != nil && t != nil && samaddr(f, t) != 0 {
+		if f != nil && t != nil && samaddr(f, t) {
 			return nil
 		}
 
 	case x86.ALEAQ:
-		if f != nil && gc.Isconst(f, gc.CTNIL) != 0 {
+		if f != nil && gc.Isconst(f, gc.CTNIL) {
 			gc.Fatal("gins LEAQ nil %v", gc.Tconv(f.Type, 0))
 		}
 	}
@@ -1326,21 +1323,21 @@
 
 var cleani int = 0
 
-func xgen(n *gc.Node, a *gc.Node, o int) int {
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
 	regalloc(a, gc.Types[gc.Tptr], nil)
 
 	if o&ODynam != 0 {
 		if n.Addable != 0 {
 			if n.Op != gc.OINDREG {
 				if n.Op != gc.OREGISTER {
-					return 1
+					return true
 				}
 			}
 		}
 	}
 
 	agen(n, a)
-	return 0
+	return false
 }
 
 func sudoclean() {
@@ -1364,7 +1361,7 @@
  * after successful sudoaddable,
  * to release the register used for a.
  */
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
 	var o int
 	var i int
 	var oary [10]int64
@@ -1383,14 +1380,14 @@
 	var t *gc.Type
 
 	if n.Type == nil {
-		return 0
+		return false
 	}
 
 	*a = obj.Addr{}
 
 	switch n.Op {
 	case gc.OLITERAL:
-		if !(gc.Isconst(n, gc.CTINT) != 0) {
+		if !gc.Isconst(n, gc.CTINT) {
 			break
 		}
 		v = gc.Mpgetfix(n.Val.U.Xval)
@@ -1409,22 +1406,22 @@
 		goto odot
 
 	case gc.OINDEX:
-		return 0
+		return false
 
 		// disabled: OINDEX case is now covered by agenr
 		// for a more suitable register allocation pattern.
 		if n.Left.Type.Etype == gc.TSTRING {
-			return 0
+			return false
 		}
 		goto oindex
 	}
 
-	return 0
+	return false
 
 lit:
 	switch as {
 	default:
-		return 0
+		return false
 
 	case x86.AADDB,
 		x86.AADDW,
@@ -1516,7 +1513,7 @@
 	l = n.Left
 	r = n.Right
 	if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
-		return 0
+		return false
 	}
 
 	// set o to type of array
@@ -1533,13 +1530,13 @@
 	}
 
 	w = n.Type.Width
-	if gc.Isconst(r, gc.CTINT) != 0 {
+	if gc.Isconst(r, gc.CTINT) {
 		goto oindex_const
 	}
 
 	switch w {
 	default:
-		return 0
+		return false
 
 	case 1,
 		2,
@@ -1556,7 +1553,7 @@
 
 	// load the array (reg)
 	if l.Ullman > r.Ullman {
-		if xgen(l, reg, o) != 0 {
+		if xgen(l, reg, o) {
 			o |= OAddable
 		}
 	}
@@ -1575,13 +1572,13 @@
 
 	// load the array (reg)
 	if l.Ullman <= r.Ullman {
-		if xgen(l, reg, o) != 0 {
+		if xgen(l, reg, o) {
 			o |= OAddable
 		}
 	}
 
 	// check bounds
-	if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+	if gc.Debug['B'] == 0 && !n.Bounded {
 		// check bounds
 		n4.Op = gc.OXXX
 
@@ -1598,7 +1595,7 @@
 				n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
 			}
 		} else {
-			if gc.Is64(r.Type) != 0 {
+			if gc.Is64(r.Type) {
 				t = gc.Types[gc.TUINT64]
 			}
 			gc.Nodconst(&n2, gc.Types[gc.TUINT64], l.Type.Bound)
@@ -1653,7 +1650,7 @@
 oindex_const:
 	v = gc.Mpgetfix(r.Val.U.Xval)
 
-	if sudoaddable(as, l, a) != 0 {
+	if sudoaddable(as, l, a) {
 		goto oindex_const_sudo
 	}
 
@@ -1667,7 +1664,7 @@
 		regalloc(reg, gc.Types[gc.Tptr], nil)
 		agen(l, reg)
 
-		if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+		if gc.Debug['B'] == 0 && !n.Bounded {
 			n1 = *reg
 			n1.Op = gc.OINDREG
 			n1.Type = gc.Types[gc.Tptr]
@@ -1717,7 +1714,7 @@
 	}
 
 	// slice indexed by a constant
-	if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+	if gc.Debug['B'] == 0 && !n.Bounded {
 		a.Offset += int64(gc.Array_nel)
 		gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
 		p1 = gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), nil, &n2)
@@ -1747,9 +1744,9 @@
 	goto yes
 
 yes:
-	return 1
+	return true
 
 no:
 	sudoclean()
-	return 0
+	return false
 }
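
The fallthrough statements deleted in gsubr.go sat immediately after calls to gc.Fatal, which never returns, so they could never execute. A compilable illustration with a stand-in fatal and a reduced switch:

package main

import (
	"fmt"
	"os"
)

// fatal is a stand-in for gc.Fatal: it reports the error and exits, so no
// statement placed after a call to it in the same case can ever run.
func fatal(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, format+"\n", args...)
	os.Exit(2)
}

func width(etype int) int {
	switch etype {
	default:
		fatal("unknown etype %d", etype)
		// the earlier converter emitted an unreachable fallthrough here
	case 1, 2:
		return 4
	case 3:
		return 8
	}
	return 0
}

func main() {
	fmt.Println(width(3))
}
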
diff --git a/src/cmd/new6g/peep.go b/src/cmd/new6g/peep.go
index 7b262d6..9870ca5 100644
--- a/src/cmd/new6g/peep.go
+++ b/src/cmd/new6g/peep.go
@@ -44,21 +44,21 @@
 )
 
 // do we need the carry bit
-func needc(p *obj.Prog) int {
+func needc(p *obj.Prog) bool {
 	var info gc.ProgInfo
 
 	for p != nil {
 		proginfo(&info, p)
 		if info.Flags&gc.UseCarry != 0 {
-			return 1
+			return true
 		}
 		if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
-			return 0
+			return false
 		}
 		p = p.Link
 	}
 
-	return 0
+	return false
 }
 
 func rnops(r *gc.Flow) *gc.Flow {
@@ -108,7 +108,7 @@
 		switch p.As {
 		case x86.ALEAL,
 			x86.ALEAQ:
-			if regtyp(&p.To) != 0 {
+			if regtyp(&p.To) {
 				if p.From.Sym != nil {
 					if p.From.Index == x86.REG_NONE {
 						conprop(r)
@@ -122,7 +122,7 @@
 			x86.AMOVQ,
 			x86.AMOVSS,
 			x86.AMOVSD:
-			if regtyp(&p.To) != 0 {
+			if regtyp(&p.To) {
 				if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
 					conprop(r)
 				}
@@ -143,12 +143,12 @@
 			x86.AMOVQ,
 			x86.AMOVSS,
 			x86.AMOVSD:
-			if regtyp(&p.To) != 0 {
-				if regtyp(&p.From) != 0 {
-					if copyprop(g, r) != 0 {
+			if regtyp(&p.To) {
+				if regtyp(&p.From) {
+					if copyprop(g, r) {
 						excise(r)
 						t++
-					} else if subprop(r) != 0 && copyprop(g, r) != 0 {
+					} else if subprop(r) && copyprop(g, r) {
 						excise(r)
 						t++
 					}
@@ -159,7 +159,7 @@
 			x86.AMOVWLZX,
 			x86.AMOVBLSX,
 			x86.AMOVWLSX:
-			if regtyp(&p.To) != 0 {
+			if regtyp(&p.To) {
 				r1 = rnops(gc.Uniqs(r))
 				if r1 != nil {
 					p1 = r1.Prog
@@ -177,7 +177,7 @@
 			x86.AMOVLQSX,
 			x86.AMOVLQZX,
 			x86.AMOVQL:
-			if regtyp(&p.To) != 0 {
+			if regtyp(&p.To) {
 				r1 = rnops(gc.Uniqs(r))
 				if r1 != nil {
 					p1 = r1.Prog
@@ -191,7 +191,7 @@
 		case x86.AADDL,
 			x86.AADDQ,
 			x86.AADDW:
-			if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
 				break
 			}
 			if p.From.Offset == -1 {
@@ -202,7 +202,7 @@
 				} else {
 					p.As = x86.ADECW
 				}
-				p.From = obj.Zprog.From
+				p.From = obj.Addr{}
 				break
 			}
 
@@ -214,14 +214,14 @@
 				} else {
 					p.As = x86.AINCW
 				}
-				p.From = obj.Zprog.From
+				p.From = obj.Addr{}
 				break
 			}
 
 		case x86.ASUBL,
 			x86.ASUBQ,
 			x86.ASUBW:
-			if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
 				break
 			}
 			if p.From.Offset == -1 {
@@ -232,7 +232,7 @@
 				} else {
 					p.As = x86.AINCW
 				}
-				p.From = obj.Zprog.From
+				p.From = obj.Addr{}
 				break
 			}
 
@@ -244,7 +244,7 @@
 				} else {
 					p.As = x86.ADECW
 				}
-				p.From = obj.Zprog.From
+				p.From = obj.Addr{}
 				break
 			}
 		}
@@ -269,9 +269,9 @@
 	for r = g.Start; r != nil; r = r.Link {
 		p = r.Prog
 		if p.As == x86.AMOVLQZX {
-			if regtyp(&p.From) != 0 {
+			if regtyp(&p.From) {
 				if p.From.Type == p.To.Type && p.From.Reg == p.To.Reg {
-					if prevl(r, int(p.From.Reg)) != 0 {
+					if prevl(r, int(p.From.Reg)) {
 						excise(r)
 					}
 				}
@@ -279,8 +279,8 @@
 		}
 
 		if p.As == x86.AMOVSD {
-			if regtyp(&p.From) != 0 {
-				if regtyp(&p.To) != 0 {
+			if regtyp(&p.From) {
+				if regtyp(&p.To) {
 					p.As = x86.AMOVAPD
 				}
 			}
@@ -298,7 +298,7 @@
 			x86.AMOVL,
 			x86.AMOVQ,
 			x86.AMOVLQZX:
-			if regtyp(&p.To) != 0 && !(regconsttyp(&p.From) != 0) {
+			if regtyp(&p.To) && !regconsttyp(&p.From) {
 				pushback(r)
 			}
 		}
@@ -319,7 +319,7 @@
 	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
 		p = r.Prog
 		if p.As != obj.ANOP {
-			if !(regconsttyp(&p.From) != 0) || !(regtyp(&p.To) != 0) {
+			if !regconsttyp(&p.From) || !regtyp(&p.To) {
 				break
 			}
 			if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
@@ -398,8 +398,8 @@
 	gc.Ostats.Ndelmov++
 }
 
-func regtyp(a *obj.Addr) int {
-	return bool2int(a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15))
+func regtyp(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15)
 }
 
 // movb elimination.
@@ -418,7 +418,7 @@
 
 	for r = g.Start; r != nil; r = r.Link {
 		p = r.Prog
-		if regtyp(&p.To) != 0 {
+		if regtyp(&p.To) {
 			switch p.As {
 			case x86.AINCB,
 				x86.AINCW:
@@ -437,7 +437,7 @@
 				p.As = x86.ANOTQ
 			}
 
-			if regtyp(&p.From) != 0 || p.From.Type == obj.TYPE_CONST {
+			if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
 				// move or arithmetic into partial register.
 				// from another register or constant can be movl.
 				// we don't switch to 64-bit arithmetic if it can
@@ -449,13 +449,13 @@
 
 				case x86.AADDB,
 					x86.AADDW:
-					if !(needc(p.Link) != 0) {
+					if !needc(p.Link) {
 						p.As = x86.AADDQ
 					}
 
 				case x86.ASUBB,
 					x86.ASUBW:
-					if !(needc(p.Link) != 0) {
+					if !needc(p.Link) {
 						p.As = x86.ASUBQ
 					}
 
@@ -500,23 +500,23 @@
 }
 
 // is 'a' a register or constant?
-func regconsttyp(a *obj.Addr) int {
-	if regtyp(a) != 0 {
-		return 1
+func regconsttyp(a *obj.Addr) bool {
+	if regtyp(a) {
+		return true
 	}
 	switch a.Type {
 	case obj.TYPE_CONST,
 		obj.TYPE_FCONST,
 		obj.TYPE_SCONST,
 		obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 // is reg guaranteed to be truncated by a previous L instruction?
-func prevl(r0 *gc.Flow, reg int) int {
+func prevl(r0 *gc.Flow, reg int) bool {
 	var p *obj.Prog
 	var r *gc.Flow
 	var info gc.ProgInfo
@@ -527,14 +527,14 @@
 			proginfo(&info, p)
 			if info.Flags&gc.RightWrite != 0 {
 				if info.Flags&gc.SizeL != 0 {
-					return 1
+					return true
 				}
-				return 0
+				return false
 			}
 		}
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -551,7 +551,7 @@
  * hopefully, then the former or latter MOV
  * will be eliminated by copy propagation.
  */
-func subprop(r0 *gc.Flow) int {
+func subprop(r0 *gc.Flow) bool {
 	var p *obj.Prog
 	var info gc.ProgInfo
 	var v1 *obj.Addr
@@ -564,19 +564,19 @@
 	}
 	p = r0.Prog
 	v1 = &p.From
-	if !(regtyp(v1) != 0) {
+	if !regtyp(v1) {
 		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
 		}
-		return 0
+		return false
 	}
 
 	v2 = &p.To
-	if !(regtyp(v2) != 0) {
+	if !regtyp(v2) {
 		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
 		}
-		return 0
+		return false
 	}
 
 	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
@@ -599,21 +599,21 @@
 			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 				fmt.Printf("\tfound %v; return 0\n", p)
 			}
-			return 0
+			return false
 		}
 
 		if info.Reguse|info.Regset != 0 {
 			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 				fmt.Printf("\tfound %v; return 0\n", p)
 			}
-			return 0
+			return false
 		}
 
 		if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
 			goto gotit
 		}
 
-		if copyau(&p.From, v2) != 0 || copyau(&p.To, v2) != 0 {
+		if copyau(&p.From, v2) || copyau(&p.To, v2) {
 			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 				fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
 			}
@@ -631,7 +631,7 @@
 	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 		fmt.Printf("\tran off end; return 0\n")
 	}
-	return 0
+	return false
 
 gotit:
 	copysub(&p.To, v1, v2, 1)
@@ -658,7 +658,7 @@
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("%v last\n", r.Prog)
 	}
-	return 1
+	return true
 }
 
 /*
@@ -673,7 +673,7 @@
  *	set v1	F=1
  *	set v2	return success
  */
-func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
 	var p *obj.Prog
 	var v1 *obj.Addr
 	var v2 *obj.Addr
@@ -684,14 +684,14 @@
 	p = r0.Prog
 	v1 = &p.From
 	v2 = &p.To
-	if copyas(v1, v2) != 0 {
-		return 1
+	if copyas(v1, v2) {
+		return true
 	}
 	gactive++
 	return copy1(v1, v2, r0.S1, 0)
 }
 
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
 	var t int
 	var p *obj.Prog
 
@@ -699,7 +699,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("act set; return 1\n")
 		}
-		return 1
+		return true
 	}
 
 	r.Active = int32(gactive)
@@ -711,7 +711,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("%v", p)
 		}
-		if !(f != 0) && gc.Uniqp(r) == nil {
+		if f == 0 && gc.Uniqp(r) == nil {
 			f = 1
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; merge; f=%d", f)
@@ -724,33 +724,33 @@
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
 			}
-			return 0
+			return false
 
 		case 3: /* set */
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
 			}
-			return 1
+			return true
 
 		case 1, /* used, substitute */
 			4: /* use and set */
 			if f != 0 {
-				if !(gc.Debug['P'] != 0) {
-					return 0
+				if gc.Debug['P'] == 0 {
+					return false
 				}
 				if t == 4 {
 					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
 				} else {
 					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
 				}
-				return 0
+				return false
 			}
 
 			if copyu(p, v2, v1) != 0 {
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; sub fail; return 0\n")
 				}
-				return 0
+				return false
 			}
 
 			if gc.Debug['P'] != 0 {
@@ -760,13 +760,13 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
 				}
-				return 1
+				return true
 			}
 		}
 
-		if !(f != 0) {
+		if f == 0 {
 			t = copyu(p, v1, nil)
-			if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
 				f = 1
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
@@ -778,13 +778,13 @@
 			fmt.Printf("\n")
 		}
 		if r.S2 != nil {
-			if !(copy1(v1, v2, r.S2, f) != 0) {
-				return 0
+			if !copy1(v1, v2, r.S2, f) {
+				return false
 			}
 		}
 	}
 
-	return 1
+	return true
 }
 
 /*
@@ -807,7 +807,7 @@
 			return 0
 		}
 
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 		return 0
@@ -836,7 +836,7 @@
 			return 0
 		}
 
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 4
 		}
 		return 3
@@ -858,23 +858,23 @@
 	}
 
 	if info.Flags&gc.LeftAddr != 0 {
-		if copyas(&p.From, v) != 0 {
+		if copyas(&p.From, v) {
 			return 2
 		}
 	}
 
 	if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			return 2
 		}
 	}
 
 	if info.Flags&gc.RightWrite != 0 {
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			if s != nil {
 				return copysub(&p.From, v, s, 1)
 			}
-			if copyau(&p.From, v) != 0 {
+			if copyau(&p.From, v) {
 				return 4
 			}
 			return 3
@@ -889,10 +889,10 @@
 			return copysub(&p.To, v, s, 1)
 		}
 
-		if copyau(&p.From, v) != 0 {
+		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 	}
@@ -905,7 +905,7 @@
  * could be set/use depending on
  * semantics
  */
-func copyas(a *obj.Addr, v *obj.Addr) int {
+func copyas(a *obj.Addr, v *obj.Addr) bool {
 	if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
 		gc.Fatal("use of byte register")
 	}
@@ -914,62 +914,62 @@
 	}
 
 	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
-		return 0
+		return false
 	}
-	if regtyp(v) != 0 {
-		return 1
+	if regtyp(v) {
+		return true
 	}
 	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
 		if v.Offset == a.Offset {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
-func sameaddr(a *obj.Addr, v *obj.Addr) int {
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
 	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
-		return 0
+		return false
 	}
-	if regtyp(v) != 0 {
-		return 1
+	if regtyp(v) {
+		return true
 	}
 	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
 		if v.Offset == a.Offset {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 /*
  * either direct or indirect
  */
-func copyau(a *obj.Addr, v *obj.Addr) int {
-	if copyas(a, v) != 0 {
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
 		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 			fmt.Printf("\tcopyau: copyas returned 1\n")
 		}
-		return 1
+		return true
 	}
 
-	if regtyp(v) != 0 {
+	if regtyp(v) {
 		if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
 			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 				fmt.Printf("\tcopyau: found indir use - return 1\n")
 			}
-			return 1
+			return true
 		}
 
 		if a.Index == v.Reg {
 			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
 				fmt.Printf("\tcopyau: found index use - return 1\n")
 			}
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -979,7 +979,7 @@
 func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 	var reg int
 
-	if copyas(a, v) != 0 {
+	if copyas(a, v) {
 		reg = int(s.Reg)
 		if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
 			if f != 0 {
@@ -990,7 +990,7 @@
 		return 0
 	}
 
-	if regtyp(v) != 0 {
+	if regtyp(v) {
 		reg = int(v.Reg)
 		if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
 			if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
@@ -1068,10 +1068,10 @@
 	}
 }
 
-func smallindir(a *obj.Addr, reg *obj.Addr) int {
-	return bool2int(regtyp(reg) != 0 && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096)
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
 }
 
-func stackaddr(a *obj.Addr) int {
-	return bool2int(a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP)
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
 }
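
One more recurring rewrite in the peephole passes: an operand is now cleared with the zero value obj.Addr{} instead of copying obj.Zprog.From. A reduced stand-in for obj.Addr and obj.Prog showing that the two forms produce the same value:

package main

import "fmt"

// Addr is a reduced stand-in for obj.Addr.
type Addr struct {
	Type   int
	Reg    int16
	Offset int64
}

// Prog is a reduced stand-in for obj.Prog; Zprog mirrors obj.Zprog, the
// zero-valued instruction that the old code copied a field from.
type Prog struct {
	From Addr
	To   Addr
}

var Zprog Prog

func main() {
	a := Addr{Type: 3, Reg: 5, Offset: 16}
	b := a
	a = Zprog.From // old form: copy the From field of the zero Prog
	b = Addr{}     // new form: assign the zero value directly
	fmt.Println(a == b, a) // true {0 0 0}
}
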
diff --git a/src/cmd/new8g/cgen.go b/src/cmd/new8g/cgen.go
index 9d71aeb..9f736b1 100644
--- a/src/cmd/new8g/cgen.go
+++ b/src/cmd/new8g/cgen.go
@@ -84,7 +84,7 @@
 		gc.OSLICESTR,
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
-		if res.Op != gc.ONAME || !(res.Addable != 0) {
+		if res.Op != gc.ONAME || res.Addable == 0 {
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_slice(n, &n1)
 			cgen(&n1, res)
@@ -94,7 +94,7 @@
 		return
 
 	case gc.OEFACE:
-		if res.Op != gc.ONAME || !(res.Addable != 0) {
+		if res.Op != gc.ONAME || res.Addable == 0 {
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_eface(n, &n1)
 			cgen(&n1, res)
@@ -117,7 +117,7 @@
 	}
 
 	// structs etc get handled specially
-	if gc.Isfat(n.Type) != 0 {
+	if gc.Isfat(n.Type) {
 		if n.Type.Width < 0 {
 			gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
 		}
@@ -131,12 +131,12 @@
 	switch n.Op {
 	case gc.OSPTR,
 		gc.OLEN:
-		if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+		if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
 			n.Addable = n.Left.Addable
 		}
 
 	case gc.OCAP:
-		if gc.Isslice(n.Left.Type) != 0 {
+		if gc.Isslice(n.Left.Type) {
 			n.Addable = n.Left.Addable
 		}
 
@@ -151,7 +151,7 @@
 	}
 
 	// if both are not addressable, use a temporary.
-	if !(n.Addable != 0) && !(res.Addable != 0) {
+	if n.Addable == 0 && res.Addable == 0 {
 		// could use regalloc here sometimes,
 		// but have to check for ullman >= UINF.
 		gc.Tempname(&n1, n.Type)
@@ -163,7 +163,7 @@
 
 	// if result is not addressable directly but n is,
 	// compute its address and then store via the address.
-	if !(res.Addable != 0) {
+	if res.Addable == 0 {
 		igen(res, &n1, nil)
 		cgen(n, &n1)
 		regfree(&n1)
@@ -171,7 +171,7 @@
 	}
 
 	// complex types
-	if gc.Complexop(n, res) != 0 {
+	if gc.Complexop(n, res) {
 		gc.Complexgen(n, res)
 		return
 	}
@@ -197,7 +197,7 @@
 	}
 
 	// 64-bit ops are hard on 32-bit machine.
-	if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Left != nil && gc.Is64(n.Left.Type) != 0 {
+	if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
 		switch n.Op {
 		// math goes to cgen64.
 		case gc.OMINUS,
@@ -246,11 +246,11 @@
 		p1 = gc.Gbranch(obj.AJMP, nil, 0)
 
 		p2 = gc.Pc
-		gmove(gc.Nodbool(1), res)
+		gmove(gc.Nodbool(true), res)
 		p3 = gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
-		gmove(gc.Nodbool(0), res)
+		gmove(gc.Nodbool(false), res)
 		gc.Patch(p3, gc.Pc)
 		return
 
@@ -288,7 +288,7 @@
 		cgen_hmul(nl, nr, res)
 
 	case gc.OCONV:
-		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
 			cgen(nl, res)
 			break
 		}
@@ -317,7 +317,7 @@
 
 		// pointer is the first word of string or slice.
 	case gc.OSPTR:
-		if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 			p1 = gins(i386.ALEAL, nil, &n1)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@@ -332,7 +332,7 @@
 		regfree(&n1)
 
 	case gc.OLEN:
-		if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
 			// map has len in the first 32-bit word.
 			// a zero pointer means zero length
 			gc.Tempname(&n1, gc.Types[gc.Tptr])
@@ -358,7 +358,7 @@
 			break
 		}
 
-		if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
 			// both slice and string have len one pointer into the struct.
 			igen(nl, &n1, res)
 
@@ -372,7 +372,7 @@
 		gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
 
 	case gc.OCAP:
-		if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+		if gc.Istype(nl.Type, gc.TCHAN) {
 			// chan has cap in the second 32-bit word.
 			// a zero pointer means zero length
 			gc.Tempname(&n1, gc.Types[gc.Tptr])
@@ -399,7 +399,7 @@
 			break
 		}
 
-		if gc.Isslice(nl.Type) != 0 {
+		if gc.Isslice(nl.Type) {
 			igen(nl, &n1, res)
 			n1.Type = gc.Types[gc.TUINT32]
 			n1.Xoffset += int64(gc.Array_cap)
@@ -432,7 +432,7 @@
 	case gc.OLSH,
 		gc.ORSH,
 		gc.OLROT:
-		cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+		cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
 	}
 
 	return
@@ -445,7 +445,7 @@
 	}
 
 abop: // asymmetric binary
-	if gc.Smallintconst(nr) != 0 {
+	if gc.Smallintconst(nr) {
 		mgen(nl, &n1, res)
 		regalloc(&n2, nl.Type, &n1)
 		gmove(&n1, &n2)
@@ -496,7 +496,7 @@
 	var hi gc.Node
 	var zero gc.Node
 
-	if !(gc.Is64(n.Type) != 0) {
+	if !gc.Is64(n.Type) {
 		if n.Addable != 0 {
 			// nothing to do.
 			*res = *n
@@ -542,7 +542,7 @@
 	var v uint64
 	var p1 *obj.Prog
 	var p2 *obj.Prog
-	var bounded int
+	var bounded bool
 
 	if gc.Debug['g'] != 0 {
 		gc.Dump("\nagen-res", res)
@@ -557,7 +557,7 @@
 		n = n.Left
 	}
 
-	if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
 		// Use of a nil interface or nil slice.
 		// Create a temporary we can take the address of and read.
 		// The generated code is just going to panic, so it need not
@@ -593,7 +593,6 @@
 	switch n.Op {
 	default:
 		gc.Fatal("agen %v", gc.Oconv(int(n.Op), 0))
-		fallthrough
 
 	case gc.OCALLMETH:
 		gc.Cgen_callmeth(n, 0)
@@ -624,32 +623,32 @@
 	case gc.OINDEX:
 		p2 = nil // to be patched to panicindex.
 		w = uint32(n.Type.Width)
-		bounded = bool2int(gc.Debug['B'] != 0 || n.Bounded != 0)
+		bounded = gc.Debug['B'] != 0 || n.Bounded
 		if nr.Addable != 0 {
 			// Generate &nl first, and move nr into register.
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				igen(nl, &n3, res)
 			}
-			if !(gc.Isconst(nr, gc.CTINT) != 0) {
-				p2 = igenindex(nr, &tmp, bounded)
+			if !gc.Isconst(nr, gc.CTINT) {
+				p2 = igenindex(nr, &tmp, bool2int(bounded))
 				regalloc(&n1, tmp.Type, nil)
 				gmove(&tmp, &n1)
 			}
 		} else if nl.Addable != 0 {
 			// Generate nr first, and move &nl into register.
-			if !(gc.Isconst(nr, gc.CTINT) != 0) {
-				p2 = igenindex(nr, &tmp, bounded)
+			if !gc.Isconst(nr, gc.CTINT) {
+				p2 = igenindex(nr, &tmp, bool2int(bounded))
 				regalloc(&n1, tmp.Type, nil)
 				gmove(&tmp, &n1)
 			}
 
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				igen(nl, &n3, res)
 			}
 		} else {
-			p2 = igenindex(nr, &tmp, bounded)
+			p2 = igenindex(nr, &tmp, bool2int(bounded))
 			nr = &tmp
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				igen(nl, &n3, res)
 			}
 			regalloc(&n1, tmp.Type, nil)
@@ -657,7 +656,7 @@
 		}
 
 		// For fixed array we really want the pointer in n3.
-		if gc.Isfixedarray(nl.Type) != 0 {
+		if gc.Isfixedarray(nl.Type) {
 			regalloc(&n2, gc.Types[gc.Tptr], &n3)
 			agen(&n3, &n2)
 			regfree(&n3)
@@ -670,13 +669,13 @@
 		// w is width
 
 		// constant index
-		if gc.Isconst(nr, gc.CTINT) != 0 {
-			if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nr, gc.CTINT) {
+			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Fatal("constant string constant index") // front end should handle
 			}
 			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
-			if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
-				if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				if gc.Debug['B'] == 0 && !n.Bounded {
 					nlen = n3
 					nlen.Type = gc.Types[gc.TUINT32]
 					nlen.Xoffset += int64(gc.Array_nel)
@@ -716,13 +715,13 @@
 		gmove(&n1, &n2)
 		regfree(&n1)
 
-		if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+		if gc.Debug['B'] == 0 && !n.Bounded {
 			// check bounds
 			t = gc.Types[gc.TUINT32]
 
-			if gc.Isconst(nl, gc.CTSTR) != 0 {
+			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
-			} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 				nlen = n3
 				nlen.Type = t
 				nlen.Xoffset += int64(gc.Array_nel)
@@ -739,7 +738,7 @@
 			gc.Patch(p1, gc.Pc)
 		}
 
-		if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n3, gc.Types[gc.Tptr], res)
 			p1 = gins(i386.ALEAL, nil, &n3)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@@ -751,7 +750,7 @@
 		// Load base pointer in n3.
 		regalloc(&tmp, gc.Types[gc.Tptr], &n3)
 
-		if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+		if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 			n3.Type = gc.Types[gc.Tptr]
 			n3.Xoffset += int64(gc.Array_array)
 			gmove(&n3, &tmp)
@@ -789,7 +788,7 @@
 		}
 
 		// should only get here for heap vars or paramref
-		if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+		if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
 			gc.Dump("bad agen", n)
 			gc.Fatal("agen: bad ONAME class %#x", n.Class)
 		}
@@ -813,7 +812,7 @@
 
 	case gc.ODOTPTR:
 		t = nl.Type
-		if !(gc.Isptr[t.Etype] != 0) {
+		if gc.Isptr[t.Etype] == 0 {
 			gc.Fatal("agen: not ptr %v", gc.Nconv(n, 0))
 		}
 		cgen(nl, res)
@@ -919,10 +918,10 @@
 	// Could do the same for slice except that we need
 	// to use the real index for the bounds checking.
 	case gc.OINDEX:
-		if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
-			if gc.Isconst(n.Right, gc.CTINT) != 0 {
+		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+			if gc.Isconst(n.Right, gc.CTINT) {
 				// Compute &a.
-				if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+				if gc.Isptr[n.Left.Type.Etype] == 0 {
 					igen(n.Left, a, res)
 				} else {
 					igen(n.Left, &n1, res)
@@ -979,7 +978,7 @@
 	}
 
 	if n == nil {
-		n = gc.Nodbool(1)
+		n = gc.Nodbool(true)
 	}
 
 	if n.Ninit != nil {
@@ -1021,13 +1020,13 @@
 
 		// need to ask if it is bool?
 	case gc.OLITERAL:
-		if !true_ == !(n.Val.U.Bval != 0) {
+		if !true_ == (n.Val.U.Bval == 0) {
 			gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
 		}
 		return
 
 	case gc.ONAME:
-		if !(n.Addable != 0) {
+		if n.Addable == 0 {
 			goto def
 		}
 		gc.Nodconst(&n1, n.Type, 0)
@@ -1101,7 +1100,7 @@
 			nr = r
 		}
 
-		if gc.Isslice(nl.Type) != 0 {
+		if gc.Isslice(nl.Type) {
 			// front end should only leave cmp to literal nil
 			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
 				gc.Yyerror("illegal slice comparison")
@@ -1119,7 +1118,7 @@
 			break
 		}
 
-		if gc.Isinter(nl.Type) != 0 {
+		if gc.Isinter(nl.Type) {
 			// front end should only leave cmp to literal nil
 			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
 				gc.Yyerror("illegal interface comparison")
@@ -1141,14 +1140,14 @@
 			break
 		}
 
-		if gc.Is64(nr.Type) != 0 {
-			if !(nl.Addable != 0) || gc.Isconst(nl, gc.CTINT) != 0 {
+		if gc.Is64(nr.Type) {
+			if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) {
 				gc.Tempname(&n1, nl.Type)
 				cgen(nl, &n1)
 				nl = &n1
 			}
 
-			if !(nr.Addable != 0) {
+			if nr.Addable == 0 {
 				gc.Tempname(&n2, nr.Type)
 				cgen(nr, &n2)
 				nr = &n2
@@ -1159,13 +1158,13 @@
 		}
 
 		if nr.Ullman >= gc.UINF {
-			if !(nl.Addable != 0) {
+			if nl.Addable == 0 {
 				gc.Tempname(&n1, nl.Type)
 				cgen(nl, &n1)
 				nl = &n1
 			}
 
-			if !(nr.Addable != 0) {
+			if nr.Addable == 0 {
 				gc.Tempname(&tmp, nr.Type)
 				cgen(nr, &tmp)
 				nr = &tmp
@@ -1177,19 +1176,19 @@
 			goto cmp
 		}
 
-		if !(nl.Addable != 0) {
+		if nl.Addable == 0 {
 			gc.Tempname(&n1, nl.Type)
 			cgen(nl, &n1)
 			nl = &n1
 		}
 
-		if gc.Smallintconst(nr) != 0 {
+		if gc.Smallintconst(nr) {
 			gins(optoas(gc.OCMP, nr.Type), nl, nr)
 			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
 			break
 		}
 
-		if !(nr.Addable != 0) {
+		if nr.Addable == 0 {
 			gc.Tempname(&tmp, nr.Type)
 			cgen(nr, &tmp)
 			nr = &tmp
@@ -1252,14 +1251,14 @@
 
 	case gc.OINDEX:
 		t = n.Left.Type
-		if !(gc.Isfixedarray(t) != 0) {
+		if !gc.Isfixedarray(t) {
 			break
 		}
 		off = stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
-		if gc.Isconst(n.Right, gc.CTINT) != 0 {
+		if gc.Isconst(n.Right, gc.CTINT) {
 			return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
 		}
 		return 1000
@@ -1334,7 +1333,7 @@
 	}
 
 	// Avoid taking the address for simple enough types.
-	if componentgen(n, res) != 0 {
+	if componentgen(n, res) {
 		return
 	}
 
@@ -1360,10 +1359,10 @@
 
 	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
 	gc.Tempname(&tdst, gc.Types[gc.Tptr])
-	if !(n.Addable != 0) {
+	if n.Addable == 0 {
 		agen(n, &tsrc)
 	}
-	if !(res.Addable != 0) {
+	if res.Addable == 0 {
 		agen(res, &tdst)
 	}
 	if n.Addable != 0 {
@@ -1459,19 +1458,19 @@
 	}
 }
 
-func cadable(n *gc.Node) int {
-	if !(n.Addable != 0) {
+func cadable(n *gc.Node) bool {
+	if n.Addable == 0 {
 		// dont know how it happens,
 		// but it does
-		return 0
+		return false
 	}
 
 	switch n.Op {
 	case gc.ONAME:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -1482,7 +1481,7 @@
  * nr is N when assigning a zero value.
  * return 1 if can do, 0 if can't.
  */
-func componentgen(nr *gc.Node, nl *gc.Node) int {
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
 	var nodl gc.Node
 	var nodr gc.Node
 	var tmp gc.Node
@@ -1504,12 +1503,12 @@
 		t = nl.Type
 
 		// Slices are ok.
-		if gc.Isslice(t) != 0 {
+		if gc.Isslice(t) {
 			break
 		}
 
 		// Small arrays are ok.
-		if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+		if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
 			break
 		}
 
@@ -1521,7 +1520,7 @@
 		fldcount = 0
 
 		for t = nl.Type.Type; t != nil; t = t.Down {
-			if gc.Isfat(t.Type) != 0 {
+			if gc.Isfat(t.Type) {
 				goto no
 			}
 			if t.Etype != gc.TFIELD {
@@ -1540,8 +1539,8 @@
 	}
 
 	nodl = *nl
-	if !(cadable(nl) != 0) {
-		if nr != nil && !(cadable(nr) != 0) {
+	if !cadable(nl) {
+		if nr != nil && !cadable(nr) {
 			goto no
 		}
 		igen(nl, &nodl, nil)
@@ -1550,7 +1549,7 @@
 
 	if nr != nil {
 		nodr = *nr
-		if !(cadable(nr) != 0) {
+		if !cadable(nr) {
 			igen(nr, &nodr, nil)
 			freer = 1
 		}
@@ -1578,7 +1577,7 @@
 			gc.Gvardef(nl)
 		}
 		t = nl.Type
-		if !(gc.Isslice(t) != 0) {
+		if !gc.Isslice(t) {
 			nodl.Type = t.Type
 			nodr.Type = nodl.Type
 			for fldcount = 0; fldcount < t.Bound; fldcount++ {
@@ -1718,7 +1717,7 @@
 	if freel != 0 {
 		regfree(&nodl)
 	}
-	return 0
+	return false
 
 yes:
 	if freer != 0 {
@@ -1727,5 +1726,5 @@
 	if freel != 0 {
 		regfree(&nodl)
 	}
-	return 1
+	return true
 }
diff --git a/src/cmd/new8g/cgen64.go b/src/cmd/new8g/cgen64.go
index 108fc6a..1937ae0 100644
--- a/src/cmd/new8g/cgen64.go
+++ b/src/cmd/new8g/cgen64.go
@@ -44,7 +44,6 @@
 	switch n.Op {
 	default:
 		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
-		fallthrough
 
 	case gc.OMINUS:
 		cgen(n.Left, res)
@@ -79,13 +78,13 @@
 
 	l = n.Left
 	r = n.Right
-	if !(l.Addable != 0) {
+	if l.Addable == 0 {
 		gc.Tempname(&t1, l.Type)
 		cgen(l, &t1)
 		l = &t1
 	}
 
-	if r != nil && !(r.Addable != 0) {
+	if r != nil && r.Addable == 0 {
 		gc.Tempname(&t2, r.Type)
 		cgen(r, &t2)
 		r = &t2
@@ -98,7 +97,7 @@
 	// Setup for binary operation.
 	split64(l, &lo1, &hi1)
 
-	if gc.Is64(r.Type) != 0 {
+	if gc.Is64(r.Type) {
 		split64(r, &lo2, &hi2)
 	}
 
@@ -196,7 +195,7 @@
 		if r.Op == gc.OLITERAL {
 			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
 			if v >= 64 {
-				if gc.Is64(r.Type) != 0 {
+				if gc.Is64(r.Type) {
 					splitclean()
 				}
 				splitclean()
@@ -208,7 +207,7 @@
 			}
 
 			if v >= 32 {
-				if gc.Is64(r.Type) != 0 {
+				if gc.Is64(r.Type) {
 					splitclean()
 				}
 				split64(res, &lo2, &hi2)
@@ -243,7 +242,7 @@
 		// if high bits are set, zero value.
 		p1 = nil
 
-		if gc.Is64(r.Type) != 0 {
+		if gc.Is64(r.Type) {
 			gins(i386.ACMPL, &hi2, ncon(0))
 			p1 = gc.Gbranch(i386.AJNE, nil, +1)
 			gins(i386.AMOVL, &lo2, &cx)
@@ -285,7 +284,7 @@
 		if r.Op == gc.OLITERAL {
 			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
 			if v >= 64 {
-				if gc.Is64(r.Type) != 0 {
+				if gc.Is64(r.Type) {
 					splitclean()
 				}
 				splitclean()
@@ -305,7 +304,7 @@
 			}
 
 			if v >= 32 {
-				if gc.Is64(r.Type) != 0 {
+				if gc.Is64(r.Type) {
 					splitclean()
 				}
 				split64(res, &lo2, &hi2)
@@ -344,7 +343,7 @@
 		// if high bits are set, zero value.
 		p1 = nil
 
-		if gc.Is64(r.Type) != 0 {
+		if gc.Is64(r.Type) {
 			gins(i386.ACMPL, &hi2, ncon(0))
 			p1 = gc.Gbranch(i386.AJNE, nil, +1)
 			gins(i386.AMOVL, &lo2, &cx)
@@ -496,7 +495,7 @@
 		gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
 	}
 
-	if gc.Is64(r.Type) != 0 {
+	if gc.Is64(r.Type) {
 		splitclean()
 	}
 	splitclean()
@@ -542,7 +541,6 @@
 	switch op {
 	default:
 		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
-		fallthrough
 
 		// cmp hi
 	// jne L
diff --git a/src/cmd/new8g/ggen.go b/src/cmd/new8g/ggen.go
index 8dd469c..f72beda 100644
--- a/src/cmd/new8g/ggen.go
+++ b/src/cmd/new8g/ggen.go
@@ -36,7 +36,7 @@
 	ax = 0
 	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
-		if !(n.Needzero != 0) {
+		if n.Needzero == 0 {
 			continue
 		}
 		if n.Class != gc.PAUTO {
@@ -129,7 +129,7 @@
 	w = uint32(nl.Type.Width)
 
 	// Avoid taking the address for simple enough types.
-	if componentgen(nil, nl) != 0 {
+	if componentgen(nil, nl) {
 		return
 	}
 
@@ -151,7 +151,7 @@
 		for {
 			tmp14 := q
 			q--
-			if !(tmp14 > 0) {
+			if tmp14 <= 0 {
 				break
 			}
 			n1.Type = z.Type
@@ -163,7 +163,7 @@
 		for {
 			tmp15 := c
 			c--
-			if !(tmp15 > 0) {
+			if tmp15 <= 0 {
 				break
 			}
 			n1.Type = z.Type
@@ -252,7 +252,7 @@
 
 			p = gins(obj.ACALL, nil, f)
 			gc.Afunclit(&p.To, f)
-			if proc == -1 || gc.Noreturn(p) != 0 {
+			if proc == -1 || gc.Noreturn(p) {
 				gins(obj.AUNDEF, nil, nil)
 			}
 			break
@@ -327,7 +327,7 @@
 
 	i = i.Left // interface
 
-	if !(i.Addable != 0) {
+	if i.Addable == 0 {
 		gc.Tempname(&tmpi, i.Type)
 		cgen(i, &tmpi)
 		i = &tmpi
@@ -563,9 +563,9 @@
 	check = 0
 	if gc.Issigned[t.Etype] != 0 {
 		check = 1
-		if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
+		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
 			check = 0
-		} else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
 			check = 0
 		}
 	}
@@ -596,7 +596,7 @@
 		cgen(nr, &t2)
 	}
 
-	if !(gc.Samereg(ax, res) != 0) && !(gc.Samereg(dx, res) != 0) {
+	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
 		regalloc(&n1, t, res)
 	} else {
 		regalloc(&n1, t, nil)
@@ -639,7 +639,7 @@
 		gc.Patch(p1, gc.Pc)
 	}
 
-	if !(gc.Issigned[t.Etype] != 0) {
+	if gc.Issigned[t.Etype] == 0 {
 		gc.Nodconst(&nz, t, 0)
 		gmove(&nz, dx)
 	} else {
@@ -668,7 +668,7 @@
 	// and not the destination
 	*oldx = gc.Node{}
 
-	if r > 0 && !(gc.Samereg(x, res) != 0) {
+	if r > 0 && !gc.Samereg(x, res) {
 		gc.Tempname(oldx, gc.Types[gc.TINT32])
 		gmove(x, oldx)
 	}
@@ -697,7 +697,7 @@
 	var olddx gc.Node
 	var t *gc.Type
 
-	if gc.Is64(nl.Type) != 0 {
+	if gc.Is64(nl.Type) {
 		gc.Fatal("cgen_div %v", gc.Tconv(nl.Type, 0))
 	}
 
@@ -718,7 +718,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	var n1 gc.Node
 	var n2 gc.Node
 	var nt gc.Node
@@ -761,7 +761,7 @@
 
 	oldcx = gc.Node{}
 	gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
-	if reg[i386.REG_CX] > 1 && !(gc.Samereg(&cx, res) != 0) {
+	if reg[i386.REG_CX] > 1 && !gc.Samereg(&cx, res) {
 		gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
 		gmove(&cx, &oldcx)
 	}
@@ -774,7 +774,7 @@
 		regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
 	}
 
-	if gc.Samereg(&cx, res) != 0 {
+	if gc.Samereg(&cx, res) {
 		regalloc(&n2, nl.Type, nil)
 	} else {
 		regalloc(&n2, nl.Type, res)
@@ -788,7 +788,7 @@
 	}
 
 	// test and fix up large shifts
-	if bounded != 0 {
+	if bounded {
 		if nr.Type.Width > 4 {
 			// delayed reg alloc
 			gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
@@ -943,11 +943,11 @@
 		gc.OGE:
 		p1 = gc.Gbranch(obj.AJMP, nil, 0)
 		p2 = gc.Pc
-		gmove(gc.Nodbool(1), res)
+		gmove(gc.Nodbool(true), res)
 		p3 = gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
-		gmove(gc.Nodbool(0), res)
+		gmove(gc.Nodbool(false), res)
 		gc.Patch(p3, gc.Pc)
 		return
 
@@ -956,7 +956,7 @@
 		return
 
 	case gc.OCONV:
-		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
 			cgen(nl, res)
 			return
 		}
@@ -1114,7 +1114,7 @@
 	nl = n.Left
 	nr = n.Right
 	a = int(n.Op)
-	if !(true_ != 0) {
+	if true_ == 0 {
 		// brcom is not valid on floats when NaN is involved.
 		p1 = gc.Gbranch(obj.AJMP, nil, 0)
 
@@ -1183,13 +1183,13 @@
 	goto ret
 
 sse:
-	if !(nl.Addable != 0) {
+	if nl.Addable == 0 {
 		gc.Tempname(&n1, nl.Type)
 		cgen(nl, &n1)
 		nl = &n1
 	}
 
-	if !(nr.Addable != 0) {
+	if nr.Addable == 0 {
 		gc.Tempname(&tmp, nr.Type)
 		cgen(nr, &tmp)
 		nr = &tmp
@@ -1286,7 +1286,7 @@
 
 		p2.From.Type = obj.TYPE_REG
 		p2.From.Reg = i386.REG_AX
-		if regtyp(&p.From) != 0 {
+		if regtyp(&p.From) {
 			p2.To.Type = obj.TYPE_MEM
 			p2.To.Reg = p.From.Reg
 		} else {
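
The cgen_shift conversion above turns the bounded argument into a bool: when the front end has already proven the shift count is in range (or -B disables checks), the fixup for oversized counts is skipped. A toy model of why the flag matters, not the code generator itself; the helper below is purely illustrative:

	package main

	// On 386, SHL masks the shift count to 5 bits, so a shift by 40 would
	// behave like a shift by 8. The generator therefore emits a fixup that
	// forces the result to zero for oversized counts unless the shift is
	// "bounded", i.e. the count is already known to be in range.
	func shift32(x uint32, count uint, bounded bool) uint32 {
		if !bounded && count >= 32 {
			return 0 // fixup path: Go semantics for an oversized shift
		}
		return x << (count & 31) // what the bare hardware instruction computes
	}

	func main() {
		println(shift32(1, 3, true))   // 8
		println(shift32(1, 40, false)) // 0 thanks to the fixup
		println(shift32(1, 40, true))  // 256: skipping the fixup exposes the 5-bit masking
	}
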
diff --git a/src/cmd/new8g/gsubr.go b/src/cmd/new8g/gsubr.go
index eaf0896..2728c2a 100644
--- a/src/cmd/new8g/gsubr.go
+++ b/src/cmd/new8g/gsubr.go
@@ -601,7 +601,7 @@
 	}
 }
 
-func anyregalloc() int {
+func anyregalloc() bool {
 	var i int
 	var j int
 
@@ -614,16 +614,16 @@
 				goto ok
 			}
 		}
-		return 1
+		return true
 	ok:
 	}
 
 	for i = i386.REG_X0; i <= i386.REG_X7; i++ {
 		if reg[i] != 0 {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 /*
@@ -644,7 +644,6 @@
 	case gc.TINT64,
 		gc.TUINT64:
 		gc.Fatal("regalloc64")
-		fallthrough
 
 	case gc.TINT8,
 		gc.TUINT8,
@@ -677,7 +676,7 @@
 
 	case gc.TFLOAT32,
 		gc.TFLOAT64:
-		if !(gc.Use_sse != 0) {
+		if gc.Use_sse == 0 {
 			i = i386.REG_F0
 			goto out
 		}
@@ -798,7 +797,7 @@
 	var n1 gc.Node
 	var i int64
 
-	if !(gc.Is64(n.Type) != 0) {
+	if !gc.Is64(n.Type) {
 		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
 	}
 
@@ -811,7 +810,7 @@
 	default:
 		switch n.Op {
 		default:
-			if !(dotaddable(n, &n1) != 0) {
+			if !dotaddable(n, &n1) {
 				igen(n, &n1, nil)
 				sclean[nsclean-1] = n1
 			}
@@ -934,7 +933,7 @@
 
 	// cannot have two integer memory operands;
 	// except 64-bit, which always copies via registers anyway.
-	if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !(gc.Is64(f.Type) != 0) && !(gc.Is64(t.Type) != 0) && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+	if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
@@ -1200,7 +1199,7 @@
 	cvt = t.Type
 
 	// cannot have two floating point memory operands.
-	if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+	if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
@@ -1211,7 +1210,7 @@
 		ft = gc.Simsimtype(con.Type)
 
 		// some constants can't move directly to memory.
-		if gc.Ismem(t) != 0 {
+		if gc.Ismem(t) {
 			// float constants come from memory.
 			if gc.Isfloat[tt] != 0 {
 				goto hard
@@ -1269,7 +1268,7 @@
 
 	case gc.TFLOAT32<<16 | gc.TUINT64,
 		gc.TFLOAT64<<16 | gc.TUINT64:
-		if !(gc.Ismem(f) != 0) {
+		if !gc.Ismem(f) {
 			cvt = f.Type
 			goto hardmem
 		}
@@ -1500,7 +1499,6 @@
 		switch tt {
 		default:
 			gc.Fatal("gmove %v", gc.Nconv(t, 0))
-			fallthrough
 
 		case gc.TINT8:
 			gins(i386.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
@@ -1595,7 +1593,7 @@
 	 */
 	case gc.TFLOAT32<<16 | gc.TFLOAT32,
 		gc.TFLOAT64<<16 | gc.TFLOAT64:
-		if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+		if gc.Ismem(f) && gc.Ismem(t) {
 			goto hard
 		}
 		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
@@ -1609,7 +1607,7 @@
 		if ft == gc.TFLOAT64 {
 			a = i386.AFMOVD
 		}
-		if gc.Ismem(t) != 0 {
+		if gc.Ismem(t) {
 			if f.Op != gc.OREGISTER || f.Val.U.Reg != i386.REG_F0 {
 				gc.Fatal("gmove %v", gc.Nconv(f, 0))
 			}
@@ -1620,7 +1618,7 @@
 		}
 
 	case gc.TFLOAT32<<16 | gc.TFLOAT64:
-		if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+		if gc.Ismem(f) && gc.Ismem(t) {
 			goto hard
 		}
 		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
@@ -1638,7 +1636,7 @@
 		return
 
 	case gc.TFLOAT64<<16 | gc.TFLOAT32:
-		if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+		if gc.Ismem(f) && gc.Ismem(t) {
 			goto hard
 		}
 		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
@@ -1810,9 +1808,9 @@
 	return
 }
 
-func samaddr(f *gc.Node, t *gc.Node) int {
+func samaddr(f *gc.Node, t *gc.Node) bool {
 	if f.Op != t.Op {
-		return 0
+		return false
 	}
 
 	switch f.Op {
@@ -1820,10 +1818,10 @@
 		if f.Val.U.Reg != t.Val.U.Reg {
 			break
 		}
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -1850,12 +1848,12 @@
 	case i386.AMOVB,
 		i386.AMOVW,
 		i386.AMOVL:
-		if f != nil && t != nil && samaddr(f, t) != 0 {
+		if f != nil && t != nil && samaddr(f, t) {
 			return nil
 		}
 
 	case i386.ALEAL:
-		if f != nil && gc.Isconst(f, gc.CTNIL) != 0 {
+		if f != nil && gc.Isconst(f, gc.CTNIL) {
 			gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
 		}
 	}
@@ -1904,13 +1902,13 @@
 	return p
 }
 
-func dotaddable(n *gc.Node, n1 *gc.Node) int {
+func dotaddable(n *gc.Node, n1 *gc.Node) bool {
 	var o int
 	var oary [10]int64
 	var nn *gc.Node
 
 	if n.Op != gc.ODOT {
-		return 0
+		return false
 	}
 
 	o = gc.Dotoffset(n, oary[:], &nn)
@@ -1918,16 +1916,16 @@
 		*n1 = *nn
 		n1.Type = n.Type
 		n1.Xoffset += oary[0]
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 func sudoclean() {
 }
 
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
 	*a = obj.Addr{}
-	return 0
+	return false
 }
diff --git a/src/cmd/new8g/peep.go b/src/cmd/new8g/peep.go
index 523ce9e..0838882 100644
--- a/src/cmd/new8g/peep.go
+++ b/src/cmd/new8g/peep.go
@@ -45,21 +45,21 @@
 var gactive uint32
 
 // do we need the carry bit
-func needc(p *obj.Prog) int {
+func needc(p *obj.Prog) bool {
 	var info gc.ProgInfo
 
 	for p != nil {
 		proginfo(&info, p)
 		if info.Flags&gc.UseCarry != 0 {
-			return 1
+			return true
 		}
 		if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
-			return 0
+			return false
 		}
 		p = p.Link
 	}
 
-	return 0
+	return false
 }
 
 func rnops(r *gc.Flow) *gc.Flow {
@@ -108,7 +108,7 @@
 		p = r.Prog
 		switch p.As {
 		case i386.ALEAL:
-			if regtyp(&p.To) != 0 {
+			if regtyp(&p.To) {
 				if p.From.Sym != nil {
 					if p.From.Index == i386.REG_NONE {
 						conprop(r)
@@ -121,7 +121,7 @@
 			i386.AMOVL,
 			i386.AMOVSS,
 			i386.AMOVSD:
-			if regtyp(&p.To) != 0 {
+			if regtyp(&p.To) {
 				if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
 					conprop(r)
 				}
@@ -141,12 +141,12 @@
 		case i386.AMOVL,
 			i386.AMOVSS,
 			i386.AMOVSD:
-			if regtyp(&p.To) != 0 {
-				if regtyp(&p.From) != 0 {
-					if copyprop(g, r) != 0 {
+			if regtyp(&p.To) {
+				if regtyp(&p.From) {
+					if copyprop(g, r) {
 						excise(r)
 						t++
-					} else if subprop(r) != 0 && copyprop(g, r) != 0 {
+					} else if subprop(r) && copyprop(g, r) {
 						excise(r)
 						t++
 					}
@@ -157,7 +157,7 @@
 			i386.AMOVWLZX,
 			i386.AMOVBLSX,
 			i386.AMOVWLSX:
-			if regtyp(&p.To) != 0 {
+			if regtyp(&p.To) {
 				r1 = rnops(gc.Uniqs(r))
 				if r1 != nil {
 					p1 = r1.Prog
@@ -170,7 +170,7 @@
 
 		case i386.AADDL,
 			i386.AADDW:
-			if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
 				break
 			}
 			if p.From.Offset == -1 {
@@ -179,7 +179,7 @@
 				} else {
 					p.As = i386.ADECW
 				}
-				p.From = obj.Zprog.From
+				p.From = obj.Addr{}
 				break
 			}
 
@@ -189,13 +189,13 @@
 				} else {
 					p.As = i386.AINCW
 				}
-				p.From = obj.Zprog.From
+				p.From = obj.Addr{}
 				break
 			}
 
 		case i386.ASUBL,
 			i386.ASUBW:
-			if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
 				break
 			}
 			if p.From.Offset == -1 {
@@ -204,7 +204,7 @@
 				} else {
 					p.As = i386.AINCW
 				}
-				p.From = obj.Zprog.From
+				p.From = obj.Addr{}
 				break
 			}
 
@@ -214,7 +214,7 @@
 				} else {
 					p.As = i386.ADECW
 				}
-				p.From = obj.Zprog.From
+				p.From = obj.Addr{}
 				break
 			}
 		}
@@ -232,8 +232,8 @@
 	for r = g.Start; r != nil; r = r.Link {
 		p = r.Prog
 		if p.As == i386.AMOVSD {
-			if regtyp(&p.From) != 0 {
-				if regtyp(&p.To) != 0 {
+			if regtyp(&p.From) {
+				if regtyp(&p.To) {
 					p.As = i386.AMOVAPD
 				}
 			}
@@ -256,8 +256,8 @@
 	gc.Ostats.Ndelmov++
 }
 
-func regtyp(a *obj.Addr) int {
-	return bool2int(a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7))
+func regtyp(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7)
 }
 
 // movb elimination.
@@ -273,7 +273,7 @@
 
 	for r = g.Start; r != nil; r = r.Link {
 		p = r.Prog
-		if regtyp(&p.To) != 0 {
+		if regtyp(&p.To) {
 			switch p.As {
 			case i386.AINCB,
 				i386.AINCW:
@@ -292,7 +292,7 @@
 				p.As = i386.ANOTL
 			}
 
-			if regtyp(&p.From) != 0 || p.From.Type == obj.TYPE_CONST {
+			if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
 				// move or artihmetic into partial register.
 				// from another register or constant can be movl.
 				// we don't switch to 32-bit arithmetic if it can
@@ -304,13 +304,13 @@
 
 				case i386.AADDB,
 					i386.AADDW:
-					if !(needc(p.Link) != 0) {
+					if !needc(p.Link) {
 						p.As = i386.AADDL
 					}
 
 				case i386.ASUBB,
 					i386.ASUBW:
-					if !(needc(p.Link) != 0) {
+					if !needc(p.Link) {
 						p.As = i386.ASUBL
 					}
 
@@ -366,7 +366,7 @@
  * hopefully, then the former or latter MOV
  * will be eliminated by copy propagation.
  */
-func subprop(r0 *gc.Flow) int {
+func subprop(r0 *gc.Flow) bool {
 	var p *obj.Prog
 	var v1 *obj.Addr
 	var v2 *obj.Addr
@@ -376,12 +376,12 @@
 
 	p = r0.Prog
 	v1 = &p.From
-	if !(regtyp(v1) != 0) {
-		return 0
+	if !regtyp(v1) {
+		return false
 	}
 	v2 = &p.To
-	if !(regtyp(v2) != 0) {
-		return 0
+	if !regtyp(v2) {
+		return false
 	}
 	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
 		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
@@ -396,18 +396,18 @@
 		}
 		proginfo(&info, p)
 		if info.Flags&gc.Call != 0 {
-			return 0
+			return false
 		}
 
 		if info.Reguse|info.Regset != 0 {
-			return 0
+			return false
 		}
 
 		if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
 			goto gotit
 		}
 
-		if copyau(&p.From, v2) != 0 || copyau(&p.To, v2) != 0 {
+		if copyau(&p.From, v2) || copyau(&p.To, v2) {
 			break
 		}
 		if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
@@ -415,7 +415,7 @@
 		}
 	}
 
-	return 0
+	return false
 
 gotit:
 	copysub(&p.To, v1, v2, 1)
@@ -442,7 +442,7 @@
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("%v last\n", r.Prog)
 	}
-	return 1
+	return true
 }
 
 /*
@@ -457,7 +457,7 @@
  *	set v1	F=1
  *	set v2	return success
  */
-func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
 	var p *obj.Prog
 	var v1 *obj.Addr
 	var v2 *obj.Addr
@@ -465,14 +465,14 @@
 	p = r0.Prog
 	v1 = &p.From
 	v2 = &p.To
-	if copyas(v1, v2) != 0 {
-		return 1
+	if copyas(v1, v2) {
+		return true
 	}
 	gactive++
 	return copy1(v1, v2, r0.S1, 0)
 }
 
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
 	var t int
 	var p *obj.Prog
 
@@ -480,7 +480,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("act set; return 1\n")
 		}
-		return 1
+		return true
 	}
 
 	r.Active = int32(gactive)
@@ -492,7 +492,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("%v", p)
 		}
-		if !(f != 0) && gc.Uniqp(r) == nil {
+		if f == 0 && gc.Uniqp(r) == nil {
 			f = 1
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; merge; f=%d", f)
@@ -505,33 +505,33 @@
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
 			}
-			return 0
+			return false
 
 		case 3: /* set */
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
 			}
-			return 1
+			return true
 
 		case 1, /* used, substitute */
 			4: /* use and set */
 			if f != 0 {
-				if !(gc.Debug['P'] != 0) {
-					return 0
+				if gc.Debug['P'] == 0 {
+					return false
 				}
 				if t == 4 {
 					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
 				} else {
 					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
 				}
-				return 0
+				return false
 			}
 
 			if copyu(p, v2, v1) != 0 {
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; sub fail; return 0\n")
 				}
-				return 0
+				return false
 			}
 
 			if gc.Debug['P'] != 0 {
@@ -541,13 +541,13 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
 				}
-				return 1
+				return true
 			}
 		}
 
-		if !(f != 0) {
+		if f == 0 {
 			t = copyu(p, v1, nil)
-			if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
 				f = 1
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
@@ -559,13 +559,13 @@
 			fmt.Printf("\n")
 		}
 		if r.S2 != nil {
-			if !(copy1(v1, v2, r.S2, f) != 0) {
-				return 0
+			if !copy1(v1, v2, r.S2, f) {
+				return false
 			}
 		}
 	}
 
-	return 1
+	return true
 }
 
 /*
@@ -588,7 +588,7 @@
 			return 0
 		}
 
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 		return 0
@@ -617,7 +617,7 @@
 			return 0
 		}
 
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 4
 		}
 		return 3
@@ -639,23 +639,23 @@
 	}
 
 	if info.Flags&gc.LeftAddr != 0 {
-		if copyas(&p.From, v) != 0 {
+		if copyas(&p.From, v) {
 			return 2
 		}
 	}
 
 	if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			return 2
 		}
 	}
 
 	if info.Flags&gc.RightWrite != 0 {
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			if s != nil {
 				return copysub(&p.From, v, s, 1)
 			}
-			if copyau(&p.From, v) != 0 {
+			if copyau(&p.From, v) {
 				return 4
 			}
 			return 3
@@ -670,10 +670,10 @@
 			return copysub(&p.To, v, s, 1)
 		}
 
-		if copyau(&p.From, v) != 0 {
+		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 	}
@@ -686,7 +686,7 @@
  * could be set/use depending on
  * semantics
  */
-func copyas(a *obj.Addr, v *obj.Addr) int {
+func copyas(a *obj.Addr, v *obj.Addr) bool {
 	if i386.REG_AL <= a.Reg && a.Reg <= i386.REG_BL {
 		gc.Fatal("use of byte register")
 	}
@@ -695,51 +695,51 @@
 	}
 
 	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
-		return 0
+		return false
 	}
-	if regtyp(v) != 0 {
-		return 1
+	if regtyp(v) {
+		return true
 	}
 	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
 		if v.Offset == a.Offset {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
-func sameaddr(a *obj.Addr, v *obj.Addr) int {
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
 	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
-		return 0
+		return false
 	}
-	if regtyp(v) != 0 {
-		return 1
+	if regtyp(v) {
+		return true
 	}
 	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
 		if v.Offset == a.Offset {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
 /*
  * either direct or indirect
  */
-func copyau(a *obj.Addr, v *obj.Addr) int {
-	if copyas(a, v) != 0 {
-		return 1
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
 	}
-	if regtyp(v) != 0 {
+	if regtyp(v) {
 		if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
-			return 1
+			return true
 		}
 		if a.Index == v.Reg {
-			return 1
+			return true
 		}
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -749,7 +749,7 @@
 func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 	var reg int
 
-	if copyas(a, v) != 0 {
+	if copyas(a, v) {
 		reg = int(s.Reg)
 		if reg >= i386.REG_AX && reg <= i386.REG_DI || reg >= i386.REG_X0 && reg <= i386.REG_X7 {
 			if f != 0 {
@@ -760,7 +760,7 @@
 		return 0
 	}
 
-	if regtyp(v) != 0 {
+	if regtyp(v) {
 		reg = int(v.Reg)
 		if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
 			if (s.Reg == i386.REG_BP) && a.Index != obj.TYPE_NONE {
@@ -838,10 +838,10 @@
 	}
 }
 
-func smallindir(a *obj.Addr, reg *obj.Addr) int {
-	return bool2int(regtyp(reg) != 0 && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096)
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096
 }
 
-func stackaddr(a *obj.Addr) int {
-	return bool2int(a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP)
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP
 }
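
needc, converted above, guards the ADD/SUB ±1 → INC/DEC rewrite: INC and DEC do not update the carry flag, so the rewrite is only safe when no later instruction reads the carry before something overwrites it. A compilable sketch of that forward scan, with hypothetical flag constants standing in for the gc.ProgInfo bits:

	package main

	// Hypothetical stand-ins for the relevant gc.ProgInfo flag bits.
	const (
		UseCarry  uint32 = 1 << 0
		SetCarry  uint32 = 1 << 1
		KillCarry uint32 = 1 << 2
	)

	// Prog is a toy instruction carrying precomputed flags and a link to
	// the next instruction in the function.
	type Prog struct {
		Flags uint32
		Link  *Prog
	}

	// needc reports whether some instruction from p onward reads the carry
	// bit before anything overwrites it, mirroring the converted helper.
	func needc(p *Prog) bool {
		for ; p != nil; p = p.Link {
			if p.Flags&UseCarry != 0 {
				return true
			}
			if p.Flags&(SetCarry|KillCarry) != 0 {
				return false
			}
		}
		return false
	}

	func main() {
		adc := &Prog{Flags: UseCarry}            // consumes the carry
		cmp := &Prog{Flags: SetCarry, Link: adc} // overwrites the carry first
		println(needc(adc)) // true: the carry is read immediately
		println(needc(cmp)) // false: the carry is clobbered before the ADC sees it
	}
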
diff --git a/src/cmd/new9g/cgen.go b/src/cmd/new9g/cgen.go
index 84ee97f..7a1e967 100644
--- a/src/cmd/new9g/cgen.go
+++ b/src/cmd/new9g/cgen.go
@@ -56,7 +56,7 @@
 		gc.OSLICESTR,
 		gc.OSLICE3,
 		gc.OSLICE3ARR:
-		if res.Op != gc.ONAME || !(res.Addable != 0) {
+		if res.Op != gc.ONAME || res.Addable == 0 {
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_slice(n, &n1)
 			cgen(&n1, res)
@@ -66,7 +66,7 @@
 		goto ret
 
 	case gc.OEFACE:
-		if res.Op != gc.ONAME || !(res.Addable != 0) {
+		if res.Op != gc.ONAME || res.Addable == 0 {
 			gc.Tempname(&n1, n.Type)
 			gc.Cgen_eface(n, &n1)
 			cgen(&n1, res)
@@ -88,7 +88,7 @@
 		}
 	}
 
-	if gc.Isfat(n.Type) != 0 {
+	if gc.Isfat(n.Type) {
 		if n.Type.Width < 0 {
 			gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
 		}
@@ -96,7 +96,7 @@
 		goto ret
 	}
 
-	if !(res.Addable != 0) {
+	if res.Addable == 0 {
 		if n.Ullman > res.Ullman {
 			regalloc(&n1, n.Type, res)
 			cgen(n, &n1)
@@ -115,7 +115,7 @@
 			goto gen
 		}
 
-		if gc.Complexop(n, res) != 0 {
+		if gc.Complexop(n, res) {
 			gc.Complexgen(n, res)
 			goto ret
 		}
@@ -123,7 +123,7 @@
 		f = 1 // gen thru register
 		switch n.Op {
 		case gc.OLITERAL:
-			if gc.Smallintconst(n) != 0 {
+			if gc.Smallintconst(n) {
 				f = 0
 			}
 
@@ -131,9 +131,9 @@
 			f = 0
 		}
 
-		if !(gc.Iscomplex[n.Type.Etype] != 0) {
+		if gc.Iscomplex[n.Type.Etype] == 0 {
 			a = optoas(gc.OAS, res.Type)
-			if sudoaddable(a, res, &addr) != 0 {
+			if sudoaddable(a, res, &addr) {
 				if f != 0 {
 					regalloc(&n2, res.Type, nil)
 					cgen(n, &n2)
@@ -164,12 +164,12 @@
 	switch n.Op {
 	case gc.OSPTR,
 		gc.OLEN:
-		if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+		if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
 			n.Addable = n.Left.Addable
 		}
 
 	case gc.OCAP:
-		if gc.Isslice(n.Left.Type) != 0 {
+		if gc.Isslice(n.Left.Type) {
 			n.Addable = n.Left.Addable
 		}
 
@@ -177,7 +177,7 @@
 		n.Addable = n.Left.Addable
 	}
 
-	if gc.Complexop(n, res) != 0 {
+	if gc.Complexop(n, res) {
 		gc.Complexgen(n, res)
 		goto ret
 	}
@@ -210,9 +210,9 @@
 		}
 	}
 
-	if !(gc.Iscomplex[n.Type.Etype] != 0) {
+	if gc.Iscomplex[n.Type.Etype] == 0 {
 		a = optoas(gc.OAS, n.Type)
-		if sudoaddable(a, n, &addr) != 0 {
+		if sudoaddable(a, n, &addr) {
 			if res.Op == gc.OREGISTER {
 				p1 = gins(a, nil, res)
 				p1.From = addr
@@ -251,11 +251,11 @@
 		p1 = gc.Gbranch(ppc64.ABR, nil, 0)
 
 		p2 = gc.Pc
-		gmove(gc.Nodbool(1), res)
+		gmove(gc.Nodbool(true), res)
 		p3 = gc.Gbranch(ppc64.ABR, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		bgen(n, true, 0, p2)
-		gmove(gc.Nodbool(0), res)
+		gmove(gc.Nodbool(false), res)
 		gc.Patch(p3, gc.Pc)
 		goto ret
 
@@ -358,7 +358,7 @@
 
 		// pointer is the first word of string or slice.
 	case gc.OSPTR:
-		if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n1, gc.Types[gc.Tptr], res)
 			p1 = gins(ppc64.AMOVD, nil, &n1)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@@ -373,7 +373,7 @@
 		regfree(&n1)
 
 	case gc.OLEN:
-		if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
 			// map and chan have len in the first int-sized word.
 			// a zero pointer means zero length
 			regalloc(&n1, gc.Types[gc.Tptr], res)
@@ -396,7 +396,7 @@
 			break
 		}
 
-		if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
 			// both slice and string have len one pointer into the struct.
 			// a zero pointer means zero length
 			igen(nl, &n1, res)
@@ -411,7 +411,7 @@
 		gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
 
 	case gc.OCAP:
-		if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+		if gc.Istype(nl.Type, gc.TCHAN) {
 			// chan has cap in the second int-sized word.
 			// a zero pointer means zero length
 			regalloc(&n1, gc.Types[gc.Tptr], res)
@@ -435,7 +435,7 @@
 			break
 		}
 
-		if gc.Isslice(nl.Type) != 0 {
+		if gc.Isslice(nl.Type) {
 			igen(nl, &n1, res)
 			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
 			n1.Xoffset += int64(gc.Array_cap)
@@ -447,11 +447,11 @@
 		gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
 
 	case gc.OADDR:
-		if n.Bounded != 0 { // let race detector avoid nil checks
+		if n.Bounded { // let race detector avoid nil checks
 			gc.Disable_checknil++
 		}
 		agen(nl, res)
-		if n.Bounded != 0 {
+		if n.Bounded {
 			gc.Disable_checknil--
 		}
 
@@ -480,7 +480,7 @@
 			cgen_div(int(n.Op), &n1, nr, res)
 			regfree(&n1)
 		} else {
-			if !(gc.Smallintconst(nr) != 0) {
+			if !gc.Smallintconst(nr) {
 				regalloc(&n2, nr.Type, res)
 				cgen(nr, &n2)
 			} else {
@@ -496,7 +496,7 @@
 	case gc.OLSH,
 		gc.ORSH,
 		gc.OLROT:
-		cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+		cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
 	}
 
 	goto ret
@@ -518,7 +518,7 @@
 	 * register for the computation.
 	 */
 sbop: // symmetric binary
-	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) != 0 || (nr.Op == gc.OLITERAL && !(gc.Smallintconst(nr) != 0)))) {
+	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
 		r = nl
 		nl = nr
 		nr = r
@@ -612,7 +612,7 @@
 		gc.Dump("cgenr-n", n)
 	}
 
-	if gc.Isfat(n.Type) != 0 {
+	if gc.Isfat(n.Type) {
 		gc.Fatal("cgenr on fat node")
 	}
 
@@ -688,33 +688,33 @@
 
 		//bounded = debug['B'] || n->bounded;
 		if nr.Addable != 0 {
-			if !(gc.Isconst(nr, gc.CTINT) != 0) {
+			if !gc.Isconst(nr, gc.CTINT) {
 				gc.Tempname(&tmp, gc.Types[gc.TINT64])
 			}
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				agenr(nl, &n3, res)
 			}
-			if !(gc.Isconst(nr, gc.CTINT) != 0) {
+			if !gc.Isconst(nr, gc.CTINT) {
 				cgen(nr, &tmp)
 				regalloc(&n1, tmp.Type, nil)
 				gmove(&tmp, &n1)
 			}
 		} else if nl.Addable != 0 {
-			if !(gc.Isconst(nr, gc.CTINT) != 0) {
+			if !gc.Isconst(nr, gc.CTINT) {
 				gc.Tempname(&tmp, gc.Types[gc.TINT64])
 				cgen(nr, &tmp)
 				regalloc(&n1, tmp.Type, nil)
 				gmove(&tmp, &n1)
 			}
 
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				agenr(nl, &n3, res)
 			}
 		} else {
 			gc.Tempname(&tmp, gc.Types[gc.TINT64])
 			cgen(nr, &tmp)
 			nr = &tmp
-			if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+			if !gc.Isconst(nl, gc.CTSTR) {
 				agenr(nl, &n3, res)
 			}
 			regalloc(&n1, tmp.Type, nil)
@@ -726,13 +726,13 @@
 		// w is width
 
 		// constant index
-		if gc.Isconst(nr, gc.CTINT) != 0 {
-			if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nr, gc.CTINT) {
+			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Fatal("constant string constant index")
 			}
 			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
-			if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
-				if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				if gc.Debug['B'] == 0 && !n.Bounded {
 					n1 = n3
 					n1.Op = gc.OINDREG
 					n1.Type = gc.Types[gc.Tptr]
@@ -765,11 +765,11 @@
 		gmove(&n1, &n2)
 		regfree(&n1)
 
-		if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+		if gc.Debug['B'] == 0 && !n.Bounded {
 			// check bounds
-			if gc.Isconst(nl, gc.CTSTR) != 0 {
+			if gc.Isconst(nl, gc.CTSTR) {
 				gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval.S)))
-			} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 				n1 = n3
 				n1.Op = gc.OINDREG
 				n1.Type = gc.Types[gc.Tptr]
@@ -799,12 +799,12 @@
 			gc.Patch(p1, gc.Pc)
 		}
 
-		if gc.Isconst(nl, gc.CTSTR) != 0 {
+		if gc.Isconst(nl, gc.CTSTR) {
 			regalloc(&n3, gc.Types[gc.Tptr], res)
 			p1 = gins(ppc64.AMOVD, nil, &n3)
 			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
 			p1.From.Type = obj.TYPE_ADDR
-		} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+		} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
 			n1 = n3
 			n1.Op = gc.OINDREG
 			n1.Type = gc.Types[gc.Tptr]
@@ -872,7 +872,7 @@
 		n = n.Left
 	}
 
-	if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
 		// Use of a nil interface or nil slice.
 		// Create a temporary we can take the address of and read.
 		// The generated code is just going to panic, so it need not
@@ -950,7 +950,7 @@
 		}
 
 		// should only get here for heap vars or paramref
-		if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+		if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
 			gc.Dump("bad agen", n)
 			gc.Fatal("agen: bad ONAME class %#x", n.Class)
 		}
@@ -1060,10 +1060,10 @@
 	// Could do the same for slice except that we need
 	// to use the real index for the bounds checking.
 	case gc.OINDEX:
-		if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
-			if gc.Isconst(n.Right, gc.CTINT) != 0 {
+		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+			if gc.Isconst(n.Right, gc.CTINT) {
 				// Compute &a.
-				if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+				if gc.Isptr[n.Left.Type.Etype] == 0 {
 					igen(n.Left, a, res)
 				} else {
 					igen(n.Left, &n1, res)
@@ -1112,7 +1112,7 @@
 	}
 
 	if n == nil {
-		n = gc.Nodbool(1)
+		n = gc.Nodbool(true)
 	}
 
 	if n.Ninit != nil {
@@ -1158,7 +1158,7 @@
 
 		// need to ask if it is bool?
 	case gc.OLITERAL:
-		if !true_ == !(n.Val.U.Bval != 0) {
+		if !true_ == (n.Val.U.Bval == 0) {
 			gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
 		}
 		goto ret
@@ -1241,7 +1241,7 @@
 			nr = r
 		}
 
-		if gc.Isslice(nl.Type) != 0 {
+		if gc.Isslice(nl.Type) {
 			// front end should only leave cmp to literal nil
 			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
 				gc.Yyerror("illegal slice comparison")
@@ -1262,7 +1262,7 @@
 			break
 		}
 
-		if gc.Isinter(nl.Type) != 0 {
+		if gc.Isinter(nl.Type) {
 			// front end should only leave cmp to literal nil
 			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
 				gc.Yyerror("illegal interface comparison")
@@ -1376,14 +1376,14 @@
 
 	case gc.OINDEX:
 		t = n.Left.Type
-		if !(gc.Isfixedarray(t) != 0) {
+		if !gc.Isfixedarray(t) {
 			break
 		}
 		off = stkof(n.Left)
 		if off == -1000 || off == 1000 {
 			return off
 		}
-		if gc.Isconst(n.Right, gc.CTINT) != 0 {
+		if gc.Isconst(n.Right, gc.CTINT) {
 			return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
 		}
 		return 1000
@@ -1473,7 +1473,6 @@
 	switch align {
 	default:
 		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
-		fallthrough
 
 	case 1:
 		op = ppc64.AMOVBU
@@ -1598,7 +1597,7 @@
 		for {
 			tmp14 := c
 			c--
-			if !(tmp14 > 0) {
+			if tmp14 <= 0 {
 				break
 			}
 
@@ -1617,19 +1616,19 @@
 	regfree(&tmp)
 }
 
-func cadable(n *gc.Node) int {
-	if !(n.Addable != 0) {
+func cadable(n *gc.Node) bool {
+	if n.Addable == 0 {
 		// dont know how it happens,
 		// but it does
-		return 0
+		return false
 	}
 
 	switch n.Op {
 	case gc.ONAME:
-		return 1
+		return true
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -1640,7 +1639,7 @@
  * nr is N when assigning a zero value.
  * return 1 if can do, 0 if can't.
  */
-func componentgen(nr *gc.Node, nl *gc.Node) int {
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
 	var nodl gc.Node
 	var nodr gc.Node
 	var tmp gc.Node
@@ -1662,12 +1661,12 @@
 		t = nl.Type
 
 		// Slices are ok.
-		if gc.Isslice(t) != 0 {
+		if gc.Isslice(t) {
 			break
 		}
 
 		// Small arrays are ok.
-		if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+		if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
 			break
 		}
 
@@ -1679,7 +1678,7 @@
 		fldcount = 0
 
 		for t = nl.Type.Type; t != nil; t = t.Down {
-			if gc.Isfat(t.Type) != 0 {
+			if gc.Isfat(t.Type) {
 				goto no
 			}
 			if t.Etype != gc.TFIELD {
@@ -1698,8 +1697,8 @@
 	}
 
 	nodl = *nl
-	if !(cadable(nl) != 0) {
-		if nr != nil && !(cadable(nr) != 0) {
+	if !cadable(nl) {
+		if nr != nil && !cadable(nr) {
 			goto no
 		}
 		igen(nl, &nodl, nil)
@@ -1708,7 +1707,7 @@
 
 	if nr != nil {
 		nodr = *nr
-		if !(cadable(nr) != 0) {
+		if !cadable(nr) {
 			igen(nr, &nodr, nil)
 			freer = 1
 		}
@@ -1736,7 +1735,7 @@
 			gc.Gvardef(nl)
 		}
 		t = nl.Type
-		if !(gc.Isslice(t) != 0) {
+		if !gc.Isslice(t) {
 			nodl.Type = t.Type
 			nodr.Type = nodl.Type
 			for fldcount = 0; fldcount < t.Bound; fldcount++ {
@@ -1876,7 +1875,7 @@
 	if freel != 0 {
 		regfree(&nodl)
 	}
-	return 0
+	return false
 
 yes:
 	if freer != 0 {
@@ -1885,5 +1884,5 @@
 	if freel != 0 {
 		regfree(&nodl)
 	}
-	return 1
+	return true
 }
diff --git a/src/cmd/new9g/ggen.go b/src/cmd/new9g/ggen.go
index 1b335ab..54bebdd 100644
--- a/src/cmd/new9g/ggen.go
+++ b/src/cmd/new9g/ggen.go
@@ -37,7 +37,7 @@
 	// iterate through declarations - they are sorted in decreasing xoffset order.
 	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
 		n = l.N
-		if !(n.Needzero != 0) {
+		if n.Needzero == 0 {
 			continue
 		}
 		if n.Class != gc.PAUTO {
@@ -187,7 +187,7 @@
 
 			p = gins(ppc64.ABL, nil, f)
 			gc.Afunclit(&p.To, f)
-			if proc == -1 || gc.Noreturn(p) != 0 {
+			if proc == -1 || gc.Noreturn(p) {
 				gins(obj.AUNDEF, nil, nil)
 			}
 			break
@@ -226,7 +226,7 @@
 		if proc == 1 {
 			ginscall(gc.Newproc, 0)
 		} else {
-			if !(gc.Hasdefer != 0) {
+			if gc.Hasdefer == 0 {
 				gc.Fatal("hasdefer=0 but has defer")
 			}
 			ginscall(gc.Deferproc, 0)
@@ -270,7 +270,7 @@
 
 	i = i.Left // interface
 
-	if !(i.Addable != 0) {
+	if i.Addable == 0 {
 		gc.Tempname(&tmpi, i.Type)
 		cgen(i, &tmpi)
 		i = &tmpi
@@ -503,9 +503,9 @@
 	check = 0
 	if gc.Issigned[t.Etype] != 0 {
 		check = 1
-		if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
+		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
 			check = 0
-		} else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
 			check = 0
 		}
 	}
@@ -723,7 +723,7 @@
 	// use 2-operand 16-bit multiply
 	// because there is no 2-operand 8-bit multiply
 	//a = AIMULW;
-	if !(gc.Smallintconst(nr) != 0) {
+	if !gc.Smallintconst(nr) {
 		regalloc(&n3, nl.Type, nil)
 		cgen(nr, &n3)
 		gins(a, &n3, &n2)
@@ -799,7 +799,7 @@
  *	res = nl << nr
  *	res = nl >> nr
  */
-func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
 	var n1 gc.Node
 	var n2 gc.Node
 	var n3 gc.Node
@@ -869,7 +869,7 @@
 	regfree(&n3)
 
 	// test and fix up large shifts
-	if !(bounded != 0) {
+	if !bounded {
 		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
 		gins(optoas(gc.OCMP, tcount), &n1, &n3)
 		p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
diff --git a/src/cmd/new9g/gsubr.go b/src/cmd/new9g/gsubr.go
index 932ae0f..91e87ff 100644
--- a/src/cmd/new9g/gsubr.go
+++ b/src/cmd/new9g/gsubr.go
@@ -93,7 +93,7 @@
 	}
 }
 
-func anyregalloc() int {
+func anyregalloc() bool {
 	var i int
 	var j int
 
@@ -106,11 +106,11 @@
 				goto ok
 			}
 		}
-		return 1
+		return true
 	ok:
 	}
 
-	return 0
+	return false
 }
 
 /*
@@ -176,7 +176,6 @@
 			fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
 		}
 		gc.Fatal("out of fixed registers")
-		fallthrough
 
 	case gc.TFLOAT32,
 		gc.TFLOAT64:
@@ -199,7 +198,6 @@
 			fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
 		}
 		gc.Fatal("out of floating registers")
-		fallthrough
 
 	case gc.TCOMPLEX64,
 		gc.TCOMPLEX128:
@@ -277,7 +275,6 @@
 	switch as {
 	default:
 		gc.Fatal("ginscon2")
-		fallthrough
 
 	case ppc64.ACMP:
 		if -ppc64.BIG <= c && c <= ppc64.BIG {
@@ -356,7 +353,7 @@
 	}
 
 	// cannot have two memory operands
-	if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+	if gc.Ismem(f) && gc.Ismem(t) {
 		goto hard
 	}
 
@@ -391,7 +388,7 @@
 		ft = tt // so big switch will choose a simple mov
 
 		// constants can't move directly to memory.
-		if gc.Ismem(t) != 0 {
+		if gc.Ismem(t) {
 			goto hard
 		}
 	}
@@ -422,7 +419,6 @@
 	switch uint32(ft)<<16 | uint32(tt) {
 	default:
 		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
-		fallthrough
 
 		/*
 		 * integer copy and truncate
@@ -1140,10 +1136,10 @@
 	OAddable = 1 << 1
 )
 
-func xgen(n *gc.Node, a *gc.Node, o int) int {
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
 	// TODO(minux)
 
-	return -1
+	return -1 != 0 /*TypeKind(100016)*/
 }
 
 func sudoclean() {
@@ -1161,9 +1157,9 @@
  * after successful sudoaddable,
  * to release the register used for a.
  */
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
 	// TODO(minux)
 
 	*a = obj.Addr{}
-	return 0
+	return false
 }
diff --git a/src/cmd/new9g/peep.go b/src/cmd/new9g/peep.go
index de3d7c3..486b316 100644
--- a/src/cmd/new9g/peep.go
+++ b/src/cmd/new9g/peep.go
@@ -69,14 +69,14 @@
 		// breaking moves that do care.  This might let us
 		// simplify or remove the next peep loop, too.
 		if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
-			if regtyp(&p.To) != 0 {
+			if regtyp(&p.To) {
 				// Try to eliminate reg->reg moves
-				if regtyp(&p.From) != 0 {
+				if regtyp(&p.From) {
 					if p.From.Type == p.To.Type {
-						if copyprop(r) != 0 {
+						if copyprop(r) {
 							excise(r)
 							t++
-						} else if subprop(r) != 0 && copyprop(r) != 0 {
+						} else if subprop(r) && copyprop(r) {
 							excise(r)
 							t++
 						}
@@ -89,10 +89,10 @@
 					if p.To.Type == obj.TYPE_REG {
 						p.From.Type = obj.TYPE_REG
 						p.From.Reg = ppc64.REGZERO
-						if copyprop(r) != 0 {
+						if copyprop(r) {
 							excise(r)
 							t++
-						} else if subprop(r) != 0 && copyprop(r) != 0 {
+						} else if subprop(r) && copyprop(r) {
 							excise(r)
 							t++
 						}
@@ -156,7 +156,7 @@
 		switch p.As {
 		case ppc64.ACMP,
 			ppc64.ACMPW: /* always safe? */
-			if !(regzer(&p.To) != 0) {
+			if regzer(&p.To) == 0 {
 				continue
 			}
 			r1 = r.S1
@@ -186,7 +186,7 @@
 			r1 = r
 			for {
 				r1 = gc.Uniqp(r1)
-				if !(r1 != nil && r1.Prog.As == obj.ANOP) {
+				if r1 == nil || r1.Prog.As != obj.ANOP {
 					break
 				}
 			}
@@ -379,9 +379,9 @@
 	return 0
 }
 
-func regtyp(a *obj.Addr) int {
+func regtyp(a *obj.Addr) bool {
 	// TODO(rsc): Floating point register exclusions?
-	return bool2int(a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO)
+	return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
 }
 
 /*
@@ -401,7 +401,7 @@
  * r0 (the argument, not the register) is the MOV at the end of the
  * above sequences.  This returns 1 if it modified any instructions.
  */
-func subprop(r0 *gc.Flow) int {
+func subprop(r0 *gc.Flow) bool {
 	var p *obj.Prog
 	var v1 *obj.Addr
 	var v2 *obj.Addr
@@ -411,12 +411,12 @@
 
 	p = r0.Prog
 	v1 = &p.From
-	if !(regtyp(v1) != 0) {
-		return 0
+	if !regtyp(v1) {
+		return false
 	}
 	v2 = &p.To
-	if !(regtyp(v2) != 0) {
-		return 0
+	if !regtyp(v2) {
+		return false
 	}
 	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
 		if gc.Uniqs(r) == nil {
@@ -428,7 +428,7 @@
 		}
 		proginfo(&info, p)
 		if info.Flags&gc.Call != 0 {
-			return 0
+			return false
 		}
 
 		if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
@@ -439,7 +439,7 @@
 			}
 		}
 
-		if copyau(&p.From, v2) != 0 || copyau1(p, v2) != 0 || copyau(&p.To, v2) != 0 {
+		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
 			break
 		}
 		if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
@@ -447,7 +447,7 @@
 		}
 	}
 
-	return 0
+	return false
 
 gotit:
 	copysub(&p.To, v1, v2, 1)
@@ -475,7 +475,7 @@
 	if gc.Debug['P'] != 0 {
 		fmt.Printf("%v last\n", r.Prog)
 	}
-	return 1
+	return true
 }
 
 /*
@@ -490,7 +490,7 @@
  *	set v1	F=1
  *	set v2	return success (caller can remove v1->v2 move)
  */
-func copyprop(r0 *gc.Flow) int {
+func copyprop(r0 *gc.Flow) bool {
 	var p *obj.Prog
 	var v1 *obj.Addr
 	var v2 *obj.Addr
@@ -498,11 +498,11 @@
 	p = r0.Prog
 	v1 = &p.From
 	v2 = &p.To
-	if copyas(v1, v2) != 0 {
+	if copyas(v1, v2) {
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("eliminating self-move\n", r0.Prog)
 		}
-		return 1
+		return true
 	}
 
 	gactive++
@@ -514,7 +514,7 @@
 
-// copy1 replaces uses of v2 with v1 starting at r and returns 1 if
+// copy1 replaces uses of v2 with v1 starting at r and returns true if
 // all uses were rewritten.
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
 	var t int
 	var p *obj.Prog
 
@@ -522,7 +522,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("act set; return 1\n")
 		}
-		return 1
+		return true
 	}
 
 	r.Active = int32(gactive)
@@ -534,7 +534,7 @@
 		if gc.Debug['P'] != 0 {
 			fmt.Printf("%v", p)
 		}
-		if !(f != 0) && gc.Uniqp(r) == nil {
+		if f == 0 && gc.Uniqp(r) == nil {
 			// Multiple predecessors; conservatively
 			// assume v1 was set on other path
 			f = 1
@@ -550,33 +550,33 @@
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
 			}
-			return 0
+			return false
 
 		case 3: /* set */
 			if gc.Debug['P'] != 0 {
 				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
 			}
-			return 1
+			return true
 
 		case 1, /* used, substitute */
 			4: /* use and set */
 			if f != 0 {
-				if !(gc.Debug['P'] != 0) {
-					return 0
+				if gc.Debug['P'] == 0 {
+					return false
 				}
 				if t == 4 {
 					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
 				} else {
 					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
 				}
-				return 0
+				return false
 			}
 
 			if copyu(p, v2, v1) != 0 {
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; sub fail; return 0\n")
 				}
-				return 0
+				return false
 			}
 
 			if gc.Debug['P'] != 0 {
@@ -586,13 +586,13 @@
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
 				}
-				return 1
+				return true
 			}
 		}
 
-		if !(f != 0) {
+		if f == 0 {
 			t = copyu(p, v1, nil)
-			if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
 				f = 1
 				if gc.Debug['P'] != 0 {
 					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
@@ -604,13 +604,13 @@
 			fmt.Printf("\n")
 		}
 		if r.S2 != nil {
-			if !(copy1(v1, v2, r.S2, f) != 0) {
-				return 0
+			if !copy1(v1, v2, r.S2, f) {
+				return false
 			}
 		}
 	}
 
-	return 1
+	return true
 }
 
 // If s==nil, copyu returns the set/use of v in p; otherwise, it
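
copyu's integer result encodes how v appears in p; the meanings can be read off the cases handled in copy1 above (1 = used, 2 = read-alter-rewrite, 3 = set, 4 = used and set, 0 otherwise). A sketch with hypothetical named constants, offered purely as a reading aid; the code itself keeps the bare integers:

	package main

	import "fmt"

	// Hypothetical names for copyu's return codes, inferred from the
	// switch in copy1 above; the compiler uses the bare integers.
	const (
		copyNone    = 0 // v does not appear in p
		copyUsed    = 1 // v is read by p; the use can be substituted
		copyRAR     = 2 // read-alter-rewrite; substitution must stop
		copySet     = 3 // p sets v; later uses refer to the new value
		copyUsedSet = 4 // p both reads and then sets v
	)

	func main() {
		fmt.Println(copyNone, copyUsed, copyRAR, copySet, copyUsedSet)
	}
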
@@ -671,7 +671,7 @@
 			}
 
 			// Update only indirect uses of v in p->to
-			if !(copyas(&p.To, v) != 0) {
+			if !copyas(&p.To, v) {
 				if copysub(&p.To, v, s, 1) != 0 {
 					return 1
 				}
@@ -679,21 +679,21 @@
 			return 0
 		}
 
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			// Fix up implicit from
 			if p.From.Type == obj.TYPE_NONE {
 				p.From = p.To
 			}
-			if copyau(&p.From, v) != 0 {
+			if copyau(&p.From, v) {
 				return 4
 			}
 			return 3
 		}
 
-		if copyau(&p.From, v) != 0 {
+		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			// p->to only indirectly uses v
 			return 1
 		}
@@ -707,7 +707,7 @@
 		ppc64.AMOVWZU,
 		ppc64.AMOVDU:
 		if p.From.Type == obj.TYPE_MEM {
-			if copyas(&p.From, v) != 0 {
+			if copyas(&p.From, v) {
 				// No s!=nil check; need to fail
 				// anyway in that case
 				return 2
@@ -720,11 +720,11 @@
 				return 0
 			}
 
-			if copyas(&p.To, v) != 0 {
+			if copyas(&p.To, v) {
 				return 3
 			}
 		} else if p.To.Type == obj.TYPE_MEM {
-			if copyas(&p.To, v) != 0 {
+			if copyas(&p.To, v) {
 				return 2
 			}
 			if s != nil {
@@ -734,7 +734,7 @@
 				return 0
 			}
 
-			if copyau(&p.From, v) != 0 {
+			if copyau(&p.From, v) {
 				return 1
 			}
 		} else {
@@ -745,7 +745,7 @@
 
 	case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
 		ppc64.ARLWMICC:
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			return 2
 		}
 		fallthrough
@@ -806,7 +806,7 @@
 			}
 
 			// Update only indirect uses of v in p->to
-			if !(copyas(&p.To, v) != 0) {
+			if !copyas(&p.To, v) {
 				if copysub(&p.To, v, s, 1) != 0 {
 					return 1
 				}
@@ -814,7 +814,7 @@
 			return 0
 		}
 
-		if copyas(&p.To, v) != 0 {
+		if copyas(&p.To, v) {
 			if p.Reg == 0 {
 				// Fix up implicit reg (e.g., ADD
 				// R3,R4 -> ADD R3,R4,R4) so we can
@@ -822,22 +822,22 @@
 				p.Reg = p.To.Reg
 			}
 
-			if copyau(&p.From, v) != 0 {
+			if copyau(&p.From, v) {
 				return 4
 			}
-			if copyau1(p, v) != 0 {
+			if copyau1(p, v) {
 				return 4
 			}
 			return 3
 		}
 
-		if copyau(&p.From, v) != 0 {
+		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau1(p, v) != 0 {
+		if copyau1(p, v) {
 			return 1
 		}
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 		return 0
@@ -866,10 +866,10 @@
 			return copysub(&p.To, v, s, 1)
 		}
 
-		if copyau(&p.From, v) != 0 {
+		if copyau(&p.From, v) {
 			return 1
 		}
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 		return 0
@@ -885,7 +885,7 @@
 			return 0
 		}
 
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 1
 		}
 		return 0
@@ -928,7 +928,7 @@
 			return 0
 		}
 
-		if copyau(&p.To, v) != 0 {
+		if copyau(&p.To, v) {
 			return 4
 		}
 		return 3
@@ -982,15 +982,15 @@
 // If a is the from operand, this means this operation reads the
 // register in v.  If a is the to operand, this means this operation
 // writes the register in v.
-func copyas(a *obj.Addr, v *obj.Addr) int {
-	if regtyp(v) != 0 {
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if regtyp(v) {
 		if a.Type == v.Type {
 			if a.Reg == v.Reg {
-				return 1
+				return true
 			}
 		}
 	}
-	return 0
+	return false
 }
 
-// copyau returns 1 if a either directly or indirectly addresses the
+// copyau returns true if a either directly or indirectly addresses the
@@ -1000,36 +1000,36 @@
 // register in v.  If a is the to operand, this means the operation
 // either reads or writes the register in v (if !copyas(a, v), then
 // the operation reads the register in v).
-func copyau(a *obj.Addr, v *obj.Addr) int {
-	if copyas(a, v) != 0 {
-		return 1
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
 	}
 	if v.Type == obj.TYPE_REG {
 		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
 			if v.Reg == a.Reg {
-				return 1
+				return true
 			}
 		}
 	}
-	return 0
+	return false
 }
 
-// copyau1 returns 1 if p->reg references the same register as v and v
+// copyau1 returns true if p->reg references the same register as v and v
 // is a direct reference.
-func copyau1(p *obj.Prog, v *obj.Addr) int {
-	if regtyp(v) != 0 && v.Reg != 0 {
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+	if regtyp(v) && v.Reg != 0 {
 		if p.Reg == v.Reg {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
-// copysub replaces v with s in a if f!=0 or indicates it if could if f==0.
+// copysub replaces v with s in a if f!=0, or indicates whether it could if f==0.
 // Returns 1 on failure to substitute (it always succeeds on ppc64).
 func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
 	if f != 0 {
-		if copyau(a, v) != 0 {
+		if copyau(a, v) {
 			a.Reg = s.Reg
 		}
 	}
@@ -1040,32 +1040,32 @@
 // Returns 1 on failure to substitute (it always succeeds on ppc64).
 func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
 	if f != 0 {
-		if copyau1(p1, v) != 0 {
+		if copyau1(p1, v) {
 			p1.Reg = s.Reg
 		}
 	}
 	return 0
 }
 
-func sameaddr(a *obj.Addr, v *obj.Addr) int {
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
 	if a.Type != v.Type {
-		return 0
+		return false
 	}
-	if regtyp(v) != 0 && a.Reg == v.Reg {
-		return 1
+	if regtyp(v) && a.Reg == v.Reg {
+		return true
 	}
 	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
 		if v.Offset == a.Offset {
-			return 1
+			return true
 		}
 	}
-	return 0
+	return false
 }
 
-func smallindir(a *obj.Addr, reg *obj.Addr) int {
-	return bool2int(reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096)
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
 }
 
-func stackaddr(a *obj.Addr) int {
-	return bool2int(a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP)
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
 }
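
The last three functions above show the simplest shape of the whole conversion: a comparison that used to be forced through bool2int and tested with != 0 is now returned and tested directly. A minimal before/after sketch with hypothetical names (addr, regSP, stackaddrOld, stackaddrNew), not the real cmd/internal/obj or ppc64 definitions:

	package main

	import "fmt"

	const regSP = 1 // hypothetical stand-in for ppc64.REGSP

	// addr is a hypothetical, stripped-down operand.
	type addr struct {
		isReg bool
		reg   int
	}

	// bool2int is the helper the old, int-valued style relied on.
	func bool2int(b bool) int {
		if b {
			return 1
		}
		return 0
	}

	// Old style: a boolean condition forced through an int result.
	func stackaddrOld(a addr) int { return bool2int(a.isReg && a.reg == regSP) }

	// New style: the condition is the result.
	func stackaddrNew(a addr) bool { return a.isReg && a.reg == regSP }

	func main() {
		a := addr{isReg: true, reg: regSP}
		if stackaddrOld(a) != 0 { // old call site
			fmt.Println("old style: stack pointer")
		}
		if stackaddrNew(a) { // new call site
			fmt.Println("new style: stack pointer")
		}
	}
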