cmd/compile: guard the &-to-<<>> opt against small constants

Converting an AND with a constant K into a pair of shifts, when K
fits in a one-byte immediate, is probably not an optimization; it
also interferes with other patterns that we want to see fire, like
(<< (AND K)) [for small K] and bounds-check elimination for masked
indices.
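
For the record, the generic rules recognize masks that are a solid
run of ones at the bottom (nlz(y)+nto(y) == 64) or the top
(nlo(y)+ntz(y) == 64) of the word. A minimal sketch of the low-mask
identity, using math/bits as a stand-in for the compiler's own
helpers (the stand-in definitions here are assumptions):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // Sketch versions of the rewrite rules' helpers, via math/bits:
    // nlz counts leading zeros, nto counts trailing ones.
    func nlz(x uint64) int { return bits.LeadingZeros64(x) }
    func nto(x uint64) int { return bits.TrailingZeros64(^x) }

    func main() {
        const y = uint64(0xFF) // low mask; fits in a one-byte immediate
        x := uint64(0x1234)
        s := uint(nlz(y)) // nlz(y)+nto(y) == 64, so x&y == x<<s>>s
        fmt.Printf("%#x %#x\n", x&y, x<<s>>s) // both print 0x34
    }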

It turns out that on Intel even 32-bit signed immediates beat the
shift pair: guarding all masks that fit in a 32-bit immediate shrinks
the tool binaries by 0.09%, versus 0.07% for guarding only the 8-bit
immediates.
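
The new guards in the 64-bit rules below (nto(y) >= 32 and
ntz(y) >= 32) draw that line: the shift pair is kept only for masks
too wide for a sign-extended 32-bit immediate. A hedged sketch of the
boundary (fitsIn32 is an illustrative helper, not a compiler
function):

    package main

    import "fmt"

    // fitsIn32 reports whether y is representable as a sign-extended
    // 32-bit immediate, the widest AND immediate on amd64. (Illustrative
    // helper; the rules express this bound via nto/ntz instead.)
    func fitsIn32(y int64) bool { return y == int64(int32(y)) }

    func main() {
        fmt.Println(fitsIn32(0x7FFFFFFF))      // true:  nto = 31, stays an AND
        fmt.Println(fitsIn32(0xFFFFFFFF))      // false: nto = 32, shifts allowed
        fmt.Println(fitsIn32(int64(-1) << 32)) // false: ntz = 32, shifts allowed
    }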

RLH found this one while working on the new/next GC.

Change-Id: I2414a8de1dd58d680d18587577fbadb7ff4f67d9
Reviewed-on: https://go-review.googlesource.com/20410
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: David Chase <drchase@google.com>
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index cde60c1..122c661 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -693,56 +693,6 @@
 		v.AuxInt = 0
 		return true
 	}
-	// match: (And32 <t> (Const32 [y]) x)
-	// cond: nlz(int64(int32(y))) + nto(int64(int32(y))) == 64
-	// result: (Rsh32Ux32 (Lsh32x32 <t> x (Const32 <t> [nlz(int64(int32(y)))-32])) (Const32 <t> [nlz(int64(int32(y)))-32]))
-	for {
-		t := v.Type
-		if v.Args[0].Op != OpConst32 {
-			break
-		}
-		y := v.Args[0].AuxInt
-		x := v.Args[1]
-		if !(nlz(int64(int32(y)))+nto(int64(int32(y))) == 64) {
-			break
-		}
-		v.reset(OpRsh32Ux32)
-		v0 := b.NewValue0(v.Line, OpLsh32x32, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpConst32, t)
-		v1.AuxInt = nlz(int64(int32(y))) - 32
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst32, t)
-		v2.AuxInt = nlz(int64(int32(y))) - 32
-		v.AddArg(v2)
-		return true
-	}
-	// match: (And32 <t> (Const32 [y]) x)
-	// cond: nlo(int64(int32(y))) + ntz(int64(int32(y))) == 64
-	// result: (Lsh32x32 (Rsh32Ux32 <t> x (Const32 <t> [ntz(int64(int32(y)))])) (Const32 <t> [ntz(int64(int32(y)))]))
-	for {
-		t := v.Type
-		if v.Args[0].Op != OpConst32 {
-			break
-		}
-		y := v.Args[0].AuxInt
-		x := v.Args[1]
-		if !(nlo(int64(int32(y)))+ntz(int64(int32(y))) == 64) {
-			break
-		}
-		v.reset(OpLsh32x32)
-		v0 := b.NewValue0(v.Line, OpRsh32Ux32, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpConst32, t)
-		v1.AuxInt = ntz(int64(int32(y)))
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst32, t)
-		v2.AuxInt = ntz(int64(int32(y)))
-		v.AddArg(v2)
-		return true
-	}
 	return false
 }
 func rewriteValuegeneric_OpAnd64(v *Value, config *Config) bool {
@@ -812,7 +762,7 @@
 		return true
 	}
 	// match: (And64 <t> (Const64 [y]) x)
-	// cond: nlz(y) + nto(y) == 64
+	// cond: nlz(y) + nto(y) == 64 && nto(y) >= 32
 	// result: (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
 	for {
 		t := v.Type
@@ -821,7 +771,7 @@
 		}
 		y := v.Args[0].AuxInt
 		x := v.Args[1]
-		if !(nlz(y)+nto(y) == 64) {
+		if !(nlz(y)+nto(y) == 64 && nto(y) >= 32) {
 			break
 		}
 		v.reset(OpRsh64Ux64)
@@ -837,7 +787,7 @@
 		return true
 	}
 	// match: (And64 <t> (Const64 [y]) x)
-	// cond: nlo(y) + ntz(y) == 64
+	// cond: nlo(y) + ntz(y) == 64 && ntz(y) >= 32
 	// result: (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
 	for {
 		t := v.Type
@@ -846,7 +796,7 @@
 		}
 		y := v.Args[0].AuxInt
 		x := v.Args[1]
-		if !(nlo(y)+ntz(y) == 64) {
+		if !(nlo(y)+ntz(y) == 64 && ntz(y) >= 32) {
 			break
 		}
 		v.reset(OpLsh64x64)