cmd/compile: convert remaining ARM64.rules lines to typed aux mode

This patch adds the ARM64BitField auxInt kind to auxIntType() and
returns its Go type as arm64BitField, which is defined as int16.
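
For reference, a minimal sketch of the two pieces involved, simplified
from rulegen.go and rewrite.go (only a few auxInt kinds are shown, and
the free-function form of auxIntType here is an illustration, not the
exact generator API):

	// auxIntType maps an op's declared auxInt kind to the Go type
	// used by the generated, typed rewrite rules (sketch).
	func auxIntType(kind string) string {
		switch kind {
		case "Int32":
			return "int32"
		case "Int64":
			return "int64"
		case "ARM64BitField":
			return "arm64BitField"
		default:
			return "invalid"
		}
	}

	// arm64BitField packs an ARM64 bitfield spec into 16 bits; rules
	// read it via methods such as getARM64BFlsb() and
	// getARM64BFwidth(), as seen in the diff below.
	type arm64BitField int16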

The Go type of a SymOff auxInt is int32, but some helper functions
(such as min(), areAdjacentOffsets(), and read16/32/64()) take int64
parameters and treat the offset as int64, so this patch adds explicit
type conversions to the rules that pass a SymOff auxInt to them.
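
As an illustration, minimal sketches of two of those helpers (the
int64 signatures match rewrite.go; the bodies are simplified
equivalents):

	// min returns the smaller of two int64 values.
	func min(x, y int64) int64 {
		if x < y {
			return x
		}
		return y
	}

	// areAdjacentOffsets reports whether off1 and off2 are exactly
	// size bytes apart, in either order.
	func areAdjacentOffsets(off1, off2, size int64) bool {
		return off1+size == off2 || off1 == off2+size
	}

A typed rule therefore converts a SymOff auxInt i on the way in, e.g.
areAdjacentOffsets(int64(i), int64(j), 1), and converts the int64
result back, e.g. [int32(min(int64(i), int64(j)))].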

Passes toolstash-check -all.

Change-Id: Ib234b48d0a97ef244dd37878e06b5825316dd782
Reviewed-on: https://go-review.googlesource.com/c/go/+/234378
Reviewed-by: Keith Randall <khr@golang.org>
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index ab8bd0e..983f884 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -1138,7 +1138,7 @@
 // ((x>>8) | (x<<8)) -> (REV16 x), the type of x is uint16, "|" can also be "^" or "+".
 // UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by
 // ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL.
-((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x) -> (REV16 x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
 ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x)
 
 // use indexed loads and stores
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index a30ef3a..c29e7f7 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -1533,156 +1533,156 @@
       (CSEL0 {arm64Negate(boolval.Op)} x flagArg(boolval))
 
 // absorb shifts into ops
-(NEG x:(SLLconst [c] y)) && clobberIfDead(x) -> (NEGshiftLL [c] y)
-(NEG x:(SRLconst [c] y)) && clobberIfDead(x) -> (NEGshiftRL [c] y)
-(NEG x:(SRAconst [c] y)) && clobberIfDead(x) -> (NEGshiftRA [c] y)
-(MVN x:(SLLconst [c] y)) && clobberIfDead(x) -> (MVNshiftLL [c] y)
-(MVN x:(SRLconst [c] y)) && clobberIfDead(x) -> (MVNshiftRL [c] y)
-(MVN x:(SRAconst [c] y)) && clobberIfDead(x) -> (MVNshiftRA [c] y)
-(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftLL x0 y [c])
-(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRL x0 y [c])
-(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRA x0 y [c])
-(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (SUBshiftLL x0 y [c])
-(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (SUBshiftRL x0 y [c])
-(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (SUBshiftRA x0 y [c])
-(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ANDshiftLL x0 y [c])
-(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ANDshiftRL x0 y [c])
-(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ANDshiftRA x0 y [c])
-(OR  x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ORshiftLL  x0 y [c]) // useful for combined load
-(OR  x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ORshiftRL  x0 y [c])
-(OR  x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ORshiftRA  x0 y [c])
-(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (XORshiftLL x0 y [c])
-(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (XORshiftRL x0 y [c])
-(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (XORshiftRA x0 y [c])
-(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (BICshiftLL x0 y [c])
-(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (BICshiftRL x0 y [c])
-(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (BICshiftRA x0 y [c])
-(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ORNshiftLL x0 y [c])
-(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ORNshiftRL x0 y [c])
-(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ORNshiftRA x0 y [c])
-(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (EONshiftLL x0 y [c])
-(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (EONshiftRL x0 y [c])
-(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (EONshiftRA x0 y [c])
-(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (CMPshiftLL x0 y [c])
-(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftLL x1 y [c]))
-(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRL x0 y [c])
-(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRL x1 y [c]))
-(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRA x0 y [c])
-(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRA x1 y [c]))
-(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftLL x0 y [c])
-(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRL x0 y [c])
-(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRA x0 y [c])
-(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftLL x0 y [c])
-(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRL x0 y [c])
-(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRA x0 y [c])
+(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
+(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
+(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
+(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
+(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
+(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
+(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
+(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
+(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
+(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
+(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
+(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
+(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
+(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
+(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
+(OR  x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL  x0 y [c]) // useful for combined load
+(OR  x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL  x0 y [c])
+(OR  x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA  x0 y [c])
+(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
+(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
+(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
+(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
+(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
+(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
+(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
+(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
+(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
+(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
+(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
+(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
+(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
+(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
+(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
+(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
+(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
+(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
+(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
+(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
+(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
+(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
+(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
+(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
 
 // prefer *const ops to *shift ops
-(ADDshiftLL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
-(ADDshiftRL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRLconst <x.Type> x [d]))
-(ADDshiftRA (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRAconst <x.Type> x [d]))
-(ANDshiftLL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SLLconst <x.Type> x [d]))
-(ANDshiftRL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRLconst <x.Type> x [d]))
-(ANDshiftRA (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRAconst <x.Type> x [d]))
-(ORshiftLL  (MOVDconst [c]) x [d]) -> (ORconst  [c] (SLLconst <x.Type> x [d]))
-(ORshiftRL  (MOVDconst [c]) x [d]) -> (ORconst  [c] (SRLconst <x.Type> x [d]))
-(ORshiftRA  (MOVDconst [c]) x [d]) -> (ORconst  [c] (SRAconst <x.Type> x [d]))
-(XORshiftLL (MOVDconst [c]) x [d]) -> (XORconst [c] (SLLconst <x.Type> x [d]))
-(XORshiftRL (MOVDconst [c]) x [d]) -> (XORconst [c] (SRLconst <x.Type> x [d]))
-(XORshiftRA (MOVDconst [c]) x [d]) -> (XORconst [c] (SRAconst <x.Type> x [d]))
-(CMPshiftLL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
-(CMPshiftRL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
-(CMPshiftRA (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
-(CMNshiftLL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SLLconst <x.Type> x [d]))
-(CMNshiftRL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRLconst <x.Type> x [d]))
-(CMNshiftRA (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRAconst <x.Type> x [d]))
-(TSTshiftLL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SLLconst <x.Type> x [d]))
-(TSTshiftRL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRLconst <x.Type> x [d]))
-(TSTshiftRA (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRAconst <x.Type> x [d]))
+(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL  (MOVDconst [c]) x [d]) => (ORconst  [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL  (MOVDconst [c]) x [d]) => (ORconst  [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA  (MOVDconst [c]) x [d]) => (ORconst  [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
 
 // constant folding in *shift ops
-(MVNshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)<<uint64(d))])
-(MVNshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)>>uint64(d))])
-(MVNshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [^(c>>uint64(d))])
-(NEGshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)<<uint64(d))])
-(NEGshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)>>uint64(d))])
-(NEGshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [-(c>>uint64(d))])
-(ADDshiftLL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)<<uint64(d))])
-(ADDshiftRL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)>>uint64(d))])
-(ADDshiftRA x (MOVDconst [c]) [d]) -> (ADDconst x [c>>uint64(d)])
-(SUBshiftLL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)<<uint64(d))])
-(SUBshiftRL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)>>uint64(d))])
-(SUBshiftRA x (MOVDconst [c]) [d]) -> (SUBconst x [c>>uint64(d)])
-(ANDshiftLL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)<<uint64(d))])
-(ANDshiftRL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)>>uint64(d))])
-(ANDshiftRA x (MOVDconst [c]) [d]) -> (ANDconst x [c>>uint64(d)])
-(ORshiftLL  x (MOVDconst [c]) [d]) -> (ORconst  x [int64(uint64(c)<<uint64(d))])
-(ORshiftRL  x (MOVDconst [c]) [d]) -> (ORconst  x [int64(uint64(c)>>uint64(d))])
-(ORshiftRA  x (MOVDconst [c]) [d]) -> (ORconst  x [c>>uint64(d)])
-(XORshiftLL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)<<uint64(d))])
-(XORshiftRL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)>>uint64(d))])
-(XORshiftRA x (MOVDconst [c]) [d]) -> (XORconst x [c>>uint64(d)])
-(BICshiftLL x (MOVDconst [c]) [d]) -> (ANDconst x [^int64(uint64(c)<<uint64(d))])
-(BICshiftRL x (MOVDconst [c]) [d]) -> (ANDconst x [^int64(uint64(c)>>uint64(d))])
-(BICshiftRA x (MOVDconst [c]) [d]) -> (ANDconst x [^(c>>uint64(d))])
-(ORNshiftLL x (MOVDconst [c]) [d]) -> (ORconst  x [^int64(uint64(c)<<uint64(d))])
-(ORNshiftRL x (MOVDconst [c]) [d]) -> (ORconst  x [^int64(uint64(c)>>uint64(d))])
-(ORNshiftRA x (MOVDconst [c]) [d]) -> (ORconst  x [^(c>>uint64(d))])
-(EONshiftLL x (MOVDconst [c]) [d]) -> (XORconst x [^int64(uint64(c)<<uint64(d))])
-(EONshiftRL x (MOVDconst [c]) [d]) -> (XORconst x [^int64(uint64(c)>>uint64(d))])
-(EONshiftRA x (MOVDconst [c]) [d]) -> (XORconst x [^(c>>uint64(d))])
-(CMPshiftLL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)<<uint64(d))])
-(CMPshiftRL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)>>uint64(d))])
-(CMPshiftRA x (MOVDconst [c]) [d]) -> (CMPconst x [c>>uint64(d)])
-(CMNshiftLL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)<<uint64(d))])
-(CMNshiftRL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)>>uint64(d))])
-(CMNshiftRA x (MOVDconst [c]) [d]) -> (CMNconst x [c>>uint64(d)])
-(TSTshiftLL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)<<uint64(d))])
-(TSTshiftRL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)>>uint64(d))])
-(TSTshiftRA x (MOVDconst [c]) [d]) -> (TSTconst x [c>>uint64(d)])
+(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
+(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
+(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
+(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
+(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
+(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
+(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
+(ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
+(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
+(SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
+(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
+(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
+(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ORshiftLL  x (MOVDconst [c]) [d]) => (ORconst  x [int64(uint64(c)<<uint64(d))])
+(ORshiftRL  x (MOVDconst [c]) [d]) => (ORconst  x [int64(uint64(c)>>uint64(d))])
+(ORshiftRA  x (MOVDconst [c]) [d]) => (ORconst  x [c>>uint64(d)])
+(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
+(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
+(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
+(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
+(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
+(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst  x [^int64(uint64(c)<<uint64(d))])
+(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst  x [^int64(uint64(c)>>uint64(d))])
+(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst  x [^(c>>uint64(d))])
+(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
+(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
+(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
+(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
+(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
+(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
+(CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
+(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
+(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
+(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
 
 // simplification with *shift ops
-(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(SUBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(SUBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
-(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
-(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
-(ORshiftLL  x y:(SLLconst x [c]) [d]) && c==d -> y
-(ORshiftRL  x y:(SRLconst x [c]) [d]) && c==d -> y
-(ORshiftRA  x y:(SRAconst x [c]) [d]) && c==d -> y
-(XORshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(XORshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(XORshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(BICshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(EONshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(EONshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(EONshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(ORNshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(ORNshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(ORNshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [-1])
+(SUBshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(SUBshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(SUBshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
+(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d => y
+(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d => y
+(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d => y
+(ORshiftLL  x y:(SLLconst x [c]) [d]) && c==d => y
+(ORshiftRL  x y:(SRLconst x [c]) [d]) && c==d => y
+(ORshiftRA  x y:(SRAconst x [c]) [d]) && c==d => y
+(XORshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(XORshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(XORshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
+(BICshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(BICshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(BICshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
+(EONshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(EONshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(EONshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(ORNshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(ORNshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(ORNshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [-1])
 
 // Generate rotates with const shift
-(ADDshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
-( ORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
-(XORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
-(ADDshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [   c] x)
-( ORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [   c] x)
-(XORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [   c] x)
+(ADDshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+( ORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(XORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(ADDshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [   c] x)
+( ORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [   c] x)
+(XORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [   c] x)
 
 (ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-	-> (RORWconst [32-c] x)
+	=> (RORWconst [32-c] x)
 ( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-	-> (RORWconst [32-c] x)
+	=> (RORWconst [32-c] x)
 (XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-	-> (RORWconst [32-c] x)
-(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
-( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
-(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
+	=> (RORWconst [32-c] x)
+(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
 
-(RORconst [c] (RORconst [d] x)) -> (RORconst [(c+d)&63] x)
-(RORWconst [c] (RORWconst [d] x)) -> (RORWconst [(c+d)&31] x)
+(RORconst [c] (RORconst [d] x)) => (RORconst [(c+d)&63] x)
+(RORWconst [c] (RORWconst [d] x)) => (RORWconst [(c+d)&31] x)
 
 // Generate rotates with non-const shift.
 // These rules match the Go source code like
@@ -1693,11 +1693,11 @@
 ((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
 	(CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
 		(CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
-	-> (ROR x (NEG <t> y))
+	=> (ROR x (NEG <t> y))
 ((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
 	(CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
 		(CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
-	-> (ROR x y)
+	=> (ROR x y)
 
 // These rules match the Go source code like
 //	y &= 31
@@ -1707,140 +1707,140 @@
 ((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
 	(CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
 		(CMPconst [64]  (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
-	-> (RORW x (NEG <t> y))
+	=> (RORW x (NEG <t> y))
 ((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
 	(CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
 		(CMPconst [64]  (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
-	-> (RORW x y)
+	=> (RORW x y)
 
-// ((x>>8) | (x<<8)) -> (REV16W x), the type of x is uint16, "|" can also be "^" or "+".
-((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) -> (REV16W x)
+// ((x>>8) | (x<<8)) => (REV16W x), the type of x is uint16, "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
 
 // Extract from reg pair
-(ADDshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
-( ORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
-(XORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
+(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
 
 (ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-	-> (EXTRWconst [32-c] x2 x)
+	=> (EXTRWconst [32-c] x2 x)
 ( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-	-> (EXTRWconst [32-c] x2 x)
+	=> (EXTRWconst [32-c] x2 x)
 (XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
-	-> (EXTRWconst [32-c] x2 x)
+	=> (EXTRWconst [32-c] x2 x)
 
 // Rewrite special pairs of shifts to AND.
 // On ARM64 the bitmask can fit into an instruction.
-(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
-(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
+(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
+(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
 
 // Special case setting bit as 1. An example is math.Copysign(c,-1)
-(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0  -> (ORconst [c1] x)
+(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0  => (ORconst [c1] x)
 
 // bitfield ops
 
 // sbfiz
 // (x << lc) >> rc
-(SRAconst [rc] (SLLconst [lc] x)) && lc > rc -> (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
-(MOVWreg (SLLconst [lc] x)) && lc < 32 -> (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
-(MOVHreg (SLLconst [lc] x)) && lc < 16 -> (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
-(MOVBreg (SLLconst [lc] x)) && lc < 8 -> (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
+(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
 
 // sbfx
 // (x << lc) >> rc
-(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc -> (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
-(SRAconst [rc] (MOVWreg x)) && rc < 32 -> (SBFX [armBFAuxInt(rc, 32-rc)] x)
-(SRAconst [rc] (MOVHreg x)) && rc < 16 -> (SBFX [armBFAuxInt(rc, 16-rc)] x)
-(SRAconst [rc] (MOVBreg x)) && rc < 8 -> (SBFX [armBFAuxInt(rc, 8-rc)] x)
+(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
 
 // sbfiz/sbfx combinations: merge shifts into bitfield ops
-(SRAconst [sc] (SBFIZ [bfc] x)) && sc < getARM64BFlsb(bfc)
-	-> (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
-(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= getARM64BFlsb(bfc)
-	&& sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
-	-> (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+	=> (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
+	&& sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+	=> (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
 
 // ubfiz
 // (x & ac) << sc
 (SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
-	-> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFIZ [armBFAuxInt(sc, 32)] x)
-(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFIZ [armBFAuxInt(sc, 16)] x)
-(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFIZ [armBFAuxInt(sc, 8)] x)
+	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFIZ [armBFAuxInt(sc, 32)] x)
+(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFIZ [armBFAuxInt(sc, 16)] x)
+(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFIZ [armBFAuxInt(sc, 8)] x)
 // (x << sc) & ac
 (ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
-	-> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
 (MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc)
-	-> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
 (MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc)
-	-> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
 (MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc)
-	-> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
 // (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc > rc -> (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
 
 // ubfx
 // (x >> sc) & ac
 (ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
-	-> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFX [armBFAuxInt(sc, 32)] x)
-(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFX [armBFAuxInt(sc, 16)] x)
-(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFX [armBFAuxInt(sc, 8)] x)
+	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFX [armBFAuxInt(sc, 32)] x)
+(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFX [armBFAuxInt(sc, 16)] x)
+(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFX [armBFAuxInt(sc, 8)] x)
 // (x & ac) >> sc
 (SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
-	-> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
 (SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc)
-	-> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
 (SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc)
-	-> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
 (SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc)
-	-> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
 // (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc < rc -> (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
 
 // ubfiz/ubfx combinations: merge shifts into bitfield ops
-(SRLconst [sc] (UBFX [bfc] x)) && sc < getARM64BFwidth(bfc)
-	-> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
-(UBFX [bfc] (SRLconst [sc] x)) && sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
-	-> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
-(SLLconst [sc] (UBFIZ [bfc] x)) && sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
-	-> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
-(UBFIZ [bfc] (SLLconst [sc] x)) && sc < getARM64BFwidth(bfc)
-	-> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
+	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+	=> (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth()
+	=> (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
 // ((x << c1) >> c2) >> c3
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc == getARM64BFlsb(bfc)
-	-> (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc < getARM64BFlsb(bfc)
-	-> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc > getARM64BFlsb(bfc)
-	&& sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
-	-> (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb()
+	=> (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+	=> (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb()
+	&& sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+	=> (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
 // ((x << c1) << c2) >> c3
-(UBFX [bfc] (SLLconst [sc] x)) && sc == getARM64BFlsb(bfc)
-	-> (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
-(UBFX [bfc] (SLLconst [sc] x)) && sc < getARM64BFlsb(bfc)
-	-> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
-(UBFX [bfc] (SLLconst [sc] x)) && sc > getARM64BFlsb(bfc)
-	&& sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
-	-> (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb()
+	=> (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFlsb()
+	=> (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb()
+	&& sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+	=> (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
 
 // bfi
 (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
-	&& ac == ^((1<<uint(getARM64BFwidth(bfc))-1) << uint(getARM64BFlsb(bfc)))
-	-> (BFI [bfc] y x)
+	&& ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+	=> (BFI [bfc] y x)
 (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
 	&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
-	-> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
+	=> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
 // bfxil
-(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(getARM64BFwidth(bfc))-1)
-	-> (BFXIL [bfc] y x)
-(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == getARM64BFwidth(bfc)
-	-> (BFXIL [bfc] y x)
+(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+	=> (BFXIL [bfc] y x)
+(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth()
+	=> (BFXIL [bfc] y x)
 (ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
-	-> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
+	=> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
 
 // do combined loads
 // little endian loads
-// b[0] | b[1]<<8 -> load 16-bit
+// b[0] | b[1]<<8 => load 16-bit
 (ORshiftLL <t> [8]
 	y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))
 	y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
@@ -1849,7 +1849,7 @@
 	&& y0.Uses == 1 && y1.Uses == 1
 	&& mergePoint(b,x0,x1) != nil
 	&& clobber(x0, x1, y0, y1)
-	-> @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	=> @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 (ORshiftLL <t> [8]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))
 	y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
@@ -1859,7 +1859,7 @@
 	&& mergePoint(b,x0,x1) != nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x0, x1, y0, y1)
-	-> @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
+	=> @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
 (ORshiftLL <t> [8]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
@@ -1867,9 +1867,9 @@
 	&& y0.Uses == 1 && y1.Uses == 1
 	&& mergePoint(b,x0,x1) != nil
 	&& clobber(x0, x1, y0, y1)
-	-> @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
+	=> @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
 
-// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit
 (ORshiftLL <t> [24] o0:(ORshiftLL [16]
 	            x0:(MOVHUload [i0] {s} p mem)
 	y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
@@ -1881,7 +1881,7 @@
 	&& o0.Uses == 1
 	&& mergePoint(b,x0,x1,x2) != nil
 	&& clobber(x0, x1, x2, y1, y2, o0)
-	-> @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	=> @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 (ORshiftLL <t> [24] o0:(ORshiftLL [16]
 	            x0:(MOVHUloadidx ptr0 idx0 mem)
 	y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem)))
@@ -1894,7 +1894,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, y1, y2, o0)
-	-> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
+	=> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
 (ORshiftLL <t> [24] o0:(ORshiftLL [16]
 	            x0:(MOVHUloadidx ptr idx mem)
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
@@ -1904,7 +1904,7 @@
 	&& o0.Uses == 1
 	&& mergePoint(b,x0,x1,x2) != nil
 	&& clobber(x0, x1, x2, y1, y2, o0)
-	-> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
+	=> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
 (ORshiftLL <t> [24] o0:(ORshiftLL [16]
 	            x0:(MOVHUloadidx2 ptr0 idx0 mem)
 	y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem)))
@@ -1917,9 +1917,9 @@
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, y1, y2, o0)
-	-> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
+	=> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
 
-// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit
 (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
 	            x0:(MOVWUload [i0] {s} p mem)
 	y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem)))
@@ -1935,7 +1935,7 @@
 	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3,x4) != nil
 	&& clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
-	-> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	=> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
 	            x0:(MOVWUloadidx ptr0 idx0 mem)
 	y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem)))
@@ -1950,7 +1950,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
-	-> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
+	=> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
 (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
 	            x0:(MOVWUloadidx4 ptr0 idx0 mem)
 	y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem)))
@@ -1965,7 +1965,7 @@
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
-	-> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
+	=> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
 (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
 	            x0:(MOVWUloadidx ptr idx mem)
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
@@ -1977,9 +1977,9 @@
 	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3,x4) != nil
 	&& clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
-	-> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
+	=> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
 
-// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 32-bit
+// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 32-bit
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
 	y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem)))
 	y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
@@ -1993,7 +1993,7 @@
 	&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3) != nil
 	&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
-	-> @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	=> @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
 	y0:(MOVDnop x0:(MOVBUload [3] {s} p mem)))
 	y1:(MOVDnop x1:(MOVBUload [2] {s} p mem)))
@@ -2007,7 +2007,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
-	-> @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
+	=> @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
@@ -2018,9 +2018,9 @@
 	&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3) != nil
 	&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
-	-> @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
+	=> @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
 
-// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 64-bit
+// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 64-bit
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
 	y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem)))
 	y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem)))
@@ -2045,7 +2045,7 @@
 	&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
 	&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
-	-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	=> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
 	y0:(MOVDnop x0:(MOVBUload [7] {s} p mem)))
 	y1:(MOVDnop x1:(MOVBUload [6] {s} p mem)))
@@ -2066,7 +2066,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
-	-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
+	=> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
@@ -2084,10 +2084,10 @@
 	&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
 	&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
-	-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
+	=> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
 
 // big endian loads
-// b[1] | b[0]<<8 -> load 16-bit, reverse
+// b[1] | b[0]<<8 => load 16-bit, reverse
 (ORshiftLL <t> [8]
 	y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
 	y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
@@ -2096,7 +2096,7 @@
 	&& y0.Uses == 1 && y1.Uses == 1
 	&& mergePoint(b,x0,x1) != nil
 	&& clobber(x0, x1, y0, y1)
-	-> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
+	=> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
 (ORshiftLL <t> [8]
 	y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem)))
@@ -2106,7 +2106,7 @@
 	&& mergePoint(b,x0,x1) != nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x0, x1, y0, y1)
-	-> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
+	=> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
 (ORshiftLL <t> [8]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem)))
@@ -2114,9 +2114,9 @@
 	&& y0.Uses == 1 && y1.Uses == 1
 	&& mergePoint(b,x0,x1) != nil
 	&& clobber(x0, x1, y0, y1)
-	-> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
+	=> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
 
-// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit, reverse
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit, reverse
 (ORshiftLL <t> [24] o0:(ORshiftLL [16]
 	y0:(REV16W  x0:(MOVHUload [i2] {s} p mem))
 	y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
@@ -2128,7 +2128,7 @@
 	&& o0.Uses == 1
 	&& mergePoint(b,x0,x1,x2) != nil
 	&& clobber(x0, x1, x2, y0, y1, y2, o0)
-	-> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+	=> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
 (ORshiftLL <t> [24] o0:(ORshiftLL [16]
 	y0:(REV16W  x0:(MOVHUload [2] {s} p mem))
 	y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
@@ -2141,7 +2141,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, y0, y1, y2, o0)
-	-> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+	=> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
 (ORshiftLL <t> [24] o0:(ORshiftLL [16]
 	y0:(REV16W  x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
@@ -2151,9 +2151,9 @@
 	&& o0.Uses == 1
 	&& mergePoint(b,x0,x1,x2) != nil
 	&& clobber(x0, x1, x2, y0, y1, y2, o0)
-	-> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+	=> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
 
-// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit, reverse
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit, reverse
 (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
 	y0:(REVW    x0:(MOVWUload [i4] {s} p mem))
 	y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem)))
@@ -2169,7 +2169,7 @@
 	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3,x4) != nil
 	&& clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
-	-> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+	=> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
 (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
 	y0:(REVW    x0:(MOVWUload [4] {s} p mem))
 	y1:(MOVDnop x1:(MOVBUload [3] {s} p mem)))
@@ -2184,7 +2184,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
-	-> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+	=> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
 (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
 	y0:(REVW    x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
@@ -2196,9 +2196,9 @@
 	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3,x4) != nil
 	&& clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
-	-> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
+	=> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
 
-// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit, reverse
+// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit, reverse
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
 	y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
 	y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
@@ -2212,7 +2212,7 @@
 	&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3) != nil
 	&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
-	-> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+	=> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
 	y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
@@ -2226,7 +2226,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
-	-> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+	=> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
@@ -2237,9 +2237,9 @@
 	&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3) != nil
 	&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
-	-> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+	=> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
 
-// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit, reverse
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit, reverse
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
 	y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
 	y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
@@ -2264,7 +2264,7 @@
 	&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
 	&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
-	-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+	=> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
 	y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
@@ -2285,7 +2285,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
-	-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+	=> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
 (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
 	y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
 	y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
@@ -2303,290 +2303,286 @@
 	&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
 	&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
 	&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
-	-> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
+	=> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
 
 // Combine zero stores into larger (unaligned) stores.
 (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
 	&& x.Uses == 1
-	&& areAdjacentOffsets(i,j,1)
-	&& is32Bit(min(i,j))
+	&& areAdjacentOffsets(int64(i),int64(j),1)
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVHstorezero [min(i,j)] {s} ptr0 mem)
+	=> (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
 (MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstorezeroidx ptr1 idx1 mem)
+	=> (MOVHstorezeroidx ptr1 idx1 mem)
 (MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVHstorezeroidx ptr idx mem)
+	=> (MOVHstorezeroidx ptr idx mem)
 (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
 	&& x.Uses == 1
-	&& areAdjacentOffsets(i,j,2)
-	&& is32Bit(min(i,j))
+	&& areAdjacentOffsets(int64(i),int64(j),2)
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVWstorezero [min(i,j)] {s} ptr0 mem)
+	=> (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
 (MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVWstorezeroidx ptr1 idx1 mem)
+	=> (MOVWstorezeroidx ptr1 idx1 mem)
 (MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVWstorezeroidx ptr idx mem)
+	=> (MOVWstorezeroidx ptr idx mem)
 (MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
+	=> (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
 (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
 	&& x.Uses == 1
-	&& areAdjacentOffsets(i,j,4)
-	&& is32Bit(min(i,j))
+	&& areAdjacentOffsets(int64(i),int64(j),4)
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVDstorezero [min(i,j)] {s} ptr0 mem)
+	=> (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
 (MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVDstorezeroidx ptr1 idx1 mem)
+	=> (MOVDstorezeroidx ptr1 idx1 mem)
 (MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVDstorezeroidx ptr idx mem)
+	=> (MOVDstorezeroidx ptr idx mem)
 (MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
+	=> (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
 (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
 	&& x.Uses == 1
-	&& areAdjacentOffsets(i,j,8)
-	&& is32Bit(min(i,j))
+	&& areAdjacentOffsets(int64(i),int64(j),8)
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVQstorezero [min(i,j)] {s} ptr0 mem)
+	=> (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
 (MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVQstorezero [0] {s} p0 mem)
+	=> (MOVQstorezero [0] {s} p0 mem)
 (MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVQstorezero [0] {s} p0 mem)
+	=> (MOVQstorezero [0] {s} p0 mem)
 
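In the storezero rules above, the [i] and [j] offsets now have Go type int32 (the typed form of the stores' int32 auxInt), while min and areAdjacentOffsets keep int64 parameters; hence the explicit int64(...) arguments and the int32(...) narrowing of the result. The old is32Bit(min(i,j)) guard disappears because the minimum of two int32 offsets always fits in 32 bits. For reference, a minimal sketch of the two helpers as they appear in rewrite.go:

package ssa

// min returns the smaller of x and y; the rules convert their int32
// offsets to int64 before calling it.
func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

// areAdjacentOffsets reports whether off1 and off2 are exactly size
// bytes apart, in either order.
func areAdjacentOffsets(off1, off2, size int64) bool {
	return off1+size == off2 || off1 == off2+size
}
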
 // Combine stores into larger (unaligned) stores.
 (MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr0 w mem)
+	=> (MOVHstore [i-1] {s} ptr0 w mem)
 (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr1 idx1 w mem)
+	=> (MOVHstoreidx ptr1 idx1 w mem)
 (MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVHstoreidx ptr idx w mem)
+	=> (MOVHstoreidx ptr idx w mem)
 (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr0 w mem)
+	=> (MOVHstore [i-1] {s} ptr0 w mem)
 (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr1 idx1 w mem)
+	=> (MOVHstoreidx ptr1 idx1 w mem)
 (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr0 w mem)
+	=> (MOVHstore [i-1] {s} ptr0 w mem)
 (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr1 idx1 w mem)
+	=> (MOVHstoreidx ptr1 idx1 w mem)
 (MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr0 w mem)
+	=> (MOVHstore [i-1] {s} ptr0 w mem)
 (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr1 idx1 w mem)
+	=> (MOVHstoreidx ptr1 idx1 w mem)
 (MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr0 w0 mem)
+	=> (MOVHstore [i-1] {s} ptr0 w0 mem)
 (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr1 idx1 w0 mem)
+	=> (MOVHstoreidx ptr1 idx1 w0 mem)
 (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
-	&& getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc)
-	&& getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2)
-	&& getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8
+	&& bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+	&& bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+	&& bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr0 w0 mem)
+	=> (MOVHstore [i-1] {s} ptr0 w0 mem)
 (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
-	&& getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc)
-	&& getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2)
-	&& getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8
+	&& bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+	&& bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+	&& bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
 	&& clobber(x)
-	-> (MOVHstoreidx ptr1 idx1 w0 mem)
+	=> (MOVHstoreidx ptr1 idx1 w0 mem)
 (MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr0 w0 mem)
+	=> (MOVHstore [i-1] {s} ptr0 w0 mem)
 (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr1 idx1 w0 mem)
+	=> (MOVHstoreidx ptr1 idx1 w0 mem)
 (MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVWstore [i-2] {s} ptr0 w mem)
+	=> (MOVWstore [i-2] {s} ptr0 w mem)
 (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVWstoreidx ptr1 idx1 w mem)
+	=> (MOVWstoreidx ptr1 idx1 w mem)
 (MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVWstoreidx ptr idx w mem)
+	=> (MOVWstoreidx ptr idx w mem)
 (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+	=> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
 (MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVWstore [i-2] {s} ptr0 w mem)
+	=> (MOVWstore [i-2] {s} ptr0 w mem)
 (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVWstoreidx ptr1 idx1 w mem)
+	=> (MOVWstoreidx ptr1 idx1 w mem)
 (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+	=> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
 (MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVWstore [i-2] {s} ptr0 w mem)
+	=> (MOVWstore [i-2] {s} ptr0 w mem)
 (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVWstoreidx ptr1 idx1 w mem)
+	=> (MOVWstoreidx ptr1 idx1 w mem)
 (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+	=> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
 (MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVWstore [i-2] {s} ptr0 w0 mem)
+	=> (MOVWstore [i-2] {s} ptr0 w0 mem)
 (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVWstoreidx ptr1 idx1 w0 mem)
+	=> (MOVWstoreidx ptr1 idx1 w0 mem)
 (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
+	=> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
 (MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVDstore [i-4] {s} ptr0 w mem)
+	=> (MOVDstore [i-4] {s} ptr0 w mem)
 (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVDstoreidx ptr1 idx1 w mem)
+	=> (MOVDstoreidx ptr1 idx1 w mem)
 (MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVDstoreidx ptr idx w mem)
+	=> (MOVDstoreidx ptr idx w mem)
 (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
+	=> (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
 (MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem))
 	&& x.Uses == 1
 	&& isSamePtr(ptr0, ptr1)
 	&& clobber(x)
-	-> (MOVDstore [i-4] {s} ptr0 w0 mem)
+	=> (MOVDstore [i-4] {s} ptr0 w0 mem)
 (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVDstoreidx ptr1 idx1 w0 mem)
+	=> (MOVDstoreidx ptr1 idx1 w0 mem)
 (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
 	&& clobber(x)
-	-> (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
+	=> (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
 (MOVBstore [i] {s} ptr w
 	x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
 	x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
@@ -2603,7 +2599,7 @@
 	&& x5.Uses == 1
 	&& x6.Uses == 1
 	&& clobber(x0, x1, x2, x3, x4, x5, x6)
-	-> (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+	=> (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
 (MOVBstore [7] {s} p w
 	x0:(MOVBstore [6] {s} p (SRLconst [8] w)
 	x1:(MOVBstore [5] {s} p (SRLconst [16] w)
@@ -2623,7 +2619,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2, x3, x4, x5, x6)
-	-> (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+	=> (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
 (MOVBstore [i] {s} ptr w
 	x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w)
 	x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w)
@@ -2632,7 +2628,7 @@
 	&& x1.Uses == 1
 	&& x2.Uses == 1
 	&& clobber(x0, x1, x2)
-	-> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+	=> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
 (MOVBstore [3] {s} p w
 	x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w)
 	x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w)
@@ -2644,7 +2640,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2)
-	-> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+	=> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
 (MOVBstoreidx ptr (ADDconst [3] idx) w
 	x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w)
 	x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w)
@@ -2653,7 +2649,7 @@
 	&& x1.Uses == 1
 	&& x2.Uses == 1
 	&& clobber(x0, x1, x2)
-	-> (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+	=> (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
 (MOVBstoreidx ptr idx w
 	x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w)
 	x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w)
@@ -2662,7 +2658,7 @@
 	&& x1.Uses == 1
 	&& x2.Uses == 1
 	&& clobber(x0, x1, x2)
-	-> (MOVWstoreidx ptr idx w mem)
+	=> (MOVWstoreidx ptr idx w mem)
 (MOVBstore [i] {s} ptr w
 	x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w))
 	x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w))
@@ -2671,7 +2667,7 @@
 	&& x1.Uses == 1
 	&& x2.Uses == 1
 	&& clobber(x0, x1, x2)
-	-> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+	=> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
 (MOVBstore [3] {s} p w
 	x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w))
 	x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w))
@@ -2683,7 +2679,7 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2)
-	-> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+	=> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
 (MOVBstore [i] {s} ptr w
 	x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
 	x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
@@ -2692,7 +2688,7 @@
 	&& x1.Uses == 1
 	&& x2.Uses == 1
 	&& clobber(x0, x1, x2)
-	-> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+	=> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
 (MOVBstore [3] {s} p w
 	x0:(MOVBstore [2] {s} p (SRLconst [8] w)
 	x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w)
@@ -2704,79 +2700,79 @@
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& isSamePtr(p1, p)
 	&& clobber(x0, x1, x2)
-	-> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+	=> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
 (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+	=> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
 (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+	=> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
 (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+	=> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
 (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+	=> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
 (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+	=> (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
 (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVHstoreidx ptr idx w mem)
+	=> (MOVHstoreidx ptr idx w mem)
 (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+	=> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
 (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+	=> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
 (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
 	&& x.Uses == 1
 	&& clobber(x)
-	-> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+	=> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
 (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
 	&& x.Uses == 1
 	&& s == nil
 	&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
 	&& clobber(x)
-	-> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+	=> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
 
 // FP simplification
-(FNEGS (FMULS x y)) -> (FNMULS x y)
-(FNEGD (FMULD x y)) -> (FNMULD x y)
-(FMULS (FNEGS x) y) -> (FNMULS x y)
-(FMULD (FNEGD x) y) -> (FNMULD x y)
-(FNEGS (FNMULS x y)) -> (FMULS x y)
-(FNEGD (FNMULD x y)) -> (FMULD x y)
-(FNMULS (FNEGS x) y) -> (FMULS x y)
-(FNMULD (FNEGD x) y) -> (FMULD x y)
-(FADDS a (FMULS x y)) -> (FMADDS a x y)
-(FADDD a (FMULD x y)) -> (FMADDD a x y)
-(FSUBS a (FMULS x y)) -> (FMSUBS a x y)
-(FSUBD a (FMULD x y)) -> (FMSUBD a x y)
-(FSUBS (FMULS x y) a) -> (FNMSUBS a x y)
-(FSUBD (FMULD x y) a) -> (FNMSUBD a x y)
-(FADDS a (FNMULS x y)) -> (FMSUBS a x y)
-(FADDD a (FNMULD x y)) -> (FMSUBD a x y)
-(FSUBS a (FNMULS x y)) -> (FMADDS a x y)
-(FSUBD a (FNMULD x y)) -> (FMADDD a x y)
-(FSUBS (FNMULS x y) a) -> (FNMADDS a x y)
-(FSUBD (FNMULD x y) a) -> (FNMADDD a x y)
+(FNEGS (FMULS x y)) => (FNMULS x y)
+(FNEGD (FMULD x y)) => (FNMULD x y)
+(FMULS (FNEGS x) y) => (FNMULS x y)
+(FMULD (FNEGD x) y) => (FNMULD x y)
+(FNEGS (FNMULS x y)) => (FMULS x y)
+(FNEGD (FNMULD x y)) => (FMULD x y)
+(FNMULS (FNEGS x) y) => (FMULS x y)
+(FNMULD (FNEGD x) y) => (FMULD x y)
+(FADDS a (FMULS x y)) => (FMADDS a x y)
+(FADDD a (FMULD x y)) => (FMADDD a x y)
+(FSUBS a (FMULS x y)) => (FMSUBS a x y)
+(FSUBD a (FMULD x y)) => (FMSUBD a x y)
+(FSUBS (FMULS x y) a) => (FNMSUBS a x y)
+(FSUBD (FMULD x y) a) => (FNMSUBD a x y)
+(FADDS a (FNMULS x y)) => (FMSUBS a x y)
+(FADDD a (FNMULD x y)) => (FMSUBD a x y)
+(FSUBS a (FNMULS x y)) => (FMADDS a x y)
+(FSUBD a (FNMULD x y)) => (FMADDD a x y)
+(FSUBS (FNMULS x y) a) => (FNMADDS a x y)
+(FSUBD (FNMULD x y) a) => (FNMADDD a x y)
 
-(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read8(sym, off))])
-(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVDload  [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVDload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
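
The four read rules above need the same widening: in typed mode off is the int32 offset of a SymOff auxInt, while read8/read16/read32/read64 take int64 offsets. For orientation, the matcher rulegen emits for the MOVBUload rule has roughly this shape (an illustrative sketch in the style of the generated rewriteARM64.go, not the verbatim output):

for {
	off := auxIntToInt32(v.AuxInt) // SymOff auxInt has Go type int32
	sym := auxToSym(v.Aux)
	if v_0.Op != OpSB || !symIsRO(sym) {
		break
	}
	v.reset(OpARM64MOVDconst)
	v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off)))) // widen off for read8
	return true
}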
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 1104e69..e520503 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -1820,6 +1820,8 @@
 		return "Op"
 	case "FlagConstant":
 		return "flagConstant"
+	case "ARM64BitField":
+		return "arm64BitField"
 	default:
 		return "invalid"
 	}
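
The string returned here is the Go type rulegen prints for the auxInt, and it determines the typed accessor pair the generated files call: for ARM64BitField that is auxIntToArm64BitField and arm64BitFieldToAuxInt, both added to rewrite.go below. A toy model of the naming scheme (illustrative only, not rulegen's actual code):

package main

import (
	"fmt"
	"strings"
)

// Toy model of the accessor naming: both helper names derive from the
// Go type string returned by auxIntType.
func accessors(goType string) (get, put string) {
	return "auxIntTo" + strings.Title(goType), goType + "ToAuxInt"
}

func main() {
	fmt.Println(accessors("arm64BitField"))
	// Output: auxIntToArm64BitField arm64BitFieldToAuxInt
}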
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 6ebdf8c..063998c 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -277,3 +277,9 @@
 		panic("bad BoundsKind")
 	}
 }
+
+// arm64BitField is the Go type of the ARM64BitField auxInt.
+// If x is an ARM64BitField, then width=x&0xff, lsb=(x>>8)&0xff, and
+// width+lsb<64 for the 64-bit variant, width+lsb<32 for the 32-bit variant.
+// The meaning of width and lsb is instruction-dependent.
+type arm64BitField int16
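
A standalone round-trip of the encoding described in the comment, using plain integers (illustration only, not part of the patch):

package main

import "fmt"

func main() {
	// Pack lsb=8, width=8 the way armBFAuxInt does: width | lsb<<8.
	bfc := int16(8 | 8<<8) // 2056
	// Unpack with the same arithmetic the accessors use.
	lsb := int64(uint64(bfc) >> 8)
	width := int64(bfc) & 0xff
	fmt.Println(lsb, width) // 8 8
}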
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 0b35abf..fb35691 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -618,6 +618,9 @@
 func auxIntToValAndOff(i int64) ValAndOff {
 	return ValAndOff(i)
 }
+func auxIntToArm64BitField(i int64) arm64BitField {
+	return arm64BitField(i)
+}
 func auxIntToInt128(x int64) int128 {
 	if x != 0 {
 		panic("nonzero int128 not allowed")
@@ -658,6 +661,9 @@
 func valAndOffToAuxInt(v ValAndOff) int64 {
 	return int64(v)
 }
+func arm64BitFieldToAuxInt(v arm64BitField) int64 {
+	return int64(v)
+}
 func int128ToAuxInt(x int128) int64 {
 	if x != 0 {
 		panic("nonzero int128 not allowed")
@@ -1295,24 +1301,24 @@
 }
 
 // encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
-func armBFAuxInt(lsb, width int64) int64 {
+func armBFAuxInt(lsb, width int64) arm64BitField {
 	if lsb < 0 || lsb > 63 {
 		panic("ARM(64) bit field lsb constant out of range")
 	}
 	if width < 1 || width > 64 {
 		panic("ARM(64) bit field width constant out of range")
 	}
-	return width | lsb<<8
+	return arm64BitField(width | lsb<<8)
 }
 
 // returns the lsb part of the auxInt field of arm64 bitfield ops.
-func getARM64BFlsb(bfc int64) int64 {
+func (bfc arm64BitField) getARM64BFlsb() int64 {
 	return int64(uint64(bfc) >> 8)
 }
 
 // returns the width part of the auxInt field of arm64 bitfield ops.
-func getARM64BFwidth(bfc int64) int64 {
-	return bfc & 0xff
+func (bfc arm64BitField) getARM64BFwidth() int64 {
+	return int64(bfc) & 0xff
 }
 
 // checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
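
With the receiver form, rule conditions read directly off the typed value. A self-contained sketch of the predicate the MOVBstore/UBFX rules use, with the type and accessors mirrored from this hunk (the range checks in armBFAuxInt are omitted for brevity):

package main

import "fmt"

type arm64BitField int16

func armBFAuxInt(lsb, width int64) arm64BitField { return arm64BitField(width | lsb<<8) }

func (bfc arm64BitField) getARM64BFlsb() int64   { return int64(uint64(bfc) >> 8) }
func (bfc arm64BitField) getARM64BFwidth() int64 { return int64(bfc) & 0xff }

func main() {
	bfc := armBFAuxInt(8, 24) // as in UBFX [armBFAuxInt(8, 24)]
	// The MOVBstore rules require the field to reach the top of a
	// 32-bit value: width == 32 - lsb.
	fmt.Println(bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb()) // true
}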
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index f55e542..eaf20e2 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -2069,10 +2069,10 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (ADDshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+	// match: (ADDshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
 	// result: (REV16 x)
 	for {
-		if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+		if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
 			break
 		}
 		x := v_0.Args[0]
@@ -8537,10 +8537,10 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (ORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+	// match: (ORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
 	// result: (REV16 x)
 	for {
-		if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+		if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
 			break
 		}
 		x := v_0.Args[0]
@@ -12576,10 +12576,10 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (XORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+	// match: (XORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
 	// result: (REV16 x)
 	for {
-		if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+		if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
 			break
 		}
 		x := v_0.Args[0]
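
The rewriteARM64.go churn that follows is mechanical: every raw v.AuxInt read or write gains a typed accessor. For plain int64 aux ints the accessor pair is the identity, as defined in rewrite.go, so the payoff is compile-time type checking rather than any runtime change:

func auxIntToInt64(i int64) int64 { return i }
func int64ToAuxInt(i int64) int64 { return i }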
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 8655880..453578a 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -1229,13 +1229,13 @@
 			if x1.Op != OpARM64SLLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ADDshiftLL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -1251,13 +1251,13 @@
 			if x1.Op != OpARM64SRLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ADDshiftRL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -1273,13 +1273,13 @@
 			if x1.Op != OpARM64SRAconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ADDshiftRA)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -1300,14 +1300,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 63 {
+			if auxIntToInt64(v_0_1.AuxInt) != 63 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
@@ -1323,15 +1323,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -1340,11 +1340,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64ROR)
@@ -1370,14 +1370,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 63 {
+			if auxIntToInt64(v_0_1.AuxInt) != 63 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SLL {
@@ -1393,15 +1393,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -1410,11 +1410,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64ROR)
@@ -1438,14 +1438,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 31 {
+			if auxIntToInt64(v_0_1.AuxInt) != 31 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
@@ -1462,15 +1462,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -1479,11 +1479,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64RORW)
@@ -1513,14 +1513,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 31 {
+			if auxIntToInt64(v_0_1.AuxInt) != 31 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SLL {
@@ -1536,15 +1536,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -1553,11 +1553,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64RORW)
@@ -1650,16 +1650,16 @@
 	// match: (ADDshiftLL (MOVDconst [c]) x [d])
 	// result: (ADDconst [c] (SLLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ADDconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -1667,22 +1667,22 @@
 	// match: (ADDshiftLL x (MOVDconst [c]) [d])
 	// result: (ADDconst x [int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ADDconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
 	// match: (ADDshiftLL [c] (SRLconst x [64-c]) x)
 	// result: (RORconst [64-c] x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -1690,7 +1690,7 @@
 			break
 		}
 		v.reset(OpARM64RORconst)
-		v.AuxInt = 64 - c
+		v.AuxInt = int64ToAuxInt(64 - c)
 		v.AddArg(x)
 		return true
 	}
@@ -1699,24 +1699,24 @@
 	// result: (RORWconst [32-c] x)
 	for {
 		t := v.Type
-		c := v.AuxInt
+		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
 		if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
 			break
 		}
 		v.reset(OpARM64RORWconst)
-		v.AuxInt = 32 - c
+		v.AuxInt = int64ToAuxInt(32 - c)
 		v.AddArg(x)
 		return true
 	}
 	// match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
 	// result: (REV16W x)
 	for {
-		if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+		if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
 			break
 		}
 		x := v_0.Args[0]
@@ -1730,14 +1730,14 @@
 	// match: (ADDshiftLL [c] (SRLconst x [64-c]) x2)
 	// result: (EXTRconst [64-c] x2 x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
 		x2 := v_1
 		v.reset(OpARM64EXTRconst)
-		v.AuxInt = 64 - c
+		v.AuxInt = int64ToAuxInt(64 - c)
 		v.AddArg2(x2, x)
 		return true
 	}
@@ -1746,18 +1746,18 @@
 	// result: (EXTRWconst [32-c] x2 x)
 	for {
 		t := v.Type
-		c := v.AuxInt
+		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
 		x2 := v_1
 		if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
 			break
 		}
 		v.reset(OpARM64EXTRWconst)
-		v.AuxInt = 32 - c
+		v.AuxInt = int64ToAuxInt(32 - c)
 		v.AddArg2(x2, x)
 		return true
 	}
@@ -1770,16 +1770,16 @@
 	// match: (ADDshiftRA (MOVDconst [c]) x [d])
 	// result: (ADDconst [c] (SRAconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ADDconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -1787,14 +1787,14 @@
 	// match: (ADDshiftRA x (MOVDconst [c]) [d])
 	// result: (ADDconst x [c>>uint64(d)])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ADDconst)
-		v.AuxInt = c >> uint64(d)
+		v.AuxInt = int64ToAuxInt(c >> uint64(d))
 		v.AddArg(x)
 		return true
 	}
@@ -1807,16 +1807,16 @@
 	// match: (ADDshiftRL (MOVDconst [c]) x [d])
 	// result: (ADDconst [c] (SRLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ADDconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -1824,22 +1824,22 @@
 	// match: (ADDshiftRL x (MOVDconst [c]) [d])
 	// result: (ADDconst x [int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ADDconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
 	// match: (ADDshiftRL [c] (SLLconst x [64-c]) x)
 	// result: (RORconst [ c] x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -1847,7 +1847,7 @@
 			break
 		}
 		v.reset(OpARM64RORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(x)
 		return true
 	}
@@ -1856,8 +1856,8 @@
 	// result: (RORWconst [c] x)
 	for {
 		t := v.Type
-		c := v.AuxInt
-		if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -1865,7 +1865,7 @@
 			break
 		}
 		v.reset(OpARM64RORWconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(x)
 		return true
 	}
@@ -1925,13 +1925,13 @@
 			if x1.Op != OpARM64SLLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ANDshiftLL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -1947,13 +1947,13 @@
 			if x1.Op != OpARM64SRLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ANDshiftRL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -1969,13 +1969,13 @@
 			if x1.Op != OpARM64SRAconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ANDshiftRA)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -2074,17 +2074,17 @@
 	// cond: isARM64BFMask(sc, ac, sc)
 	// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
 	for {
-		ac := v.AuxInt
+		ac := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, ac, sc)) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
 		v.AddArg(x)
 		return true
 	}
@@ -2092,17 +2092,17 @@
 	// cond: isARM64BFMask(sc, ac, 0)
 	// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
 	for {
-		ac := v.AuxInt
+		ac := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SRLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, ac, 0)) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
 		v.AddArg(x)
 		return true
 	}
@@ -2115,16 +2115,16 @@
 	// match: (ANDshiftLL (MOVDconst [c]) x [d])
 	// result: (ANDconst [c] (SLLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -2132,14 +2132,14 @@
 	// match: (ANDshiftLL x (MOVDconst [c]) [d])
 	// result: (ANDconst x [int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -2147,13 +2147,13 @@
 	// cond: c==d
 	// result: y
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		y := v_1
 		if y.Op != OpARM64SLLconst {
 			break
 		}
-		c := y.AuxInt
+		c := auxIntToInt64(y.AuxInt)
 		if x != y.Args[0] || !(c == d) {
 			break
 		}
@@ -2169,16 +2169,16 @@
 	// match: (ANDshiftRA (MOVDconst [c]) x [d])
 	// result: (ANDconst [c] (SRAconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -2186,14 +2186,14 @@
 	// match: (ANDshiftRA x (MOVDconst [c]) [d])
 	// result: (ANDconst x [c>>uint64(d)])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = c >> uint64(d)
+		v.AuxInt = int64ToAuxInt(c >> uint64(d))
 		v.AddArg(x)
 		return true
 	}
@@ -2201,13 +2201,13 @@
 	// cond: c==d
 	// result: y
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		y := v_1
 		if y.Op != OpARM64SRAconst {
 			break
 		}
-		c := y.AuxInt
+		c := auxIntToInt64(y.AuxInt)
 		if x != y.Args[0] || !(c == d) {
 			break
 		}
@@ -2223,16 +2223,16 @@
 	// match: (ANDshiftRL (MOVDconst [c]) x [d])
 	// result: (ANDconst [c] (SRLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -2240,14 +2240,14 @@
 	// match: (ANDshiftRL x (MOVDconst [c]) [d])
 	// result: (ANDconst x [int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -2255,13 +2255,13 @@
 	// cond: c==d
 	// result: y
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		y := v_1
 		if y.Op != OpARM64SRLconst {
 			break
 		}
-		c := y.AuxInt
+		c := auxIntToInt64(y.AuxInt)
 		if x != y.Args[0] || !(c == d) {
 			break
 		}
@@ -2306,13 +2306,13 @@
 		if x1.Op != OpARM64SLLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64BICshiftLL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -2325,13 +2325,13 @@
 		if x1.Op != OpARM64SRLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64BICshiftRL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -2344,13 +2344,13 @@
 		if x1.Op != OpARM64SRAconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64BICshiftRA)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -2362,14 +2362,14 @@
 	// match: (BICshiftLL x (MOVDconst [c]) [d])
 	// result: (ANDconst x [^int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = ^int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -2377,17 +2377,17 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SLLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	return false
@@ -2398,14 +2398,14 @@
 	// match: (BICshiftRA x (MOVDconst [c]) [d])
 	// result: (ANDconst x [^(c>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = ^(c >> uint64(d))
+		v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -2413,17 +2413,17 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRAconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	return false
@@ -2434,14 +2434,14 @@
 	// match: (BICshiftRL x (MOVDconst [c]) [d])
 	// result: (ANDconst x [^int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = ^int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -2449,17 +2449,17 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	return false
@@ -2493,13 +2493,13 @@
 			if x1.Op != OpARM64SLLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64CMNshiftLL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -2515,13 +2515,13 @@
 			if x1.Op != OpARM64SRLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64CMNshiftRL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -2537,13 +2537,13 @@
 			if x1.Op != OpARM64SRAconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64CMNshiftRA)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -2611,16 +2611,16 @@
 	// match: (CMNshiftLL (MOVDconst [c]) x [d])
 	// result: (CMNconst [c] (SLLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64CMNconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -2628,14 +2628,14 @@
 	// match: (CMNshiftLL x (MOVDconst [c]) [d])
 	// result: (CMNconst x [int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64CMNconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -2648,16 +2648,16 @@
 	// match: (CMNshiftRA (MOVDconst [c]) x [d])
 	// result: (CMNconst [c] (SRAconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64CMNconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -2665,14 +2665,14 @@
 	// match: (CMNshiftRA x (MOVDconst [c]) [d])
 	// result: (CMNconst x [c>>uint64(d)])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64CMNconst)
-		v.AuxInt = c >> uint64(d)
+		v.AuxInt = int64ToAuxInt(c >> uint64(d))
 		v.AddArg(x)
 		return true
 	}
@@ -2685,16 +2685,16 @@
 	// match: (CMNshiftRL (MOVDconst [c]) x [d])
 	// result: (CMNconst [c] (SRLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64CMNconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -2702,14 +2702,14 @@
 	// match: (CMNshiftRL x (MOVDconst [c]) [d])
 	// result: (CMNconst x [int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64CMNconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -2771,13 +2771,13 @@
 		if x1.Op != OpARM64SLLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64CMPshiftLL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -2789,7 +2789,7 @@
 		if x0.Op != OpARM64SLLconst {
 			break
 		}
-		c := x0.AuxInt
+		c := auxIntToInt64(x0.AuxInt)
 		y := x0.Args[0]
 		x1 := v_1
 		if !(clobberIfDead(x0)) {
@@ -2797,7 +2797,7 @@
 		}
 		v.reset(OpARM64InvertFlags)
 		v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags)
-		v0.AuxInt = c
+		v0.AuxInt = int64ToAuxInt(c)
 		v0.AddArg2(x1, y)
 		v.AddArg(v0)
 		return true
@@ -2811,13 +2811,13 @@
 		if x1.Op != OpARM64SRLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64CMPshiftRL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -2829,7 +2829,7 @@
 		if x0.Op != OpARM64SRLconst {
 			break
 		}
-		c := x0.AuxInt
+		c := auxIntToInt64(x0.AuxInt)
 		y := x0.Args[0]
 		x1 := v_1
 		if !(clobberIfDead(x0)) {
@@ -2837,7 +2837,7 @@
 		}
 		v.reset(OpARM64InvertFlags)
 		v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags)
-		v0.AuxInt = c
+		v0.AuxInt = int64ToAuxInt(c)
 		v0.AddArg2(x1, y)
 		v.AddArg(v0)
 		return true
@@ -2851,13 +2851,13 @@
 		if x1.Op != OpARM64SRAconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64CMPshiftRA)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -2869,7 +2869,7 @@
 		if x0.Op != OpARM64SRAconst {
 			break
 		}
-		c := x0.AuxInt
+		c := auxIntToInt64(x0.AuxInt)
 		y := x0.Args[0]
 		x1 := v_1
 		if !(clobberIfDead(x0)) {
@@ -2877,7 +2877,7 @@
 		}
 		v.reset(OpARM64InvertFlags)
 		v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags)
-		v0.AuxInt = c
+		v0.AuxInt = int64ToAuxInt(c)
 		v0.AddArg2(x1, y)
 		v.AddArg(v0)
 		return true
@@ -3064,17 +3064,17 @@
 	// match: (CMPshiftLL (MOVDconst [c]) x [d])
 	// result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64InvertFlags)
 		v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
-		v0.AuxInt = c
+		v0.AuxInt = int64ToAuxInt(c)
 		v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-		v1.AuxInt = d
+		v1.AuxInt = int64ToAuxInt(d)
 		v1.AddArg(x)
 		v0.AddArg(v1)
 		v.AddArg(v0)
@@ -3083,14 +3083,14 @@
 	// match: (CMPshiftLL x (MOVDconst [c]) [d])
 	// result: (CMPconst x [int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64CMPconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -3103,17 +3103,17 @@
 	// match: (CMPshiftRA (MOVDconst [c]) x [d])
 	// result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64InvertFlags)
 		v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
-		v0.AuxInt = c
+		v0.AuxInt = int64ToAuxInt(c)
 		v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
-		v1.AuxInt = d
+		v1.AuxInt = int64ToAuxInt(d)
 		v1.AddArg(x)
 		v0.AddArg(v1)
 		v.AddArg(v0)
@@ -3122,14 +3122,14 @@
 	// match: (CMPshiftRA x (MOVDconst [c]) [d])
 	// result: (CMPconst x [c>>uint64(d)])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64CMPconst)
-		v.AuxInt = c >> uint64(d)
+		v.AuxInt = int64ToAuxInt(c >> uint64(d))
 		v.AddArg(x)
 		return true
 	}
@@ -3142,17 +3142,17 @@
 	// match: (CMPshiftRL (MOVDconst [c]) x [d])
 	// result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64InvertFlags)
 		v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
-		v0.AuxInt = c
+		v0.AuxInt = int64ToAuxInt(c)
 		v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
-		v1.AuxInt = d
+		v1.AuxInt = int64ToAuxInt(d)
 		v1.AddArg(x)
 		v0.AddArg(v1)
 		v.AddArg(v0)
@@ -3161,14 +3161,14 @@
 	// match: (CMPshiftRL x (MOVDconst [c]) [d])
 	// result: (CMPconst x [int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64CMPconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
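
Note how the folded constants track the shift semantics of the matched op: CMPshiftLL and CMPshiftRL fold through uint64, i.e. a logical shift, while CMPshiftRA folds with a plain signed shift. A small standalone illustration of why the conversion matters on the right-shift side:

	package main

	import "fmt"

	func main() {
		c := int64(-8)
		fmt.Println(c >> 1)                // -4: arithmetic shift, sign bit copied (SRA)
		fmt.Println(int64(uint64(c) >> 1)) // 9223372036854775804: logical shift (SRL)
	}
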
@@ -3444,13 +3444,13 @@
 		if x1.Op != OpARM64SLLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64EONshiftLL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -3463,13 +3463,13 @@
 		if x1.Op != OpARM64SRLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64EONshiftRL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -3482,13 +3482,13 @@
 		if x1.Op != OpARM64SRAconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64EONshiftRA)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -3500,14 +3500,14 @@
 	// match: (EONshiftLL x (MOVDconst [c]) [d])
 	// result: (XORconst x [^int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64XORconst)
-		v.AuxInt = ^int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -3515,17 +3515,17 @@
 	// cond: c==d
 	// result: (MOVDconst [-1])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SLLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -1
+		v.AuxInt = int64ToAuxInt(-1)
 		return true
 	}
 	return false
@@ -3536,14 +3536,14 @@
 	// match: (EONshiftRA x (MOVDconst [c]) [d])
 	// result: (XORconst x [^(c>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64XORconst)
-		v.AuxInt = ^(c >> uint64(d))
+		v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -3551,17 +3551,17 @@
 	// cond: c==d
 	// result: (MOVDconst [-1])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRAconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -1
+		v.AuxInt = int64ToAuxInt(-1)
 		return true
 	}
 	return false
@@ -3572,14 +3572,14 @@
 	// match: (EONshiftRL x (MOVDconst [c]) [d])
 	// result: (XORconst x [^int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64XORconst)
-		v.AuxInt = ^int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -3587,17 +3587,17 @@
 	// cond: c==d
 	// result: (MOVDconst [-1])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -1
+		v.AuxInt = int64ToAuxInt(-1)
 		return true
 	}
 	return false
@@ -6266,15 +6266,15 @@
 	}
 	// match: (MOVBUload [off] {sym} (SB) _)
 	// cond: symIsRO(sym)
-	// result: (MOVDconst [int64(read8(sym, off))])
+	// result: (MOVDconst [int64(read8(sym, int64(off)))])
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		if v_0.Op != OpSB || !(symIsRO(sym)) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(read8(sym, off))
+		v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
 		return true
 	}
 	return false
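
Here off changes type from int64 to int32 (the Go type of the SymOff auxInt), so the call into the read helper gains an explicit widening conversion. A sketch of the assumed signatures these rules call, with bodies elided:

	package ssa

	import "encoding/binary"

	// Sketch (assumed signatures): read a read-only symbol's data at a
	// byte offset. The offsets are int64, hence the int64(off)
	// conversions at the call sites above.
	func read8(sym interface{}, off int64) uint8                        { panic("sketch") }
	func read16(sym interface{}, off int64, bo binary.ByteOrder) uint16 { panic("sketch") }
	func read64(sym interface{}, off int64, bo binary.ByteOrder) uint64 { panic("sketch") }
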
@@ -6417,13 +6417,13 @@
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, 1<<8-1, sc)) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
 		v.AddArg(x)
 		return true
 	}
@@ -6434,13 +6434,13 @@
 		if v_0.Op != OpARM64SRLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, 1<<8-1, 0)) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc, 8)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
 		v.AddArg(x)
 		return true
 	}
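
The UBFIZ/UBFX results now route their packed (lsb, width) constants through the dedicated arm64BitField aux type instead of a bare int64. A sketch of the assumed encoding and helpers (width in the low byte, lsb in the next byte), modeled on cmd/compile/internal/ssa/rewrite.go:

	package ssa

	type arm64BitField int16

	func auxIntToArm64BitField(i int64) arm64BitField { return arm64BitField(i) }
	func arm64BitFieldToAuxInt(i arm64BitField) int64 { return int64(i) }

	// armBFAuxInt packs a bit-field spec into a single aux constant.
	func armBFAuxInt(lsb, width int64) arm64BitField {
		if lsb < 0 || lsb > 63 {
			panic("bit field lsb out of range")
		}
		if width < 1 || width > 64 {
			panic("bit field width out of range")
		}
		return arm64BitField(width | lsb<<8)
	}

	// The typed rules use getter methods on the aux value rather than
	// free functions, e.g. bfc.getARM64BFwidth() below.
	func (bfc arm64BitField) getARM64BFlsb() int64   { return int64(uint64(bfc) >> 8) }
	func (bfc arm64BitField) getARM64BFwidth() int64 { return int64(bfc) & 0xff }
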
@@ -6648,13 +6648,13 @@
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_0.AuxInt
+		lc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(lc < 8) {
 			break
 		}
 		v.reset(OpARM64SBFIZ)
-		v.AuxInt = armBFAuxInt(lc, 8-lc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
 		v.AddArg(x)
 		return true
 	}
@@ -6853,15 +6853,15 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr0 w mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
 			break
 		}
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -6870,8 +6870,8 @@
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w, mem)
 		return true
 	}
@@ -6879,10 +6879,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr1 idx1 w mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -6892,7 +6892,7 @@
 		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			ptr0 := v_0_0
 			idx0 := v_0_1
-			if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+			if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
 				continue
 			}
 			w := v_1.Args[0]
@@ -6916,15 +6916,15 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr0 w mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
-		if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) {
+		if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
 			break
 		}
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -6933,8 +6933,8 @@
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w, mem)
 		return true
 	}
@@ -6942,10 +6942,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr1 idx1 w mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -6955,7 +6955,7 @@
 		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			ptr0 := v_0_0
 			idx0 := v_0_1
-			if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) {
+			if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
 				continue
 			}
 			w := v_1.Args[0]
@@ -6979,15 +6979,15 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr0 w mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
-		if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) {
+		if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
 			break
 		}
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -6996,8 +6996,8 @@
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w, mem)
 		return true
 	}
@@ -7005,10 +7005,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr1 idx1 w mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -7018,7 +7018,7 @@
 		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			ptr0 := v_0_0
 			idx0 := v_0_1
-			if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) {
+			if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
 				continue
 			}
 			w := v_1.Args[0]
@@ -7042,10 +7042,10 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr0 w mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
 			break
 		}
 		v_1_0 := v_1.Args[0]
@@ -7054,7 +7054,7 @@
 		}
 		w := v_1_0.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -7063,8 +7063,8 @@
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w, mem)
 		return true
 	}
@@ -7072,10 +7072,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr1 idx1 w mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -7085,7 +7085,7 @@
 		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			ptr0 := v_0_0
 			idx0 := v_0_1
-			if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+			if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
 				continue
 			}
 			v_1_0 := v_1.Args[0]
@@ -7113,27 +7113,27 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		j := v_1.AuxInt
+		j := auxIntToInt64(v_1.AuxInt)
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
 		ptr1 := x.Args[0]
 		w0 := x.Args[1]
-		if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+		if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w0, mem)
 		return true
 	}
@@ -7141,10 +7141,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr1 idx1 w0 mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -7157,7 +7157,7 @@
 			if v_1.Op != OpARM64SRLconst {
 				continue
 			}
-			j := v_1.AuxInt
+			j := auxIntToInt64(v_1.AuxInt)
 			w := v_1.Args[0]
 			x := v_2
 			if x.Op != OpARM64MOVBstoreidx {
@@ -7167,7 +7167,7 @@
 			ptr1 := x.Args[0]
 			idx1 := x.Args[1]
 			w0 := x.Args[2]
-			if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+			if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
 				continue
 			}
 			v.reset(OpARM64MOVHstoreidx)
@@ -7177,19 +7177,19 @@
 		break
 	}
 	// match: (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
-	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x)
+	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		if v_1.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_1.AuxInt
+		bfc := auxIntToArm64BitField(v_1.AuxInt)
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -7198,24 +7198,24 @@
 		if w0.Op != OpARM64UBFX {
 			break
 		}
-		bfc2 := w0.AuxInt
-		if w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) {
+		bfc2 := auxIntToArm64BitField(w0.AuxInt)
+		if w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w0, mem)
 		return true
 	}
 	// match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
-	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x)
+	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
 	// result: (MOVHstoreidx ptr1 idx1 w0 mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -7228,7 +7228,7 @@
 			if v_1.Op != OpARM64UBFX {
 				continue
 			}
-			bfc := v_1.AuxInt
+			bfc := auxIntToArm64BitField(v_1.AuxInt)
 			w := v_1.Args[0]
 			x := v_2
 			if x.Op != OpARM64MOVBstoreidx {
@@ -7241,8 +7241,8 @@
 			if w0.Op != OpARM64UBFX {
 				continue
 			}
-			bfc2 := w0.AuxInt
-			if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) {
+			bfc2 := auxIntToArm64BitField(w0.AuxInt)
+			if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
 				continue
 			}
 			v.reset(OpARM64MOVHstoreidx)
@@ -7255,26 +7255,26 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		j := v_1.AuxInt
+		j := auxIntToInt64(v_1.AuxInt)
 		v_1_0 := v_1.Args[0]
 		if v_1_0.Op != OpARM64MOVDreg {
 			break
 		}
 		w := v_1_0.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
 		ptr1 := x.Args[0]
 		w0 := x.Args[1]
-		if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 {
+		if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
 			break
 		}
 		w0_0 := w0.Args[0]
@@ -7282,8 +7282,8 @@
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w0, mem)
 		return true
 	}
@@ -7291,10 +7291,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr1 idx1 w0 mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -7307,7 +7307,7 @@
 			if v_1.Op != OpARM64SRLconst {
 				continue
 			}
-			j := v_1.AuxInt
+			j := auxIntToInt64(v_1.AuxInt)
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64MOVDreg {
 				continue
@@ -7321,7 +7321,7 @@
 			ptr1 := x.Args[0]
 			idx1 := x.Args[1]
 			w0 := x.Args[2]
-			if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 {
+			if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
 				continue
 			}
 			w0_0 := w0.Args[0]
@@ -7338,12 +7338,12 @@
 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
 	// result: (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr := v_0
 		w := v_1
 		x0 := v_2
-		if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+		if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
 			break
 		}
 		_ = x0.Args[2]
@@ -7351,11 +7351,11 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+		if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
 			break
 		}
 		x1 := x0.Args[2]
-		if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[2]
@@ -7363,11 +7363,11 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+		if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
 			break
 		}
 		x2 := x1.Args[2]
-		if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+		if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[2]
@@ -7375,11 +7375,11 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] {
+		if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
 			break
 		}
 		x3 := x2.Args[2]
-		if x3.Op != OpARM64MOVBstore || x3.AuxInt != i-4 || x3.Aux != s {
+		if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
 			break
 		}
 		_ = x3.Args[2]
@@ -7387,11 +7387,11 @@
 			break
 		}
 		x3_1 := x3.Args[1]
-		if x3_1.Op != OpARM64SRLconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+		if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
 			break
 		}
 		x4 := x3.Args[2]
-		if x4.Op != OpARM64MOVBstore || x4.AuxInt != i-5 || x4.Aux != s {
+		if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != i-5 || auxToSym(x4.Aux) != s {
 			break
 		}
 		_ = x4.Args[2]
@@ -7399,11 +7399,11 @@
 			break
 		}
 		x4_1 := x4.Args[1]
-		if x4_1.Op != OpARM64SRLconst || x4_1.AuxInt != 40 || w != x4_1.Args[0] {
+		if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
 			break
 		}
 		x5 := x4.Args[2]
-		if x5.Op != OpARM64MOVBstore || x5.AuxInt != i-6 || x5.Aux != s {
+		if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != i-6 || auxToSym(x5.Aux) != s {
 			break
 		}
 		_ = x5.Args[2]
@@ -7411,11 +7411,11 @@
 			break
 		}
 		x5_1 := x5.Args[1]
-		if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] {
+		if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
 			break
 		}
 		x6 := x5.Args[2]
-		if x6.Op != OpARM64MOVBstore || x6.AuxInt != i-7 || x6.Aux != s {
+		if x6.Op != OpARM64MOVBstore || auxIntToInt32(x6.AuxInt) != i-7 || auxToSym(x6.Aux) != s {
 			break
 		}
 		mem := x6.Args[2]
@@ -7423,12 +7423,12 @@
 			break
 		}
 		x6_1 := x6.Args[1]
-		if x6_1.Op != OpARM64SRLconst || x6_1.AuxInt != 56 || w != x6_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+		if x6_1.Op != OpARM64SRLconst || auxIntToInt64(x6_1.AuxInt) != 56 || w != x6_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
 			break
 		}
 		v.reset(OpARM64MOVDstore)
-		v.AuxInt = i - 7
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 7)
+		v.Aux = symToAux(s)
 		v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type)
 		v0.AddArg(w)
 		v.AddArg3(ptr, v0, mem)
@@ -7438,14 +7438,14 @@
 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)
 	// result: (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
 	for {
-		if v.AuxInt != 7 {
+		if auxIntToInt32(v.AuxInt) != 7 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		p := v_0
 		w := v_1
 		x0 := v_2
-		if x0.Op != OpARM64MOVBstore || x0.AuxInt != 6 || x0.Aux != s {
+		if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 6 || auxToSym(x0.Aux) != s {
 			break
 		}
 		_ = x0.Args[2]
@@ -7453,11 +7453,11 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+		if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
 			break
 		}
 		x1 := x0.Args[2]
-		if x1.Op != OpARM64MOVBstore || x1.AuxInt != 5 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[2]
@@ -7465,11 +7465,11 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+		if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
 			break
 		}
 		x2 := x1.Args[2]
-		if x2.Op != OpARM64MOVBstore || x2.AuxInt != 4 || x2.Aux != s {
+		if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != 4 || auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[2]
@@ -7477,11 +7477,11 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] {
+		if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
 			break
 		}
 		x3 := x2.Args[2]
-		if x3.Op != OpARM64MOVBstore || x3.AuxInt != 3 || x3.Aux != s {
+		if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
 			break
 		}
 		_ = x3.Args[2]
@@ -7489,11 +7489,11 @@
 			break
 		}
 		x3_1 := x3.Args[1]
-		if x3_1.Op != OpARM64SRLconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+		if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
 			break
 		}
 		x4 := x3.Args[2]
-		if x4.Op != OpARM64MOVBstore || x4.AuxInt != 2 || x4.Aux != s {
+		if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != 2 || auxToSym(x4.Aux) != s {
 			break
 		}
 		_ = x4.Args[2]
@@ -7501,11 +7501,11 @@
 			break
 		}
 		x4_1 := x4.Args[1]
-		if x4_1.Op != OpARM64SRLconst || x4_1.AuxInt != 40 || w != x4_1.Args[0] {
+		if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
 			break
 		}
 		x5 := x4.Args[2]
-		if x5.Op != OpARM64MOVBstore || x5.AuxInt != 1 || x5.Aux != s {
+		if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != 1 || auxToSym(x5.Aux) != s {
 			break
 		}
 		_ = x5.Args[2]
@@ -7520,7 +7520,7 @@
 			ptr1 := p1_0
 			idx1 := p1_1
 			x5_1 := x5.Args[1]
-			if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] {
+			if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
 				continue
 			}
 			x6 := x5.Args[2]
@@ -7531,7 +7531,7 @@
 			ptr0 := x6.Args[0]
 			idx0 := x6.Args[1]
 			x6_2 := x6.Args[2]
-			if x6_2.Op != OpARM64SRLconst || x6_2.AuxInt != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+			if x6_2.Op != OpARM64SRLconst || auxIntToInt64(x6_2.AuxInt) != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
 				continue
 			}
 			v.reset(OpARM64MOVDstoreidx)
@@ -7546,12 +7546,12 @@
 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
 	// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr := v_0
 		w := v_1
 		x0 := v_2
-		if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+		if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
 			break
 		}
 		_ = x0.Args[2]
@@ -7559,11 +7559,11 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64UBFX || x0_1.AuxInt != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+		if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
 			break
 		}
 		x1 := x0.Args[2]
-		if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[2]
@@ -7571,11 +7571,11 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+		if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
 			break
 		}
 		x2 := x1.Args[2]
-		if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+		if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
 			break
 		}
 		mem := x2.Args[2]
@@ -7583,12 +7583,12 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64UBFX || x2_1.AuxInt != armBFAuxInt(24, 8) || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+		if x2_1.Op != OpARM64UBFX || auxIntToArm64BitField(x2_1.AuxInt) != armBFAuxInt(24, 8) || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
-		v.AuxInt = i - 3
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 3)
+		v.Aux = symToAux(s)
 		v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
 		v0.AddArg(w)
 		v.AddArg3(ptr, v0, mem)
@@ -7598,14 +7598,14 @@
 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
 	// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
 	for {
-		if v.AuxInt != 3 {
+		if auxIntToInt32(v.AuxInt) != 3 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		p := v_0
 		w := v_1
 		x0 := v_2
-		if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+		if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
 			break
 		}
 		_ = x0.Args[2]
@@ -7613,11 +7613,11 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64UBFX || x0_1.AuxInt != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+		if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
 			break
 		}
 		x1 := x0.Args[2]
-		if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[2]
@@ -7632,7 +7632,7 @@
 			ptr1 := p1_0
 			idx1 := p1_1
 			x1_1 := x1.Args[1]
-			if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+			if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
 				continue
 			}
 			x2 := x1.Args[2]
@@ -7643,7 +7643,7 @@
 			ptr0 := x2.Args[0]
 			idx0 := x2.Args[1]
 			x2_2 := x2.Args[2]
-			if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+			if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
 				continue
 			}
 			v.reset(OpARM64MOVWstoreidx)
@@ -7658,12 +7658,12 @@
 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
 	// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr := v_0
 		w := v_1
 		x0 := v_2
-		if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+		if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
 			break
 		}
 		_ = x0.Args[2]
@@ -7671,7 +7671,7 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 {
+		if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
 			break
 		}
 		x0_1_0 := x0_1.Args[0]
@@ -7679,7 +7679,7 @@
 			break
 		}
 		x1 := x0.Args[2]
-		if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[2]
@@ -7687,7 +7687,7 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 {
+		if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
 			break
 		}
 		x1_1_0 := x1_1.Args[0]
@@ -7695,7 +7695,7 @@
 			break
 		}
 		x2 := x1.Args[2]
-		if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+		if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
 			break
 		}
 		mem := x2.Args[2]
@@ -7703,7 +7703,7 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 {
+		if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 {
 			break
 		}
 		x2_1_0 := x2_1.Args[0]
@@ -7711,8 +7711,8 @@
 			break
 		}
 		v.reset(OpARM64MOVWstore)
-		v.AuxInt = i - 3
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 3)
+		v.Aux = symToAux(s)
 		v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
 		v0.AddArg(w)
 		v.AddArg3(ptr, v0, mem)
@@ -7722,14 +7722,14 @@
 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
 	// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
 	for {
-		if v.AuxInt != 3 {
+		if auxIntToInt32(v.AuxInt) != 3 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		p := v_0
 		w := v_1
 		x0 := v_2
-		if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+		if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
 			break
 		}
 		_ = x0.Args[2]
@@ -7737,7 +7737,7 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 {
+		if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
 			break
 		}
 		x0_1_0 := x0_1.Args[0]
@@ -7745,7 +7745,7 @@
 			break
 		}
 		x1 := x0.Args[2]
-		if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[2]
@@ -7760,7 +7760,7 @@
 			ptr1 := p1_0
 			idx1 := p1_1
 			x1_1 := x1.Args[1]
-			if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 {
+			if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
 				continue
 			}
 			x1_1_0 := x1_1.Args[0]
@@ -7775,7 +7775,7 @@
 			ptr0 := x2.Args[0]
 			idx0 := x2.Args[1]
 			x2_2 := x2.Args[2]
-			if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 {
+			if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 {
 				continue
 			}
 			x2_2_0 := x2_2.Args[0]
@@ -7794,12 +7794,12 @@
 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
 	// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr := v_0
 		w := v_1
 		x0 := v_2
-		if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+		if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
 			break
 		}
 		_ = x0.Args[2]
@@ -7807,11 +7807,11 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+		if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
 			break
 		}
 		x1 := x0.Args[2]
-		if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[2]
@@ -7819,11 +7819,11 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+		if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
 			break
 		}
 		x2 := x1.Args[2]
-		if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+		if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
 			break
 		}
 		mem := x2.Args[2]
@@ -7831,12 +7831,12 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+		if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
-		v.AuxInt = i - 3
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 3)
+		v.Aux = symToAux(s)
 		v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
 		v0.AddArg(w)
 		v.AddArg3(ptr, v0, mem)
@@ -7846,14 +7846,14 @@
 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
 	// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
 	for {
-		if v.AuxInt != 3 {
+		if auxIntToInt32(v.AuxInt) != 3 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		p := v_0
 		w := v_1
 		x0 := v_2
-		if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+		if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
 			break
 		}
 		_ = x0.Args[2]
@@ -7861,11 +7861,11 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+		if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
 			break
 		}
 		x1 := x0.Args[2]
-		if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[2]
@@ -7880,7 +7880,7 @@
 			ptr1 := p1_0
 			idx1 := p1_1
 			x1_1 := x1.Args[1]
-			if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+			if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
 				continue
 			}
 			x2 := x1.Args[2]
@@ -7891,7 +7891,7 @@
 			ptr0 := x2.Args[0]
 			idx0 := x2.Args[1]
 			x2_2 := x2.Args[2]
-			if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+			if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
 				continue
 			}
 			v.reset(OpARM64MOVWstoreidx)
@@ -7906,12 +7906,12 @@
 	// cond: x.Uses == 1 && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr := v_0
 		w := v_1
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -7919,12 +7919,12 @@
 			break
 		}
 		x_1 := x.Args[1]
-		if x_1.Op != OpARM64SRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+		if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
 		v0.AddArg(w)
 		v.AddArg3(ptr, v0, mem)
@@ -7934,10 +7934,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -7956,7 +7956,7 @@
 			ptr0 := x.Args[0]
 			idx0 := x.Args[1]
 			x_2 := x.Args[2]
-			if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+			if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
 				continue
 			}
 			v.reset(OpARM64MOVHstoreidx)
@@ -7971,12 +7971,12 @@
 	// cond: x.Uses == 1 && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr := v_0
 		w := v_1
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -7984,12 +7984,12 @@
 			break
 		}
 		x_1 := x.Args[1]
-		if x_1.Op != OpARM64UBFX || x_1.AuxInt != armBFAuxInt(8, 8) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+		if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 8) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
 		v0.AddArg(w)
 		v.AddArg3(ptr, v0, mem)
@@ -7999,10 +7999,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -8021,7 +8021,7 @@
 			ptr0 := x.Args[0]
 			idx0 := x.Args[1]
 			x_2 := x.Args[2]
-			if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+			if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
 				continue
 			}
 			v.reset(OpARM64MOVHstoreidx)
@@ -8036,12 +8036,12 @@
 	// cond: x.Uses == 1 && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr := v_0
 		w := v_1
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -8049,7 +8049,7 @@
 			break
 		}
 		x_1 := x.Args[1]
-		if x_1.Op != OpARM64SRLconst || x_1.AuxInt != 8 {
+		if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 {
 			break
 		}
 		x_1_0 := x_1.Args[0]
@@ -8057,8 +8057,8 @@
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
 		v0.AddArg(w)
 		v.AddArg3(ptr, v0, mem)
@@ -8068,10 +8068,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -8090,7 +8090,7 @@
 			ptr0 := x.Args[0]
 			idx0 := x.Args[1]
 			x_2 := x.Args[2]
-			if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 {
+			if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 {
 				continue
 			}
 			x_2_0 := x_2.Args[0]
@@ -8109,12 +8109,12 @@
 	// cond: x.Uses == 1 && clobber(x)
 	// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr := v_0
 		w := v_1
 		x := v_2
-		if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+		if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -8122,12 +8122,12 @@
 			break
 		}
 		x_1 := x.Args[1]
-		if x_1.Op != OpARM64UBFX || x_1.AuxInt != armBFAuxInt(8, 24) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+		if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 24) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 1)
+		v.Aux = symToAux(s)
 		v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
 		v0.AddArg(w)
 		v.AddArg3(ptr, v0, mem)
@@ -8137,10 +8137,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -8159,7 +8159,7 @@
 			ptr0 := x.Args[0]
 			idx0 := x.Args[1]
 			x_2 := x.Args[2]
-			if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+			if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
 				continue
 			}
 			v.reset(OpARM64MOVHstoreidx)
@@ -8318,11 +8318,11 @@
 	// result: (MOVHstoreidx ptr idx w mem)
 	for {
 		ptr := v_0
-		if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+		if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
 			break
 		}
 		idx := v_1.Args[0]
-		if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 8 {
+		if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 8 {
 			break
 		}
 		w := v_2.Args[0]
@@ -8343,7 +8343,7 @@
 	// result: (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
 	for {
 		ptr := v_0
-		if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 3 {
+		if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 3 {
 			break
 		}
 		idx := v_1.Args[0]
@@ -8357,11 +8357,11 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 2 || idx != x0_1.Args[0] {
+		if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 || idx != x0_1.Args[0] {
 			break
 		}
 		x0_2 := x0.Args[2]
-		if x0_2.Op != OpARM64UBFX || x0_2.AuxInt != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+		if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
 			break
 		}
 		x1 := x0.Args[3]
@@ -8373,11 +8373,11 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] {
+		if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] {
 			break
 		}
 		x1_2 := x1.Args[2]
-		if x1_2.Op != OpARM64UBFX || x1_2.AuxInt != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+		if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
 			break
 		}
 		x2 := x1.Args[3]
@@ -8389,7 +8389,7 @@
 			break
 		}
 		x2_2 := x2.Args[2]
-		if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+		if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
 			break
 		}
 		v.reset(OpARM64MOVWstoreidx)
@@ -8414,11 +8414,11 @@
 			break
 		}
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 1 || idx != x0_1.Args[0] {
+		if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 || idx != x0_1.Args[0] {
 			break
 		}
 		x0_2 := x0.Args[2]
-		if x0_2.Op != OpARM64UBFX || x0_2.AuxInt != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+		if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
 			break
 		}
 		x1 := x0.Args[3]
@@ -8430,11 +8430,11 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] {
+		if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] {
 			break
 		}
 		x1_2 := x1.Args[2]
-		if x1_2.Op != OpARM64UBFX || x1_2.AuxInt != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+		if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
 			break
 		}
 		x2 := x1.Args[3]
@@ -8446,11 +8446,11 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 3 || idx != x2_1.Args[0] {
+		if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] {
 			break
 		}
 		x2_2 := x2.Args[2]
-		if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+		if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
 			break
 		}
 		v.reset(OpARM64MOVWstoreidx)
@@ -8462,7 +8462,7 @@
 	// result: (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
 	for {
 		ptr := v_0
-		if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+		if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
 			break
 		}
 		idx := v_1.Args[0]
@@ -8476,7 +8476,7 @@
 			break
 		}
 		x_2 := x.Args[2]
-		if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+		if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVHstoreidx)
@@ -8501,11 +8501,11 @@
 			break
 		}
 		x_1 := x.Args[1]
-		if x_1.Op != OpARM64ADDconst || x_1.AuxInt != 1 || idx != x_1.Args[0] {
+		if x_1.Op != OpARM64ADDconst || auxIntToInt64(x_1.AuxInt) != 1 || idx != x_1.Args[0] {
 			break
 		}
 		x_2 := x.Args[2]
-		if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+		if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVHstoreidx)
@@ -8582,28 +8582,28 @@
 		return true
 	}
 	// match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
-	// cond: x.Uses == 1 && areAdjacentOffsets(i,j,1) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
-	// result: (MOVHstorezero [min(i,j)] {s} ptr0 mem)
+	// cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),1) && isSamePtr(ptr0, ptr1) && clobber(x)
+	// result: (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		x := v_1
 		if x.Op != OpARM64MOVBstorezero {
 			break
 		}
-		j := x.AuxInt
-		if x.Aux != s {
+		j := auxIntToInt32(x.AuxInt)
+		if auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[1]
 		ptr1 := x.Args[0]
-		if !(x.Uses == 1 && areAdjacentOffsets(i, j, 1) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+		if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 1) && isSamePtr(ptr0, ptr1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVHstorezero)
-		v.AuxInt = min(i, j)
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+		v.Aux = symToAux(s)
 		v.AddArg2(ptr0, mem)
 		return true
 	}
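
Since i and j are now int32 while the generic helpers take int64, the condition widens them explicitly; and with 32-bit offsets the old is32Bit(min(i,j)) guard is vacuous, which is why it drops out of the condition. Assumed forms of the two helpers involved:

	package ssa

	// Sketch (assumed): both helpers work on 64-bit offsets, so the
	// int32 SymOff values are widened with int64(...) at the call sites.
	func min(x, y int64) int64 {
		if x < y {
			return x
		}
		return y
	}

	func areAdjacentOffsets(off1, off2, size int64) bool {
		return off1+size == off2 || off2+size == off1
	}
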
@@ -8611,10 +8611,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVHstorezeroidx ptr1 idx1 mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt32(v.AuxInt) != 1 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -8687,7 +8687,7 @@
 	// result: (MOVHstorezeroidx ptr idx mem)
 	for {
 		ptr := v_0
-		if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+		if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
 			break
 		}
 		idx := v_1.Args[0]
@@ -8830,15 +8830,15 @@
 	}
 	// match: (MOVDload [off] {sym} (SB) _)
 	// cond: symIsRO(sym)
-	// result: (MOVDconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
+	// result: (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		if v_0.Op != OpSB || !(symIsRO(sym)) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(read64(sym, off, config.ctxt.Arch.ByteOrder))
+		v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
 		return true
 	}
 	return false
@@ -9336,28 +9336,28 @@
 		return true
 	}
 	// match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
-	// cond: x.Uses == 1 && areAdjacentOffsets(i,j,8) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
-	// result: (MOVQstorezero [min(i,j)] {s} ptr0 mem)
+	// cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),8) && isSamePtr(ptr0, ptr1) && clobber(x)
+	// result: (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		x := v_1
 		if x.Op != OpARM64MOVDstorezero {
 			break
 		}
-		j := x.AuxInt
-		if x.Aux != s {
+		j := auxIntToInt32(x.AuxInt)
+		if auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[1]
 		ptr1 := x.Args[0]
-		if !(x.Uses == 1 && areAdjacentOffsets(i, j, 8) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+		if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 8) && isSamePtr(ptr0, ptr1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVQstorezero)
-		v.AuxInt = min(i, j)
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+		v.Aux = symToAux(s)
 		v.AddArg2(ptr0, mem)
 		return true
 	}
@@ -9365,10 +9365,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVQstorezero [0] {s} p0 mem)
 	for {
-		if v.AuxInt != 8 {
+		if auxIntToInt32(v.AuxInt) != 8 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		p0 := v_0
 		if p0.Op != OpARM64ADD {
 			break
@@ -9390,8 +9390,8 @@
 				continue
 			}
 			v.reset(OpARM64MOVQstorezero)
-			v.AuxInt = 0
-			v.Aux = s
+			v.AuxInt = int32ToAuxInt(0)
+			v.Aux = symToAux(s)
 			v.AddArg2(p0, mem)
 			return true
 		}
@@ -9401,12 +9401,12 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVQstorezero [0] {s} p0 mem)
 	for {
-		if v.AuxInt != 8 {
+		if auxIntToInt32(v.AuxInt) != 8 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		p0 := v_0
-		if p0.Op != OpARM64ADDshiftLL || p0.AuxInt != 3 {
+		if p0.Op != OpARM64ADDshiftLL || auxIntToInt64(p0.AuxInt) != 3 {
 			break
 		}
 		idx0 := p0.Args[1]
@@ -9422,8 +9422,8 @@
 			break
 		}
 		v.reset(OpARM64MOVQstorezero)
-		v.AuxInt = 0
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(0)
+		v.Aux = symToAux(s)
 		v.AddArg2(p0, mem)
 		return true
 	}
@@ -9629,15 +9629,15 @@
 	}
 	// match: (MOVHUload [off] {sym} (SB) _)
 	// cond: symIsRO(sym)
-	// result: (MOVDconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
+	// result: (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		if v_0.Op != OpSB || !(symIsRO(sym)) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
+		v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
 		return true
 	}
 	return false
@@ -9899,13 +9899,13 @@
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, 1<<16-1, sc)) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
 		v.AddArg(x)
 		return true
 	}
@@ -9916,13 +9916,13 @@
 		if v_0.Op != OpARM64SRLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, 1<<16-1, 0)) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc, 16)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
 		v.AddArg(x)
 		return true
 	}
@@ -10313,13 +10313,13 @@
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_0.AuxInt
+		lc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(lc < 16) {
 			break
 		}
 		v.reset(OpARM64SBFIZ)
-		v.AuxInt = armBFAuxInt(lc, 16-lc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
 		v.AddArg(x)
 		return true
 	}
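
UBFIZ, UBFX and SBFIZ carry a packed bit-field descriptor rather than a plain shift amount, and arm64BitField is an int16. A runnable sketch assuming the width-in-low-byte, lsb-above-it packing implied by the getter methods used later in this file; the layout here is an assumption, not a quote of the compiler's source:

package main

import "fmt"

// arm64BitField packs a bit-field spec into an int16 auxInt.
type arm64BitField int16

// Assumed layout: width in the low byte, lsb in the bits above it.
func armBFAuxInt(lsb, width int64) arm64BitField {
	return arm64BitField(width | lsb<<8)
}

func (bfc arm64BitField) getARM64BFlsb() int64   { return int64(uint64(bfc) >> 8) }
func (bfc arm64BitField) getARM64BFwidth() int64 { return int64(bfc) & 0xff }

func main() {
	bfc := armBFAuxInt(16, 16) // the UBFX [armBFAuxInt(16, 16)] case seen below
	fmt.Println(bfc.getARM64BFlsb(), bfc.getARM64BFwidth()) // 16 16
}
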
@@ -10504,15 +10504,15 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVWstore [i-2] {s} ptr0 w mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
 			break
 		}
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+		if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -10521,8 +10521,8 @@
 			break
 		}
 		v.reset(OpARM64MOVWstore)
-		v.AuxInt = i - 2
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 2)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w, mem)
 		return true
 	}
@@ -10530,10 +10530,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVWstoreidx ptr1 idx1 w mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -10543,7 +10543,7 @@
 		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			ptr0 := v_0_0
 			idx0 := v_0_1
-			if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+			if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
 				continue
 			}
 			w := v_1.Args[0]
@@ -10567,16 +10567,16 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
-		if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+		s := auxToSym(v.Aux)
+		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
 			break
 		}
 		idx0 := v_0.Args[1]
 		ptr0 := v_0.Args[0]
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
 			break
 		}
 		w := v_1.Args[0]
@@ -10592,7 +10592,7 @@
 		}
 		v.reset(OpARM64MOVWstoreidx)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
-		v0.AuxInt = 1
+		v0.AuxInt = int64ToAuxInt(1)
 		v0.AddArg(idx1)
 		v.AddArg4(ptr1, v0, w, mem)
 		return true
@@ -10601,15 +10601,15 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVWstore [i-2] {s} ptr0 w mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
-		if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+		if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
 			break
 		}
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+		if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -10618,8 +10618,8 @@
 			break
 		}
 		v.reset(OpARM64MOVWstore)
-		v.AuxInt = i - 2
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 2)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w, mem)
 		return true
 	}
@@ -10627,10 +10627,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVWstoreidx ptr1 idx1 w mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -10640,7 +10640,7 @@
 		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			ptr0 := v_0_0
 			idx0 := v_0_1
-			if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+			if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
 				continue
 			}
 			w := v_1.Args[0]
@@ -10664,16 +10664,16 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
-		if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+		s := auxToSym(v.Aux)
+		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
 			break
 		}
 		idx0 := v_0.Args[1]
 		ptr0 := v_0.Args[0]
-		if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+		if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
 			break
 		}
 		w := v_1.Args[0]
@@ -10689,7 +10689,7 @@
 		}
 		v.reset(OpARM64MOVWstoreidx)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
-		v0.AuxInt = 1
+		v0.AuxInt = int64ToAuxInt(1)
 		v0.AddArg(idx1)
 		v.AddArg4(ptr1, v0, w, mem)
 		return true
@@ -10698,10 +10698,10 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVWstore [i-2] {s} ptr0 w mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
 			break
 		}
 		v_1_0 := v_1.Args[0]
@@ -10710,7 +10710,7 @@
 		}
 		w := v_1_0.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+		if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -10719,8 +10719,8 @@
 			break
 		}
 		v.reset(OpARM64MOVWstore)
-		v.AuxInt = i - 2
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 2)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w, mem)
 		return true
 	}
@@ -10728,10 +10728,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVWstoreidx ptr1 idx1 w mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -10741,7 +10741,7 @@
 		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			ptr0 := v_0_0
 			idx0 := v_0_1
-			if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+			if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
 				continue
 			}
 			v_1_0 := v_1.Args[0]
@@ -10769,16 +10769,16 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
-		if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+		s := auxToSym(v.Aux)
+		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
 			break
 		}
 		idx0 := v_0.Args[1]
 		ptr0 := v_0.Args[0]
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
 			break
 		}
 		v_1_0 := v_1.Args[0]
@@ -10798,7 +10798,7 @@
 		}
 		v.reset(OpARM64MOVWstoreidx)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
-		v0.AuxInt = 1
+		v0.AuxInt = int64ToAuxInt(1)
 		v0.AddArg(idx1)
 		v.AddArg4(ptr1, v0, w, mem)
 		return true
@@ -10807,27 +10807,27 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVWstore [i-2] {s} ptr0 w0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		j := v_1.AuxInt
+		j := auxIntToInt64(v_1.AuxInt)
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+		if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
 		ptr1 := x.Args[0]
 		w0 := x.Args[1]
-		if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+		if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
-		v.AuxInt = i - 2
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 2)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w0, mem)
 		return true
 	}
@@ -10835,10 +10835,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVWstoreidx ptr1 idx1 w0 mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -10851,7 +10851,7 @@
 			if v_1.Op != OpARM64SRLconst {
 				continue
 			}
-			j := v_1.AuxInt
+			j := auxIntToInt64(v_1.AuxInt)
 			w := v_1.Args[0]
 			x := v_2
 			if x.Op != OpARM64MOVHstoreidx {
@@ -10861,7 +10861,7 @@
 			ptr1 := x.Args[0]
 			idx1 := x.Args[1]
 			w0 := x.Args[2]
-			if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+			if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
 				continue
 			}
 			v.reset(OpARM64MOVWstoreidx)
@@ -10874,11 +10874,11 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
-		if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+		s := auxToSym(v.Aux)
+		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
 			break
 		}
 		idx0 := v_0.Args[1]
@@ -10886,7 +10886,7 @@
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		j := v_1.AuxInt
+		j := auxIntToInt64(v_1.AuxInt)
 		w := v_1.Args[0]
 		x := v_2
 		if x.Op != OpARM64MOVHstoreidx2 {
@@ -10896,12 +10896,12 @@
 		ptr1 := x.Args[0]
 		idx1 := x.Args[1]
 		w0 := x.Args[2]
-		if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+		if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVWstoreidx)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
-		v0.AuxInt = 1
+		v0.AuxInt = int64ToAuxInt(1)
 		v0.AddArg(idx1)
 		v.AddArg4(ptr1, v0, w0, mem)
 		return true
@@ -11087,11 +11087,11 @@
 	// result: (MOVWstoreidx ptr idx w mem)
 	for {
 		ptr := v_0
-		if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 2 {
+		if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
 			break
 		}
 		idx := v_1.Args[0]
-		if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 16 {
+		if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 16 {
 			break
 		}
 		w := v_2.Args[0]
@@ -11291,28 +11291,28 @@
 		return true
 	}
 	// match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
-	// cond: x.Uses == 1 && areAdjacentOffsets(i,j,2) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
-	// result: (MOVWstorezero [min(i,j)] {s} ptr0 mem)
+	// cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),2) && isSamePtr(ptr0, ptr1) && clobber(x)
+	// result: (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		x := v_1
 		if x.Op != OpARM64MOVHstorezero {
 			break
 		}
-		j := x.AuxInt
-		if x.Aux != s {
+		j := auxIntToInt32(x.AuxInt)
+		if auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[1]
 		ptr1 := x.Args[0]
-		if !(x.Uses == 1 && areAdjacentOffsets(i, j, 2) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+		if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 2) && isSamePtr(ptr0, ptr1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVWstorezero)
-		v.AuxInt = min(i, j)
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+		v.Aux = symToAux(s)
 		v.AddArg2(ptr0, mem)
 		return true
 	}
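
The storezero merges pick the lower of the two adjacent offsets as the new base. min works on int64, so the int32 offsets are widened for the call and the result narrowed back for the auxInt; since the minimum of two int32 values always fits in 32 bits, the old is32Bit(min(i,j)) guard could be dropped. A runnable sketch, assuming the obvious definitions of min and areAdjacentOffsets:

package main

import "fmt"

func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

func areAdjacentOffsets(i, j, size int64) bool {
	return i+size == j || j+size == i
}

func main() {
	// Two 2-byte zero stores at offsets 6 and 4 merge into one
	// 4-byte zero store at offset 4 (the MOVHstorezero case above).
	i, j := int32(6), int32(4)
	if areAdjacentOffsets(int64(i), int64(j), 2) {
		merged := int32(min(int64(i), int64(j)))
		fmt.Println(merged) // 4; always fits in int32, so no is32Bit check
	}
}
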
@@ -11320,10 +11320,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVWstorezeroidx ptr1 idx1 mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -11353,11 +11353,11 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt32(v.AuxInt) != 2 {
 			break
 		}
-		s := v.Aux
-		if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+		s := auxToSym(v.Aux)
+		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
 			break
 		}
 		idx0 := v_0.Args[1]
@@ -11374,7 +11374,7 @@
 		}
 		v.reset(OpARM64MOVWstorezeroidx)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
-		v0.AuxInt = 1
+		v0.AuxInt = int64ToAuxInt(1)
 		v0.AddArg(idx1)
 		v.AddArg3(ptr1, v0, mem)
 		return true
@@ -11484,7 +11484,7 @@
 	// result: (MOVWstorezeroidx ptr idx mem)
 	for {
 		ptr := v_0
-		if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 2 {
+		if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
 			break
 		}
 		idx := v_1.Args[0]
@@ -11701,15 +11701,15 @@
 	}
 	// match: (MOVWUload [off] {sym} (SB) _)
 	// cond: symIsRO(sym)
-	// result: (MOVDconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
+	// result: (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
 	for {
-		off := v.AuxInt
-		sym := v.Aux
+		off := auxIntToInt32(v.AuxInt)
+		sym := auxToSym(v.Aux)
 		if v_0.Op != OpSB || !(symIsRO(sym)) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(read32(sym, off, config.ctxt.Arch.ByteOrder))
+		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
 		return true
 	}
 	return false
@@ -11996,13 +11996,13 @@
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, 1<<32-1, sc)) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
 		v.AddArg(x)
 		return true
 	}
@@ -12013,13 +12013,13 @@
 		if v_0.Op != OpARM64SRLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, 1<<32-1, 0)) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc, 32)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
 		v.AddArg(x)
 		return true
 	}
@@ -12468,13 +12468,13 @@
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_0.AuxInt
+		lc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(lc < 32) {
 			break
 		}
 		v.reset(OpARM64SBFIZ)
-		v.AuxInt = armBFAuxInt(lc, 32-lc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
 		v.AddArg(x)
 		return true
 	}
@@ -12642,15 +12642,15 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVDstore [i-4] {s} ptr0 w mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
 			break
 		}
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVWstore || x.AuxInt != i-4 || x.Aux != s {
+		if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
@@ -12659,8 +12659,8 @@
 			break
 		}
 		v.reset(OpARM64MOVDstore)
-		v.AuxInt = i - 4
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 4)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w, mem)
 		return true
 	}
@@ -12668,10 +12668,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVDstoreidx ptr1 idx1 w mem)
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt32(v.AuxInt) != 4 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -12681,7 +12681,7 @@
 		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
 			ptr0 := v_0_0
 			idx0 := v_0_1
-			if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+			if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
 				continue
 			}
 			w := v_1.Args[0]
@@ -12705,16 +12705,16 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt32(v.AuxInt) != 4 {
 			break
 		}
-		s := v.Aux
-		if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+		s := auxToSym(v.Aux)
+		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
 			break
 		}
 		idx0 := v_0.Args[1]
 		ptr0 := v_0.Args[0]
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
 			break
 		}
 		w := v_1.Args[0]
@@ -12730,7 +12730,7 @@
 		}
 		v.reset(OpARM64MOVDstoreidx)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
-		v0.AuxInt = 2
+		v0.AuxInt = int64ToAuxInt(2)
 		v0.AddArg(idx1)
 		v.AddArg4(ptr1, v0, w, mem)
 		return true
@@ -12739,27 +12739,27 @@
 	// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
 	// result: (MOVDstore [i-4] {s} ptr0 w0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		j := v_1.AuxInt
+		j := auxIntToInt64(v_1.AuxInt)
 		w := v_1.Args[0]
 		x := v_2
-		if x.Op != OpARM64MOVWstore || x.AuxInt != i-4 || x.Aux != s {
+		if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[2]
 		ptr1 := x.Args[0]
 		w0 := x.Args[1]
-		if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+		if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVDstore)
-		v.AuxInt = i - 4
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(i - 4)
+		v.Aux = symToAux(s)
 		v.AddArg3(ptr0, w0, mem)
 		return true
 	}
@@ -12767,10 +12767,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVDstoreidx ptr1 idx1 w0 mem)
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt32(v.AuxInt) != 4 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -12783,7 +12783,7 @@
 			if v_1.Op != OpARM64SRLconst {
 				continue
 			}
-			j := v_1.AuxInt
+			j := auxIntToInt64(v_1.AuxInt)
 			w := v_1.Args[0]
 			x := v_2
 			if x.Op != OpARM64MOVWstoreidx {
@@ -12793,7 +12793,7 @@
 			ptr1 := x.Args[0]
 			idx1 := x.Args[1]
 			w0 := x.Args[2]
-			if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+			if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
 				continue
 			}
 			v.reset(OpARM64MOVDstoreidx)
@@ -12806,11 +12806,11 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt32(v.AuxInt) != 4 {
 			break
 		}
-		s := v.Aux
-		if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+		s := auxToSym(v.Aux)
+		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
 			break
 		}
 		idx0 := v_0.Args[1]
@@ -12818,7 +12818,7 @@
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		j := v_1.AuxInt
+		j := auxIntToInt64(v_1.AuxInt)
 		w := v_1.Args[0]
 		x := v_2
 		if x.Op != OpARM64MOVWstoreidx4 {
@@ -12828,12 +12828,12 @@
 		ptr1 := x.Args[0]
 		idx1 := x.Args[1]
 		w0 := x.Args[2]
-		if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+		if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVDstoreidx)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
-		v0.AuxInt = 2
+		v0.AuxInt = int64ToAuxInt(2)
 		v0.AddArg(idx1)
 		v.AddArg4(ptr1, v0, w0, mem)
 		return true
@@ -12957,11 +12957,11 @@
 	// result: (MOVDstoreidx ptr idx w mem)
 	for {
 		ptr := v_0
-		if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 4 {
+		if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
 			break
 		}
 		idx := v_1.Args[0]
-		if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 32 {
+		if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 32 {
 			break
 		}
 		w := v_2.Args[0]
@@ -13133,28 +13133,28 @@
 		return true
 	}
 	// match: (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
-	// cond: x.Uses == 1 && areAdjacentOffsets(i,j,4) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
-	// result: (MOVDstorezero [min(i,j)] {s} ptr0 mem)
+	// cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),4) && isSamePtr(ptr0, ptr1) && clobber(x)
+	// result: (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
 	for {
-		i := v.AuxInt
-		s := v.Aux
+		i := auxIntToInt32(v.AuxInt)
+		s := auxToSym(v.Aux)
 		ptr0 := v_0
 		x := v_1
 		if x.Op != OpARM64MOVWstorezero {
 			break
 		}
-		j := x.AuxInt
-		if x.Aux != s {
+		j := auxIntToInt32(x.AuxInt)
+		if auxToSym(x.Aux) != s {
 			break
 		}
 		mem := x.Args[1]
 		ptr1 := x.Args[0]
-		if !(x.Uses == 1 && areAdjacentOffsets(i, j, 4) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+		if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 4) && isSamePtr(ptr0, ptr1) && clobber(x)) {
 			break
 		}
 		v.reset(OpARM64MOVDstorezero)
-		v.AuxInt = min(i, j)
-		v.Aux = s
+		v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+		v.Aux = symToAux(s)
 		v.AddArg2(ptr0, mem)
 		return true
 	}
@@ -13162,10 +13162,10 @@
 	// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
 	// result: (MOVDstorezeroidx ptr1 idx1 mem)
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt32(v.AuxInt) != 4 {
 			break
 		}
-		s := v.Aux
+		s := auxToSym(v.Aux)
 		if v_0.Op != OpARM64ADD {
 			break
 		}
@@ -13195,11 +13195,11 @@
 	// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
 	// result: (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt32(v.AuxInt) != 4 {
 			break
 		}
-		s := v.Aux
-		if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+		s := auxToSym(v.Aux)
+		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
 			break
 		}
 		idx0 := v_0.Args[1]
@@ -13216,7 +13216,7 @@
 		}
 		v.reset(OpARM64MOVDstorezeroidx)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
-		v0.AuxInt = 2
+		v0.AuxInt = int64ToAuxInt(2)
 		v0.AddArg(idx1)
 		v.AddArg3(ptr1, v0, mem)
 		return true
@@ -13294,7 +13294,7 @@
 	// result: (MOVDstorezeroidx ptr idx mem)
 	for {
 		ptr := v_0
-		if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 4 {
+		if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
 			break
 		}
 		idx := v_1.Args[0]
@@ -14668,13 +14668,13 @@
 		if x.Op != OpARM64SLLconst {
 			break
 		}
-		c := x.AuxInt
+		c := auxIntToInt64(x.AuxInt)
 		y := x.Args[0]
 		if !(clobberIfDead(x)) {
 			break
 		}
 		v.reset(OpARM64MVNshiftLL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(y)
 		return true
 	}
@@ -14686,13 +14686,13 @@
 		if x.Op != OpARM64SRLconst {
 			break
 		}
-		c := x.AuxInt
+		c := auxIntToInt64(x.AuxInt)
 		y := x.Args[0]
 		if !(clobberIfDead(x)) {
 			break
 		}
 		v.reset(OpARM64MVNshiftRL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(y)
 		return true
 	}
@@ -14704,13 +14704,13 @@
 		if x.Op != OpARM64SRAconst {
 			break
 		}
-		c := x.AuxInt
+		c := auxIntToInt64(x.AuxInt)
 		y := x.Args[0]
 		if !(clobberIfDead(x)) {
 			break
 		}
 		v.reset(OpARM64MVNshiftRA)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(y)
 		return true
 	}
@@ -14721,13 +14721,13 @@
 	// match: (MVNshiftLL (MOVDconst [c]) [d])
 	// result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = ^int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
 		return true
 	}
 	return false
@@ -14737,13 +14737,13 @@
 	// match: (MVNshiftRA (MOVDconst [c]) [d])
 	// result: (MOVDconst [^(c>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = ^(c >> uint64(d))
+		v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
 		return true
 	}
 	return false
@@ -14753,13 +14753,13 @@
 	// match: (MVNshiftRL (MOVDconst [c]) [d])
 	// result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = ^int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
 		return true
 	}
 	return false
@@ -14809,13 +14809,13 @@
 		if x.Op != OpARM64SLLconst {
 			break
 		}
-		c := x.AuxInt
+		c := auxIntToInt64(x.AuxInt)
 		y := x.Args[0]
 		if !(clobberIfDead(x)) {
 			break
 		}
 		v.reset(OpARM64NEGshiftLL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(y)
 		return true
 	}
@@ -14827,13 +14827,13 @@
 		if x.Op != OpARM64SRLconst {
 			break
 		}
-		c := x.AuxInt
+		c := auxIntToInt64(x.AuxInt)
 		y := x.Args[0]
 		if !(clobberIfDead(x)) {
 			break
 		}
 		v.reset(OpARM64NEGshiftRL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(y)
 		return true
 	}
@@ -14845,13 +14845,13 @@
 		if x.Op != OpARM64SRAconst {
 			break
 		}
-		c := x.AuxInt
+		c := auxIntToInt64(x.AuxInt)
 		y := x.Args[0]
 		if !(clobberIfDead(x)) {
 			break
 		}
 		v.reset(OpARM64NEGshiftRA)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(y)
 		return true
 	}
@@ -14862,13 +14862,13 @@
 	// match: (NEGshiftLL (MOVDconst [c]) [d])
 	// result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(-int64(uint64(c) << uint64(d)))
 		return true
 	}
 	return false
@@ -14878,13 +14878,13 @@
 	// match: (NEGshiftRA (MOVDconst [c]) [d])
 	// result: (MOVDconst [-(c>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -(c >> uint64(d))
+		v.AuxInt = int64ToAuxInt(-(c >> uint64(d)))
 		return true
 	}
 	return false
@@ -14894,13 +14894,13 @@
 	// match: (NEGshiftRL (MOVDconst [c]) [d])
 	// result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(-int64(uint64(c) >> uint64(d)))
 		return true
 	}
 	return false
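
In the MVN and NEG constant folds above, the SLL and SRL cases route c through uint64 so the shift is logical, while the SRA case shifts the signed c directly to keep sign propagation. A small runnable illustration of why that conversion matters:

package main

import "fmt"

func main() {
	c, d := int64(-8), uint64(1)
	// Logical right shift: convert to uint64 first, as in
	// ^int64(uint64(c) >> uint64(d)) for MVNshiftRL.
	logical := ^int64(uint64(c) >> d)
	// Arithmetic right shift: shift the signed value, as in
	// ^(c >> uint64(d)) for MVNshiftRA.
	arithmetic := ^(c >> d)
	fmt.Println(logical, arithmetic) // the two results differ on negative c
}
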
@@ -14987,13 +14987,13 @@
 			if x1.Op != OpARM64SLLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ORshiftLL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -15009,13 +15009,13 @@
 			if x1.Op != OpARM64SRLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ORshiftRL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -15031,13 +15031,13 @@
 			if x1.Op != OpARM64SRAconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64ORshiftRA)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -15058,14 +15058,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 63 {
+			if auxIntToInt64(v_0_1.AuxInt) != 63 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
@@ -15081,15 +15081,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -15098,11 +15098,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64ROR)
@@ -15128,14 +15128,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 63 {
+			if auxIntToInt64(v_0_1.AuxInt) != 63 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SLL {
@@ -15151,15 +15151,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -15168,11 +15168,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64ROR)
@@ -15196,14 +15196,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 31 {
+			if auxIntToInt64(v_0_1.AuxInt) != 31 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
@@ -15220,15 +15220,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -15237,11 +15237,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64RORW)
@@ -15271,14 +15271,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 31 {
+			if auxIntToInt64(v_0_1.AuxInt) != 31 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SLL {
@@ -15294,15 +15294,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -15311,11 +15311,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64RORW)
@@ -15325,50 +15325,50 @@
 		break
 	}
 	// match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
-	// cond: ac == ^((1<<uint(getARM64BFwidth(bfc))-1) << uint(getARM64BFlsb(bfc)))
+	// cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
 	// result: (BFI [bfc] y x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != OpARM64UBFIZ {
 				continue
 			}
-			bfc := v_0.AuxInt
+			bfc := auxIntToArm64BitField(v_0.AuxInt)
 			x := v_0.Args[0]
 			if v_1.Op != OpARM64ANDconst {
 				continue
 			}
-			ac := v_1.AuxInt
+			ac := auxIntToInt64(v_1.AuxInt)
 			y := v_1.Args[0]
-			if !(ac == ^((1<<uint(getARM64BFwidth(bfc)) - 1) << uint(getARM64BFlsb(bfc)))) {
+			if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
 				continue
 			}
 			v.reset(OpARM64BFI)
-			v.AuxInt = bfc
+			v.AuxInt = arm64BitFieldToAuxInt(bfc)
 			v.AddArg2(y, x)
 			return true
 		}
 		break
 	}
 	// match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
-	// cond: ac == ^(1<<uint(getARM64BFwidth(bfc))-1)
+	// cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
 	// result: (BFXIL [bfc] y x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != OpARM64UBFX {
 				continue
 			}
-			bfc := v_0.AuxInt
+			bfc := auxIntToArm64BitField(v_0.AuxInt)
 			x := v_0.Args[0]
 			if v_1.Op != OpARM64ANDconst {
 				continue
 			}
-			ac := v_1.AuxInt
+			ac := auxIntToInt64(v_1.AuxInt)
 			y := v_1.Args[0]
-			if !(ac == ^(1<<uint(getARM64BFwidth(bfc)) - 1)) {
+			if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
 				continue
 			}
 			v.reset(OpARM64BFXIL)
-			v.AuxInt = bfc
+			v.AuxInt = arm64BitFieldToAuxInt(bfc)
 			v.AddArg2(y, x)
 			return true
 		}
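
The BFI and BFXIL rules fire only when the AND mask is exactly the complement of the bits the bit-field insert writes, so OR-ing the two operands cannot mix bits. A runnable check of the BFI-side condition:

package main

import "fmt"

func main() {
	lsb, width := uint(8), uint(16)
	// Bits the UBFIZ result occupies: width ones starting at lsb.
	inserted := (int64(1)<<width - 1) << lsb
	// The rule requires ac == ^((1<<width - 1) << lsb): the AND keeps
	// exactly the bits BFI will not overwrite.
	ac := ^inserted
	fmt.Printf("%#x %#x disjoint=%v\n", inserted, ac, inserted&ac == 0)
}
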
@@ -15376,22 +15376,22 @@
 	}
 	// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
 	// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
-	// result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	// result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 	for {
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			s0 := o1.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -15402,8 +15402,8 @@
 			if x0.Op != OpARM64MOVBUload {
 				continue
 			}
-			i3 := x0.AuxInt
-			s := x0.Aux
+			i3 := auxIntToInt32(x0.AuxInt)
+			s := auxToSym(x0.Aux)
 			mem := x0.Args[1]
 			p := x0.Args[0]
 			y1 := o1.Args[1]
@@ -15414,8 +15414,8 @@
 			if x1.Op != OpARM64MOVBUload {
 				continue
 			}
-			i2 := x1.AuxInt
-			if x1.Aux != s {
+			i2 := auxIntToInt32(x1.AuxInt)
+			if auxToSym(x1.Aux) != s {
 				continue
 			}
 			_ = x1.Args[1]
@@ -15430,8 +15430,8 @@
 			if x2.Op != OpARM64MOVBUload {
 				continue
 			}
-			i1 := x2.AuxInt
-			if x2.Aux != s {
+			i1 := auxIntToInt32(x2.AuxInt)
+			if auxToSym(x2.Aux) != s {
 				continue
 			}
 			_ = x2.Args[1]
@@ -15446,8 +15446,8 @@
 			if x3.Op != OpARM64MOVBUload {
 				continue
 			}
-			i0 := x3.AuxInt
-			if x3.Aux != s {
+			i0 := auxIntToInt32(x3.AuxInt)
+			if auxToSym(x3.Aux) != s {
 				continue
 			}
 			_ = x3.Args[1]
@@ -15457,9 +15457,9 @@
 			b = mergePoint(b, x0, x1, x2, x3)
 			v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
 			v.copyOf(v0)
-			v0.Aux = s
+			v0.Aux = symToAux(s)
 			v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
-			v1.AuxInt = i0
+			v1.AuxInt = int64ToAuxInt(int64(i0))
 			v1.AddArg(p)
 			v0.AddArg2(v1, mem)
 			return true
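
This rule recognizes four adjacent byte loads OR-ed together at shifts 0, 8, 16 and 24 and replaces them with one 32-bit load (the REVW variant later in the file handles the byte-reversed shape, and the MOVDload version below does the same for eight bytes). A runnable sanity check of the equivalence being exploited:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	p := []byte{0x01, 0x02, 0x03, 0x04}
	// p[i0] | p[i0+1]<<8 | p[i0+2]<<16 | p[i0+3]<<24 ...
	combined := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
	// ... is exactly one little-endian 32-bit load at i0.
	fmt.Println(combined == binary.LittleEndian.Uint32(p)) // true
}
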
@@ -15473,17 +15473,17 @@
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			s0 := o1.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -15491,10 +15491,10 @@
 				continue
 			}
 			x0 := y0.Args[0]
-			if x0.Op != OpARM64MOVBUload || x0.AuxInt != 3 {
+			if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 3 {
 				continue
 			}
-			s := x0.Aux
+			s := auxToSym(x0.Aux)
 			mem := x0.Args[1]
 			p := x0.Args[0]
 			y1 := o1.Args[1]
@@ -15502,7 +15502,7 @@
 				continue
 			}
 			x1 := y1.Args[0]
-			if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 || x1.Aux != s {
+			if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 || auxToSym(x1.Aux) != s {
 				continue
 			}
 			_ = x1.Args[1]
@@ -15514,7 +15514,7 @@
 				continue
 			}
 			x2 := y2.Args[0]
-			if x2.Op != OpARM64MOVBUload || x2.AuxInt != 1 || x2.Aux != s {
+			if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
 				continue
 			}
 			_ = x2.Args[1]
@@ -15561,17 +15561,17 @@
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			s0 := o1.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -15585,7 +15585,7 @@
 			mem := x0.Args[2]
 			ptr := x0.Args[0]
 			x0_1 := x0.Args[1]
-			if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 3 {
+			if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 3 {
 				continue
 			}
 			idx := x0_1.Args[0]
@@ -15602,7 +15602,7 @@
 				continue
 			}
 			x1_1 := x1.Args[1]
-			if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+			if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
 				continue
 			}
 			y2 := o0.Args[1]
@@ -15618,7 +15618,7 @@
 				continue
 			}
 			x2_1 := x2.Args[1]
-			if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+			if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] {
 				continue
 			}
 			y3 := v_1
@@ -15643,42 +15643,42 @@
 	}
 	// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
 	// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 	for {
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			o2 := o1.Args[0]
-			if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+			if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
 				continue
 			}
 			_ = o2.Args[1]
 			o3 := o2.Args[0]
-			if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+			if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
 				continue
 			}
 			_ = o3.Args[1]
 			o4 := o3.Args[0]
-			if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+			if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
 				continue
 			}
 			_ = o4.Args[1]
 			o5 := o4.Args[0]
-			if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+			if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
 				continue
 			}
 			_ = o5.Args[1]
 			s0 := o5.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -15689,8 +15689,8 @@
 			if x0.Op != OpARM64MOVBUload {
 				continue
 			}
-			i7 := x0.AuxInt
-			s := x0.Aux
+			i7 := auxIntToInt32(x0.AuxInt)
+			s := auxToSym(x0.Aux)
 			mem := x0.Args[1]
 			p := x0.Args[0]
 			y1 := o5.Args[1]
@@ -15701,8 +15701,8 @@
 			if x1.Op != OpARM64MOVBUload {
 				continue
 			}
-			i6 := x1.AuxInt
-			if x1.Aux != s {
+			i6 := auxIntToInt32(x1.AuxInt)
+			if auxToSym(x1.Aux) != s {
 				continue
 			}
 			_ = x1.Args[1]
@@ -15717,8 +15717,8 @@
 			if x2.Op != OpARM64MOVBUload {
 				continue
 			}
-			i5 := x2.AuxInt
-			if x2.Aux != s {
+			i5 := auxIntToInt32(x2.AuxInt)
+			if auxToSym(x2.Aux) != s {
 				continue
 			}
 			_ = x2.Args[1]
@@ -15733,8 +15733,8 @@
 			if x3.Op != OpARM64MOVBUload {
 				continue
 			}
-			i4 := x3.AuxInt
-			if x3.Aux != s {
+			i4 := auxIntToInt32(x3.AuxInt)
+			if auxToSym(x3.Aux) != s {
 				continue
 			}
 			_ = x3.Args[1]
@@ -15749,8 +15749,8 @@
 			if x4.Op != OpARM64MOVBUload {
 				continue
 			}
-			i3 := x4.AuxInt
-			if x4.Aux != s {
+			i3 := auxIntToInt32(x4.AuxInt)
+			if auxToSym(x4.Aux) != s {
 				continue
 			}
 			_ = x4.Args[1]
@@ -15765,8 +15765,8 @@
 			if x5.Op != OpARM64MOVBUload {
 				continue
 			}
-			i2 := x5.AuxInt
-			if x5.Aux != s {
+			i2 := auxIntToInt32(x5.AuxInt)
+			if auxToSym(x5.Aux) != s {
 				continue
 			}
 			_ = x5.Args[1]
@@ -15781,8 +15781,8 @@
 			if x6.Op != OpARM64MOVBUload {
 				continue
 			}
-			i1 := x6.AuxInt
-			if x6.Aux != s {
+			i1 := auxIntToInt32(x6.AuxInt)
+			if auxToSym(x6.Aux) != s {
 				continue
 			}
 			_ = x6.Args[1]
@@ -15797,8 +15797,8 @@
 			if x7.Op != OpARM64MOVBUload {
 				continue
 			}
-			i0 := x7.AuxInt
-			if x7.Aux != s {
+			i0 := auxIntToInt32(x7.AuxInt)
+			if auxToSym(x7.Aux) != s {
 				continue
 			}
 			_ = x7.Args[1]
@@ -15808,9 +15808,9 @@
 			b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
 			v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
 			v.copyOf(v0)
-			v0.Aux = s
+			v0.Aux = symToAux(s)
 			v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
-			v1.AuxInt = i0
+			v1.AuxInt = int64ToAuxInt(int64(i0))
 			v1.AddArg(p)
 			v0.AddArg2(v1, mem)
 			return true
@@ -15824,37 +15824,37 @@
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			o2 := o1.Args[0]
-			if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+			if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
 				continue
 			}
 			_ = o2.Args[1]
 			o3 := o2.Args[0]
-			if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+			if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
 				continue
 			}
 			_ = o3.Args[1]
 			o4 := o3.Args[0]
-			if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+			if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
 				continue
 			}
 			_ = o4.Args[1]
 			o5 := o4.Args[0]
-			if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+			if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
 				continue
 			}
 			_ = o5.Args[1]
 			s0 := o5.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -15862,10 +15862,10 @@
 				continue
 			}
 			x0 := y0.Args[0]
-			if x0.Op != OpARM64MOVBUload || x0.AuxInt != 7 {
+			if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 7 {
 				continue
 			}
-			s := x0.Aux
+			s := auxToSym(x0.Aux)
 			mem := x0.Args[1]
 			p := x0.Args[0]
 			y1 := o5.Args[1]
@@ -15873,7 +15873,7 @@
 				continue
 			}
 			x1 := y1.Args[0]
-			if x1.Op != OpARM64MOVBUload || x1.AuxInt != 6 || x1.Aux != s {
+			if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 6 || auxToSym(x1.Aux) != s {
 				continue
 			}
 			_ = x1.Args[1]
@@ -15885,7 +15885,7 @@
 				continue
 			}
 			x2 := y2.Args[0]
-			if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+			if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
 				continue
 			}
 			_ = x2.Args[1]
@@ -15897,7 +15897,7 @@
 				continue
 			}
 			x3 := y3.Args[0]
-			if x3.Op != OpARM64MOVBUload || x3.AuxInt != 4 || x3.Aux != s {
+			if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 4 || auxToSym(x3.Aux) != s {
 				continue
 			}
 			_ = x3.Args[1]
@@ -15909,7 +15909,7 @@
 				continue
 			}
 			x4 := y4.Args[0]
-			if x4.Op != OpARM64MOVBUload || x4.AuxInt != 3 || x4.Aux != s {
+			if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 3 || auxToSym(x4.Aux) != s {
 				continue
 			}
 			_ = x4.Args[1]
@@ -15921,7 +15921,7 @@
 				continue
 			}
 			x5 := y5.Args[0]
-			if x5.Op != OpARM64MOVBUload || x5.AuxInt != 2 || x5.Aux != s {
+			if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 2 || auxToSym(x5.Aux) != s {
 				continue
 			}
 			_ = x5.Args[1]
@@ -15933,7 +15933,7 @@
 				continue
 			}
 			x6 := y6.Args[0]
-			if x6.Op != OpARM64MOVBUload || x6.AuxInt != 1 || x6.Aux != s {
+			if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 1 || auxToSym(x6.Aux) != s {
 				continue
 			}
 			_ = x6.Args[1]
@@ -15980,37 +15980,37 @@
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			o2 := o1.Args[0]
-			if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+			if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
 				continue
 			}
 			_ = o2.Args[1]
 			o3 := o2.Args[0]
-			if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+			if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
 				continue
 			}
 			_ = o3.Args[1]
 			o4 := o3.Args[0]
-			if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+			if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
 				continue
 			}
 			_ = o4.Args[1]
 			o5 := o4.Args[0]
-			if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+			if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
 				continue
 			}
 			_ = o5.Args[1]
 			s0 := o5.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -16024,7 +16024,7 @@
 			mem := x0.Args[2]
 			ptr := x0.Args[0]
 			x0_1 := x0.Args[1]
-			if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 7 {
+			if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 7 {
 				continue
 			}
 			idx := x0_1.Args[0]
@@ -16041,7 +16041,7 @@
 				continue
 			}
 			x1_1 := x1.Args[1]
-			if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+			if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] {
 				continue
 			}
 			y2 := o4.Args[1]
@@ -16057,7 +16057,7 @@
 				continue
 			}
 			x2_1 := x2.Args[1]
-			if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+			if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
 				continue
 			}
 			y3 := o3.Args[1]
@@ -16073,7 +16073,7 @@
 				continue
 			}
 			x3_1 := x3.Args[1]
-			if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+			if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] {
 				continue
 			}
 			y4 := o2.Args[1]
@@ -16089,7 +16089,7 @@
 				continue
 			}
 			x4_1 := x4.Args[1]
-			if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+			if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] {
 				continue
 			}
 			y5 := o1.Args[1]
@@ -16105,7 +16105,7 @@
 				continue
 			}
 			x5_1 := x5.Args[1]
-			if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+			if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] {
 				continue
 			}
 			y6 := o0.Args[1]
@@ -16121,7 +16121,7 @@
 				continue
 			}
 			x6_1 := x6.Args[1]
-			if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+			if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] {
 				continue
 			}
 			y7 := v_1
@@ -16146,22 +16146,22 @@
 	}
 	// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
 	// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
-	// result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+	// result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
 	for {
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			s0 := o1.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -16172,8 +16172,8 @@
 			if x0.Op != OpARM64MOVBUload {
 				continue
 			}
-			i0 := x0.AuxInt
-			s := x0.Aux
+			i0 := auxIntToInt32(x0.AuxInt)
+			s := auxToSym(x0.Aux)
 			mem := x0.Args[1]
 			p := x0.Args[0]
 			y1 := o1.Args[1]
@@ -16184,8 +16184,8 @@
 			if x1.Op != OpARM64MOVBUload {
 				continue
 			}
-			i1 := x1.AuxInt
-			if x1.Aux != s {
+			i1 := auxIntToInt32(x1.AuxInt)
+			if auxToSym(x1.Aux) != s {
 				continue
 			}
 			_ = x1.Args[1]
@@ -16200,8 +16200,8 @@
 			if x2.Op != OpARM64MOVBUload {
 				continue
 			}
-			i2 := x2.AuxInt
-			if x2.Aux != s {
+			i2 := auxIntToInt32(x2.AuxInt)
+			if auxToSym(x2.Aux) != s {
 				continue
 			}
 			_ = x2.Args[1]
@@ -16216,8 +16216,8 @@
 			if x3.Op != OpARM64MOVBUload {
 				continue
 			}
-			i3 := x3.AuxInt
-			if x3.Aux != s {
+			i3 := auxIntToInt32(x3.AuxInt)
+			if auxToSym(x3.Aux) != s {
 				continue
 			}
 			_ = x3.Args[1]
@@ -16228,9 +16228,9 @@
 			v0 := b.NewValue0(x3.Pos, OpARM64REVW, t)
 			v.copyOf(v0)
 			v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
-			v1.Aux = s
+			v1.Aux = symToAux(s)
 			v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
-			v2.AuxInt = i0
+			v2.AuxInt = int64ToAuxInt(int64(i0))
 			v2.AddArg(p)
 			v1.AddArg2(v2, mem)
 			v0.AddArg(v1)
@@ -16245,17 +16245,17 @@
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			s0 := o1.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -16274,10 +16274,10 @@
 				continue
 			}
 			x1 := y1.Args[0]
-			if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+			if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
 				continue
 			}
-			s := x1.Aux
+			s := auxToSym(x1.Aux)
 			_ = x1.Args[1]
 			p1 := x1.Args[0]
 			if p1.Op != OpARM64ADD {
@@ -16297,7 +16297,7 @@
 					continue
 				}
 				x2 := y2.Args[0]
-				if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+				if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
 					continue
 				}
 				_ = x2.Args[1]
@@ -16310,7 +16310,7 @@
 					continue
 				}
 				x3 := y3.Args[0]
-				if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s {
+				if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
 					continue
 				}
 				_ = x3.Args[1]
@@ -16335,17 +16335,17 @@
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			s0 := o1.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -16372,7 +16372,7 @@
 				continue
 			}
 			x1_1 := x1.Args[1]
-			if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+			if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
 				continue
 			}
 			y2 := o0.Args[1]
@@ -16388,7 +16388,7 @@
 				continue
 			}
 			x2_1 := x2.Args[1]
-			if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+			if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
 				continue
 			}
 			y3 := v_1
@@ -16404,7 +16404,7 @@
 				continue
 			}
 			x3_1 := x3.Args[1]
-			if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+			if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
 				continue
 			}
 			b = mergePoint(b, x0, x1, x2, x3)
@@ -16419,42 +16419,42 @@
 	}
 	// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
 	// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
 	for {
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			o2 := o1.Args[0]
-			if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+			if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
 				continue
 			}
 			_ = o2.Args[1]
 			o3 := o2.Args[0]
-			if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+			if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
 				continue
 			}
 			_ = o3.Args[1]
 			o4 := o3.Args[0]
-			if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+			if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
 				continue
 			}
 			_ = o4.Args[1]
 			o5 := o4.Args[0]
-			if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+			if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
 				continue
 			}
 			_ = o5.Args[1]
 			s0 := o5.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -16465,8 +16465,8 @@
 			if x0.Op != OpARM64MOVBUload {
 				continue
 			}
-			i0 := x0.AuxInt
-			s := x0.Aux
+			i0 := auxIntToInt32(x0.AuxInt)
+			s := auxToSym(x0.Aux)
 			mem := x0.Args[1]
 			p := x0.Args[0]
 			y1 := o5.Args[1]
@@ -16477,8 +16477,8 @@
 			if x1.Op != OpARM64MOVBUload {
 				continue
 			}
-			i1 := x1.AuxInt
-			if x1.Aux != s {
+			i1 := auxIntToInt32(x1.AuxInt)
+			if auxToSym(x1.Aux) != s {
 				continue
 			}
 			_ = x1.Args[1]
@@ -16493,8 +16493,8 @@
 			if x2.Op != OpARM64MOVBUload {
 				continue
 			}
-			i2 := x2.AuxInt
-			if x2.Aux != s {
+			i2 := auxIntToInt32(x2.AuxInt)
+			if auxToSym(x2.Aux) != s {
 				continue
 			}
 			_ = x2.Args[1]
@@ -16509,8 +16509,8 @@
 			if x3.Op != OpARM64MOVBUload {
 				continue
 			}
-			i3 := x3.AuxInt
-			if x3.Aux != s {
+			i3 := auxIntToInt32(x3.AuxInt)
+			if auxToSym(x3.Aux) != s {
 				continue
 			}
 			_ = x3.Args[1]
@@ -16525,8 +16525,8 @@
 			if x4.Op != OpARM64MOVBUload {
 				continue
 			}
-			i4 := x4.AuxInt
-			if x4.Aux != s {
+			i4 := auxIntToInt32(x4.AuxInt)
+			if auxToSym(x4.Aux) != s {
 				continue
 			}
 			_ = x4.Args[1]
@@ -16541,8 +16541,8 @@
 			if x5.Op != OpARM64MOVBUload {
 				continue
 			}
-			i5 := x5.AuxInt
-			if x5.Aux != s {
+			i5 := auxIntToInt32(x5.AuxInt)
+			if auxToSym(x5.Aux) != s {
 				continue
 			}
 			_ = x5.Args[1]
@@ -16557,8 +16557,8 @@
 			if x6.Op != OpARM64MOVBUload {
 				continue
 			}
-			i6 := x6.AuxInt
-			if x6.Aux != s {
+			i6 := auxIntToInt32(x6.AuxInt)
+			if auxToSym(x6.Aux) != s {
 				continue
 			}
 			_ = x6.Args[1]
@@ -16573,8 +16573,8 @@
 			if x7.Op != OpARM64MOVBUload {
 				continue
 			}
-			i7 := x7.AuxInt
-			if x7.Aux != s {
+			i7 := auxIntToInt32(x7.AuxInt)
+			if auxToSym(x7.Aux) != s {
 				continue
 			}
 			_ = x7.Args[1]
@@ -16585,9 +16585,9 @@
 			v0 := b.NewValue0(x7.Pos, OpARM64REV, t)
 			v.copyOf(v0)
 			v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
-			v1.Aux = s
+			v1.Aux = symToAux(s)
 			v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
-			v2.AuxInt = i0
+			v2.AuxInt = int64ToAuxInt(int64(i0))
 			v2.AddArg(p)
 			v1.AddArg2(v2, mem)
 			v0.AddArg(v1)
@@ -16602,37 +16602,37 @@
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			o2 := o1.Args[0]
-			if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+			if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
 				continue
 			}
 			_ = o2.Args[1]
 			o3 := o2.Args[0]
-			if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+			if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
 				continue
 			}
 			_ = o3.Args[1]
 			o4 := o3.Args[0]
-			if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+			if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
 				continue
 			}
 			_ = o4.Args[1]
 			o5 := o4.Args[0]
-			if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+			if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
 				continue
 			}
 			_ = o5.Args[1]
 			s0 := o5.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -16651,10 +16651,10 @@
 				continue
 			}
 			x1 := y1.Args[0]
-			if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+			if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
 				continue
 			}
-			s := x1.Aux
+			s := auxToSym(x1.Aux)
 			_ = x1.Args[1]
 			p1 := x1.Args[0]
 			if p1.Op != OpARM64ADD {
@@ -16674,7 +16674,7 @@
 					continue
 				}
 				x2 := y2.Args[0]
-				if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+				if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
 					continue
 				}
 				_ = x2.Args[1]
@@ -16687,7 +16687,7 @@
 					continue
 				}
 				x3 := y3.Args[0]
-				if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s {
+				if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
 					continue
 				}
 				_ = x3.Args[1]
@@ -16699,7 +16699,7 @@
 					continue
 				}
 				x4 := y4.Args[0]
-				if x4.Op != OpARM64MOVBUload || x4.AuxInt != 4 || x4.Aux != s {
+				if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 4 || auxToSym(x4.Aux) != s {
 					continue
 				}
 				_ = x4.Args[1]
@@ -16711,7 +16711,7 @@
 					continue
 				}
 				x5 := y5.Args[0]
-				if x5.Op != OpARM64MOVBUload || x5.AuxInt != 5 || x5.Aux != s {
+				if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 5 || auxToSym(x5.Aux) != s {
 					continue
 				}
 				_ = x5.Args[1]
@@ -16723,7 +16723,7 @@
 					continue
 				}
 				x6 := y6.Args[0]
-				if x6.Op != OpARM64MOVBUload || x6.AuxInt != 6 || x6.Aux != s {
+				if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 6 || auxToSym(x6.Aux) != s {
 					continue
 				}
 				_ = x6.Args[1]
@@ -16735,7 +16735,7 @@
 					continue
 				}
 				x7 := y7.Args[0]
-				if x7.Op != OpARM64MOVBUload || x7.AuxInt != 7 || x7.Aux != s {
+				if x7.Op != OpARM64MOVBUload || auxIntToInt32(x7.AuxInt) != 7 || auxToSym(x7.Aux) != s {
 					continue
 				}
 				_ = x7.Args[1]
@@ -16760,37 +16760,37 @@
 		t := v.Type
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			o0 := v_0
-			if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+			if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
 				continue
 			}
 			_ = o0.Args[1]
 			o1 := o0.Args[0]
-			if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+			if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
 				continue
 			}
 			_ = o1.Args[1]
 			o2 := o1.Args[0]
-			if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+			if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
 				continue
 			}
 			_ = o2.Args[1]
 			o3 := o2.Args[0]
-			if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+			if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
 				continue
 			}
 			_ = o3.Args[1]
 			o4 := o3.Args[0]
-			if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+			if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
 				continue
 			}
 			_ = o4.Args[1]
 			o5 := o4.Args[0]
-			if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+			if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
 				continue
 			}
 			_ = o5.Args[1]
 			s0 := o5.Args[0]
-			if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+			if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
 				continue
 			}
 			y0 := s0.Args[0]
@@ -16817,7 +16817,7 @@
 				continue
 			}
 			x1_1 := x1.Args[1]
-			if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+			if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
 				continue
 			}
 			y2 := o4.Args[1]
@@ -16833,7 +16833,7 @@
 				continue
 			}
 			x2_1 := x2.Args[1]
-			if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+			if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
 				continue
 			}
 			y3 := o3.Args[1]
@@ -16849,7 +16849,7 @@
 				continue
 			}
 			x3_1 := x3.Args[1]
-			if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+			if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] {
 				continue
 			}
 			y4 := o2.Args[1]
@@ -16865,7 +16865,7 @@
 				continue
 			}
 			x4_1 := x4.Args[1]
-			if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+			if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] {
 				continue
 			}
 			y5 := o1.Args[1]
@@ -16881,7 +16881,7 @@
 				continue
 			}
 			x5_1 := x5.Args[1]
-			if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+			if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] {
 				continue
 			}
 			y6 := o0.Args[1]
@@ -16897,7 +16897,7 @@
 				continue
 			}
 			x6_1 := x6.Args[1]
-			if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+			if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] {
 				continue
 			}
 			y7 := v_1
@@ -16913,7 +16913,7 @@
 				continue
 			}
 			x7_1 := x7.Args[1]
-			if x7_1.Op != OpARM64ADDconst || x7_1.AuxInt != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+			if x7_1.Op != OpARM64ADDconst || auxIntToInt64(x7_1.AuxInt) != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
 				continue
 			}
 			b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
@@ -16964,13 +16964,13 @@
 		if x1.Op != OpARM64SLLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64ORNshiftLL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -16983,13 +16983,13 @@
 		if x1.Op != OpARM64SRLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64ORNshiftRL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -17002,13 +17002,13 @@
 		if x1.Op != OpARM64SRAconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64ORNshiftRA)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -17020,14 +17020,14 @@
 	// match: (ORNshiftLL x (MOVDconst [c]) [d])
 	// result: (ORconst x [^int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ORconst)
-		v.AuxInt = ^int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
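
// The ORNshiftLL fold above computes the replacement ORconst mask at rewrite
// time. A small sketch of the arithmetic, assuming nothing beyond the
// expression in the rule itself:

package main

import "fmt"

// ORN x, (c << d) is x | ^(c<<d), so the ORconst mask for c = 0xFF, d = 8
// is ^0xff00, written with an unsigned shift exactly as in the rule above.
func main() {
	c, d := int64(0xFF), int64(8)
	mask := ^int64(uint64(c) << uint64(d))
	fmt.Printf("%#x\n", uint64(mask)) // 0xffffffffffff00ff
}
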
@@ -17035,17 +17035,17 @@
 	// cond: c==d
 	// result: (MOVDconst [-1])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SLLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -1
+		v.AuxInt = int64ToAuxInt(-1)
 		return true
 	}
 	return false
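
// The MOVDconst [-1] results in this family (here and in the ORNshiftRA and
// ORNshiftRL hunks that follow) rest on the identity t | ^t == -1: when the
// matched shift amounts agree, ORN ends up combining a value with its own
// complement. A one-line check of the identity:

package main

import "fmt"

// t | ^t is all ones for any t, which is what the c==d cases reduce to.
func main() {
	t := int64(0x12345) << 3
	fmt.Println(t|^t == -1) // true
}
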
@@ -17056,14 +17056,14 @@
 	// match: (ORNshiftRA x (MOVDconst [c]) [d])
 	// result: (ORconst x [^(c>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ORconst)
-		v.AuxInt = ^(c >> uint64(d))
+		v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -17071,17 +17071,17 @@
 	// cond: c==d
 	// result: (MOVDconst [-1])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRAconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -1
+		v.AuxInt = int64ToAuxInt(-1)
 		return true
 	}
 	return false
@@ -17092,14 +17092,14 @@
 	// match: (ORNshiftRL x (MOVDconst [c]) [d])
 	// result: (ORconst x [^int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ORconst)
-		v.AuxInt = ^int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -17107,17 +17107,17 @@
 	// cond: c==d
 	// result: (MOVDconst [-1])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -1
+		v.AuxInt = int64ToAuxInt(-1)
 		return true
 	}
 	return false
@@ -17174,17 +17174,17 @@
 	// cond: c2|c1 == ^0
 	// result: (ORconst [c1] x)
 	for {
-		c1 := v.AuxInt
+		c1 := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64ANDconst {
 			break
 		}
-		c2 := v_0.AuxInt
+		c2 := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(c2|c1 == ^0) {
 			break
 		}
 		v.reset(OpARM64ORconst)
-		v.AuxInt = c1
+		v.AuxInt = int64ToAuxInt(c1)
 		v.AddArg(x)
 		return true
 	}
@@ -17198,16 +17198,16 @@
 	// match: (ORshiftLL (MOVDconst [c]) x [d])
 	// result: (ORconst [c] (SLLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -17215,14 +17215,14 @@
 	// match: (ORshiftLL x (MOVDconst [c]) [d])
 	// result: (ORconst x [int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ORconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -17230,13 +17230,13 @@
 	// cond: c==d
 	// result: y
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		y := v_1
 		if y.Op != OpARM64SLLconst {
 			break
 		}
-		c := y.AuxInt
+		c := auxIntToInt64(y.AuxInt)
 		if x != y.Args[0] || !(c == d) {
 			break
 		}
@@ -17246,8 +17246,8 @@
 	// match: ( ORshiftLL [c] (SRLconst x [64-c]) x)
 	// result: (RORconst [64-c] x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -17255,7 +17255,7 @@
 			break
 		}
 		v.reset(OpARM64RORconst)
-		v.AuxInt = 64 - c
+		v.AuxInt = int64ToAuxInt(64 - c)
 		v.AddArg(x)
 		return true
 	}
@@ -17264,24 +17264,24 @@
 	// result: (RORWconst [32-c] x)
 	for {
 		t := v.Type
-		c := v.AuxInt
+		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
 		if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
 			break
 		}
 		v.reset(OpARM64RORWconst)
-		v.AuxInt = 32 - c
+		v.AuxInt = int64ToAuxInt(32 - c)
 		v.AddArg(x)
 		return true
 	}
 	// match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
 	// result: (REV16W x)
 	for {
-		if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+		if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
 			break
 		}
 		x := v_0.Args[0]
@@ -17295,14 +17295,14 @@
 	// match: ( ORshiftLL [c] (SRLconst x [64-c]) x2)
 	// result: (EXTRconst [64-c] x2 x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
 		x2 := v_1
 		v.reset(OpARM64EXTRconst)
-		v.AuxInt = 64 - c
+		v.AuxInt = int64ToAuxInt(64 - c)
 		v.AddArg2(x2, x)
 		return true
 	}
@@ -17311,49 +17311,49 @@
 	// result: (EXTRWconst [32-c] x2 x)
 	for {
 		t := v.Type
-		c := v.AuxInt
+		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
 		x2 := v_1
 		if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
 			break
 		}
 		v.reset(OpARM64EXTRWconst)
-		v.AuxInt = 32 - c
+		v.AuxInt = int64ToAuxInt(32 - c)
 		v.AddArg2(x2, x)
 		return true
 	}
 	// match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
-	// cond: sc == getARM64BFwidth(bfc)
+	// cond: sc == bfc.getARM64BFwidth()
 	// result: (BFXIL [bfc] y x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if v_1.Op != OpARM64SRLconst || v_1.AuxInt != sc {
+		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc {
 			break
 		}
 		y := v_1.Args[0]
-		if !(sc == getARM64BFwidth(bfc)) {
+		if !(sc == bfc.getARM64BFwidth()) {
 			break
 		}
 		v.reset(OpARM64BFXIL)
-		v.AuxInt = bfc
+		v.AuxInt = arm64BitFieldToAuxInt(bfc)
 		v.AddArg2(y, x)
 		return true
 	}
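// An aside on the bitfield aux seen in the BFXIL rule just above:
// bfc.getARM64BFwidth() is the method form of the old getARM64BFwidth(bfc)
// helper, now operating on the arm64BitField aux type. A minimal sketch of
// the assumed encoding (lsb in the high byte, width in the low byte; the
// real definitions, with bounds checks, live in rewrite.go):
//
//	type arm64BitField int16 // packs width | lsb<<8
//
//	func armBFAuxInt(lsb, width int64) arm64BitField {
//		return arm64BitField(width | lsb<<8)
//	}
//
//	func (bfc arm64BitField) getARM64BFlsb() int64   { return int64(uint64(bfc) >> 8) }
//	func (bfc arm64BitField) getARM64BFwidth() int64 { return int64(bfc) & 0xff }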
 	// match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
 	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
-	// result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	// result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 8 {
+		if auxIntToInt64(v.AuxInt) != 8 {
 			break
 		}
 		y0 := v_0
@@ -17364,8 +17364,8 @@
 		if x0.Op != OpARM64MOVBUload {
 			break
 		}
-		i0 := x0.AuxInt
-		s := x0.Aux
+		i0 := auxIntToInt32(x0.AuxInt)
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p := x0.Args[0]
 		y1 := v_1
@@ -17376,8 +17376,8 @@
 		if x1.Op != OpARM64MOVBUload {
 			break
 		}
-		i1 := x1.AuxInt
-		if x1.Aux != s {
+		i1 := auxIntToInt32(x1.AuxInt)
+		if auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[1]
@@ -17387,9 +17387,9 @@
 		b = mergePoint(b, x0, x1)
 		v0 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
 		v.copyOf(v0)
-		v0.Aux = s
+		v0.Aux = symToAux(s)
 		v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type)
-		v1.AuxInt = i0
+		v1.AuxInt = int64ToAuxInt(int64(i0))
 		v1.AddArg(p)
 		v0.AddArg2(v1, mem)
 		return true
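
// The two-byte merge above is the simplest of these load-combining rules;
// the OffPtr it builds carries an int64 offset, hence the explicit int64(i0)
// widening of the load's int32 offset. A sketch of the byte-order reasoning
// the result relies on (little-endian target assumed):

package main

import (
	"encoding/binary"
	"fmt"
)

// b[i0] | b[i0+1]<<8 — the ORshiftLL [8] of two MOVBUloads — equals the
// unsigned 16-bit load at i0 on a little-endian machine, so the pair can
// become a single MOVHUload.
func main() {
	b := []byte{0x34, 0x12}
	byteWise := uint16(b[0]) | uint16(b[1])<<8
	fmt.Println(byteWise == binary.LittleEndian.Uint16(b)) // true: 0x1234
}
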
@@ -17399,7 +17399,7 @@
 	// result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 8 {
+		if auxIntToInt64(v.AuxInt) != 8 {
 			break
 		}
 		y0 := v_0
@@ -17418,10 +17418,10 @@
 			break
 		}
 		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+		if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
 			break
 		}
-		s := x1.Aux
+		s := auxToSym(x1.Aux)
 		_ = x1.Args[1]
 		p1 := x1.Args[0]
 		if p1.Op != OpARM64ADD {
@@ -17449,7 +17449,7 @@
 	// result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 8 {
+		if auxIntToInt64(v.AuxInt) != 8 {
 			break
 		}
 		y0 := v_0
@@ -17476,7 +17476,7 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+		if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
 			break
 		}
 		b = mergePoint(b, x0, x1)
@@ -17487,14 +17487,14 @@
 	}
 	// match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
 	// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y1, y2, o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	// result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 24 {
+		if auxIntToInt64(v.AuxInt) != 24 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
 			break
 		}
 		_ = o0.Args[1]
@@ -17502,8 +17502,8 @@
 		if x0.Op != OpARM64MOVHUload {
 			break
 		}
-		i0 := x0.AuxInt
-		s := x0.Aux
+		i0 := auxIntToInt32(x0.AuxInt)
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p := x0.Args[0]
 		y1 := o0.Args[1]
@@ -17514,8 +17514,8 @@
 		if x1.Op != OpARM64MOVBUload {
 			break
 		}
-		i2 := x1.AuxInt
-		if x1.Aux != s {
+		i2 := auxIntToInt32(x1.AuxInt)
+		if auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[1]
@@ -17530,8 +17530,8 @@
 		if x2.Op != OpARM64MOVBUload {
 			break
 		}
-		i3 := x2.AuxInt
-		if x2.Aux != s {
+		i3 := auxIntToInt32(x2.AuxInt)
+		if auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[1]
@@ -17541,9 +17541,9 @@
 		b = mergePoint(b, x0, x1, x2)
 		v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
 		v.copyOf(v0)
-		v0.Aux = s
+		v0.Aux = symToAux(s)
 		v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
-		v1.AuxInt = i0
+		v1.AuxInt = int64ToAuxInt(int64(i0))
 		v1.AddArg(p)
 		v0.AddArg2(v1, mem)
 		return true
@@ -17553,11 +17553,11 @@
 	// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 24 {
+		if auxIntToInt64(v.AuxInt) != 24 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
 			break
 		}
 		_ = o0.Args[1]
@@ -17573,10 +17573,10 @@
 			break
 		}
 		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 {
+		if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
 			break
 		}
-		s := x1.Aux
+		s := auxToSym(x1.Aux)
 		_ = x1.Args[1]
 		p1 := x1.Args[0]
 		if p1.Op != OpARM64ADD {
@@ -17596,7 +17596,7 @@
 				continue
 			}
 			x2 := y2.Args[0]
-			if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s {
+			if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
 				continue
 			}
 			_ = x2.Args[1]
@@ -17617,11 +17617,11 @@
 	// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 24 {
+		if auxIntToInt64(v.AuxInt) != 24 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
 			break
 		}
 		_ = o0.Args[1]
@@ -17645,7 +17645,7 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+		if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
 			break
 		}
 		y2 := v_1
@@ -17661,7 +17661,7 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 3 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
+		if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
 			break
 		}
 		b = mergePoint(b, x0, x1, x2)
@@ -17675,11 +17675,11 @@
 	// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 24 {
+		if auxIntToInt64(v.AuxInt) != 24 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
 			break
 		}
 		_ = o0.Args[1]
@@ -17695,13 +17695,13 @@
 			break
 		}
 		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 {
+		if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
 			break
 		}
-		s := x1.Aux
+		s := auxToSym(x1.Aux)
 		_ = x1.Args[1]
 		p1 := x1.Args[0]
-		if p1.Op != OpARM64ADDshiftLL || p1.AuxInt != 1 {
+		if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 1 {
 			break
 		}
 		idx1 := p1.Args[1]
@@ -17714,7 +17714,7 @@
 			break
 		}
 		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s {
+		if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[1]
@@ -17726,31 +17726,31 @@
 		v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
 		v.copyOf(v0)
 		v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type)
-		v1.AuxInt = 1
+		v1.AuxInt = int64ToAuxInt(1)
 		v1.AddArg(idx0)
 		v0.AddArg3(ptr0, v1, mem)
 		return true
 	}
 	// match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
 	// cond: i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+	// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 56 {
+		if auxIntToInt64(v.AuxInt) != 56 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
 			break
 		}
 		_ = o0.Args[1]
 		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+		if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
 			break
 		}
 		_ = o1.Args[1]
 		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+		if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
 			break
 		}
 		_ = o2.Args[1]
@@ -17758,8 +17758,8 @@
 		if x0.Op != OpARM64MOVWUload {
 			break
 		}
-		i0 := x0.AuxInt
-		s := x0.Aux
+		i0 := auxIntToInt32(x0.AuxInt)
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p := x0.Args[0]
 		y1 := o2.Args[1]
@@ -17770,8 +17770,8 @@
 		if x1.Op != OpARM64MOVBUload {
 			break
 		}
-		i4 := x1.AuxInt
-		if x1.Aux != s {
+		i4 := auxIntToInt32(x1.AuxInt)
+		if auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[1]
@@ -17786,8 +17786,8 @@
 		if x2.Op != OpARM64MOVBUload {
 			break
 		}
-		i5 := x2.AuxInt
-		if x2.Aux != s {
+		i5 := auxIntToInt32(x2.AuxInt)
+		if auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[1]
@@ -17802,8 +17802,8 @@
 		if x3.Op != OpARM64MOVBUload {
 			break
 		}
-		i6 := x3.AuxInt
-		if x3.Aux != s {
+		i6 := auxIntToInt32(x3.AuxInt)
+		if auxToSym(x3.Aux) != s {
 			break
 		}
 		_ = x3.Args[1]
@@ -17818,8 +17818,8 @@
 		if x4.Op != OpARM64MOVBUload {
 			break
 		}
-		i7 := x4.AuxInt
-		if x4.Aux != s {
+		i7 := auxIntToInt32(x4.AuxInt)
+		if auxToSym(x4.Aux) != s {
 			break
 		}
 		_ = x4.Args[1]
@@ -17829,9 +17829,9 @@
 		b = mergePoint(b, x0, x1, x2, x3, x4)
 		v0 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
 		v.copyOf(v0)
-		v0.Aux = s
+		v0.Aux = symToAux(s)
 		v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
-		v1.AuxInt = i0
+		v1.AuxInt = int64ToAuxInt(int64(i0))
 		v1.AddArg(p)
 		v0.AddArg2(v1, mem)
 		return true
@@ -17841,21 +17841,21 @@
 	// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 56 {
+		if auxIntToInt64(v.AuxInt) != 56 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
 			break
 		}
 		_ = o0.Args[1]
 		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+		if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
 			break
 		}
 		_ = o1.Args[1]
 		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+		if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
 			break
 		}
 		_ = o2.Args[1]
@@ -17871,10 +17871,10 @@
 			break
 		}
 		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload || x1.AuxInt != 4 {
+		if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
 			break
 		}
-		s := x1.Aux
+		s := auxToSym(x1.Aux)
 		_ = x1.Args[1]
 		p1 := x1.Args[0]
 		if p1.Op != OpARM64ADD {
@@ -17894,7 +17894,7 @@
 				continue
 			}
 			x2 := y2.Args[0]
-			if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+			if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
 				continue
 			}
 			_ = x2.Args[1]
@@ -17907,7 +17907,7 @@
 				continue
 			}
 			x3 := y3.Args[0]
-			if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s {
+			if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
 				continue
 			}
 			_ = x3.Args[1]
@@ -17919,7 +17919,7 @@
 				continue
 			}
 			x4 := y4.Args[0]
-			if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s {
+			if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
 				continue
 			}
 			_ = x4.Args[1]
@@ -17939,21 +17939,21 @@
 	// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 56 {
+		if auxIntToInt64(v.AuxInt) != 56 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
 			break
 		}
 		_ = o0.Args[1]
 		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+		if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
 			break
 		}
 		_ = o1.Args[1]
 		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+		if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
 			break
 		}
 		_ = o2.Args[1]
@@ -17969,13 +17969,13 @@
 			break
 		}
 		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload || x1.AuxInt != 4 {
+		if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
 			break
 		}
-		s := x1.Aux
+		s := auxToSym(x1.Aux)
 		_ = x1.Args[1]
 		p1 := x1.Args[0]
-		if p1.Op != OpARM64ADDshiftLL || p1.AuxInt != 2 {
+		if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 2 {
 			break
 		}
 		idx1 := p1.Args[1]
@@ -17988,7 +17988,7 @@
 			break
 		}
 		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+		if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[1]
@@ -18001,7 +18001,7 @@
 			break
 		}
 		x3 := y3.Args[0]
-		if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s {
+		if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
 			break
 		}
 		_ = x3.Args[1]
@@ -18013,7 +18013,7 @@
 			break
 		}
 		x4 := y4.Args[0]
-		if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s {
+		if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
 			break
 		}
 		_ = x4.Args[1]
@@ -18024,7 +18024,7 @@
 		v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t)
 		v.copyOf(v0)
 		v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type)
-		v1.AuxInt = 2
+		v1.AuxInt = int64ToAuxInt(2)
 		v1.AddArg(idx0)
 		v0.AddArg3(ptr0, v1, mem)
 		return true
@@ -18034,21 +18034,21 @@
 	// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
 	for {
 		t := v.Type
-		if v.AuxInt != 56 {
+		if auxIntToInt64(v.AuxInt) != 56 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
 			break
 		}
 		_ = o0.Args[1]
 		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+		if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
 			break
 		}
 		_ = o1.Args[1]
 		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+		if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
 			break
 		}
 		_ = o2.Args[1]
@@ -18072,7 +18072,7 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 4 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+		if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 4 || idx != x1_1.Args[0] || mem != x1.Args[2] {
 			break
 		}
 		y2 := o1.Args[1]
@@ -18088,7 +18088,7 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+		if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
 			break
 		}
 		y3 := o0.Args[1]
@@ -18104,7 +18104,7 @@
 			break
 		}
 		x3_1 := x3.Args[1]
-		if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+		if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] {
 			break
 		}
 		y4 := v_1
@@ -18120,7 +18120,7 @@
 			break
 		}
 		x4_1 := x4.Args[1]
-		if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 7 || idx != x4_1.Args[0] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+		if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 7 || idx != x4_1.Args[0] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
 			break
 		}
 		b = mergePoint(b, x0, x1, x2, x3, x4)
@@ -18134,7 +18134,7 @@
 	// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 8 {
+		if auxIntToInt64(v.AuxInt) != 8 {
 			break
 		}
 		y0 := v_0
@@ -18145,8 +18145,8 @@
 		if x0.Op != OpARM64MOVBUload {
 			break
 		}
-		i1 := x0.AuxInt
-		s := x0.Aux
+		i1 := auxIntToInt32(x0.AuxInt)
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p := x0.Args[0]
 		y1 := v_1
@@ -18157,8 +18157,8 @@
 		if x1.Op != OpARM64MOVBUload {
 			break
 		}
-		i0 := x1.AuxInt
-		if x1.Aux != s {
+		i0 := auxIntToInt32(x1.AuxInt)
+		if auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[1]
@@ -18169,8 +18169,8 @@
 		v0 := b.NewValue0(x1.Pos, OpARM64REV16W, t)
 		v.copyOf(v0)
 		v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
-		v1.AuxInt = i0
-		v1.Aux = s
+		v1.AuxInt = int32ToAuxInt(i0)
+		v1.Aux = symToAux(s)
 		v1.AddArg2(p, mem)
 		v0.AddArg(v1)
 		return true
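
// This variant matches the two bytes in the opposite order, so the merged
// load needs a byte swap: hence the REV16W wrapped around the MOVHUload.
// A sketch of the equivalence being exploited:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// b[i0]<<8 | b[i0+1] is the big-endian 16-bit value at i0, which equals a
// little-endian load followed by a byte swap (what REV16W performs).
func main() {
	b := []byte{0x12, 0x34}
	beWise := uint16(b[0])<<8 | uint16(b[1])
	fmt.Println(beWise == bits.ReverseBytes16(binary.LittleEndian.Uint16(b))) // true
}
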
@@ -18180,7 +18180,7 @@
 	// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 8 {
+		if auxIntToInt64(v.AuxInt) != 8 {
 			break
 		}
 		y0 := v_0
@@ -18188,10 +18188,10 @@
 			break
 		}
 		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVBUload || x0.AuxInt != 1 {
+		if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 1 {
 			break
 		}
-		s := x0.Aux
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p1 := x0.Args[0]
 		if p1.Op != OpARM64ADD {
@@ -18232,7 +18232,7 @@
 	// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 8 {
+		if auxIntToInt64(v.AuxInt) != 8 {
 			break
 		}
 		y0 := v_0
@@ -18246,7 +18246,7 @@
 		mem := x0.Args[2]
 		ptr := x0.Args[0]
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 1 {
+		if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 {
 			break
 		}
 		idx := x0_1.Args[0]
@@ -18272,14 +18272,14 @@
 	}
 	// match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
 	// cond: i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)
-	// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+	// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 24 {
+		if auxIntToInt64(v.AuxInt) != 24 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
 			break
 		}
 		_ = o0.Args[1]
@@ -18291,8 +18291,8 @@
 		if x0.Op != OpARM64MOVHUload {
 			break
 		}
-		i2 := x0.AuxInt
-		s := x0.Aux
+		i2 := auxIntToInt32(x0.AuxInt)
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p := x0.Args[0]
 		y1 := o0.Args[1]
@@ -18303,8 +18303,8 @@
 		if x1.Op != OpARM64MOVBUload {
 			break
 		}
-		i1 := x1.AuxInt
-		if x1.Aux != s {
+		i1 := auxIntToInt32(x1.AuxInt)
+		if auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[1]
@@ -18319,8 +18319,8 @@
 		if x2.Op != OpARM64MOVBUload {
 			break
 		}
-		i0 := x2.AuxInt
-		if x2.Aux != s {
+		i0 := auxIntToInt32(x2.AuxInt)
+		if auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[1]
@@ -18331,9 +18331,9 @@
 		v0 := b.NewValue0(x2.Pos, OpARM64REVW, t)
 		v.copyOf(v0)
 		v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
-		v1.Aux = s
+		v1.Aux = symToAux(s)
 		v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
-		v2.AuxInt = i0
+		v2.AuxInt = int64ToAuxInt(int64(i0))
 		v2.AddArg(p)
 		v1.AddArg2(v2, mem)
 		v0.AddArg(v1)
@@ -18344,11 +18344,11 @@
 	// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 24 {
+		if auxIntToInt64(v.AuxInt) != 24 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
 			break
 		}
 		_ = o0.Args[1]
@@ -18357,10 +18357,10 @@
 			break
 		}
 		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVHUload || x0.AuxInt != 2 {
+		if x0.Op != OpARM64MOVHUload || auxIntToInt32(x0.AuxInt) != 2 {
 			break
 		}
-		s := x0.Aux
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p := x0.Args[0]
 		y1 := o0.Args[1]
@@ -18368,7 +18368,7 @@
 			break
 		}
 		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[1]
@@ -18414,11 +18414,11 @@
 	// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 24 {
+		if auxIntToInt64(v.AuxInt) != 24 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
 			break
 		}
 		_ = o0.Args[1]
@@ -18433,7 +18433,7 @@
 		mem := x0.Args[2]
 		ptr := x0.Args[0]
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 2 {
+		if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 {
 			break
 		}
 		idx := x0_1.Args[0]
@@ -18450,7 +18450,7 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+		if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
 			break
 		}
 		y2 := v_1
@@ -18475,24 +18475,24 @@
 	}
 	// match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
 	// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+	// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 56 {
+		if auxIntToInt64(v.AuxInt) != 56 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
 			break
 		}
 		_ = o0.Args[1]
 		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+		if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
 			break
 		}
 		_ = o1.Args[1]
 		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+		if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
 			break
 		}
 		_ = o2.Args[1]
@@ -18504,8 +18504,8 @@
 		if x0.Op != OpARM64MOVWUload {
 			break
 		}
-		i4 := x0.AuxInt
-		s := x0.Aux
+		i4 := auxIntToInt32(x0.AuxInt)
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p := x0.Args[0]
 		y1 := o2.Args[1]
@@ -18516,8 +18516,8 @@
 		if x1.Op != OpARM64MOVBUload {
 			break
 		}
-		i3 := x1.AuxInt
-		if x1.Aux != s {
+		i3 := auxIntToInt32(x1.AuxInt)
+		if auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[1]
@@ -18532,8 +18532,8 @@
 		if x2.Op != OpARM64MOVBUload {
 			break
 		}
-		i2 := x2.AuxInt
-		if x2.Aux != s {
+		i2 := auxIntToInt32(x2.AuxInt)
+		if auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[1]
@@ -18548,8 +18548,8 @@
 		if x3.Op != OpARM64MOVBUload {
 			break
 		}
-		i1 := x3.AuxInt
-		if x3.Aux != s {
+		i1 := auxIntToInt32(x3.AuxInt)
+		if auxToSym(x3.Aux) != s {
 			break
 		}
 		_ = x3.Args[1]
@@ -18564,8 +18564,8 @@
 		if x4.Op != OpARM64MOVBUload {
 			break
 		}
-		i0 := x4.AuxInt
-		if x4.Aux != s {
+		i0 := auxIntToInt32(x4.AuxInt)
+		if auxToSym(x4.Aux) != s {
 			break
 		}
 		_ = x4.Args[1]
@@ -18576,9 +18576,9 @@
 		v0 := b.NewValue0(x4.Pos, OpARM64REV, t)
 		v.copyOf(v0)
 		v1 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
-		v1.Aux = s
+		v1.Aux = symToAux(s)
 		v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
-		v2.AuxInt = i0
+		v2.AuxInt = int64ToAuxInt(int64(i0))
 		v2.AddArg(p)
 		v1.AddArg2(v2, mem)
 		v0.AddArg(v1)
@@ -18589,21 +18589,21 @@
 	// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 56 {
+		if auxIntToInt64(v.AuxInt) != 56 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
 			break
 		}
 		_ = o0.Args[1]
 		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+		if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
 			break
 		}
 		_ = o1.Args[1]
 		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+		if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
 			break
 		}
 		_ = o2.Args[1]
@@ -18612,10 +18612,10 @@
 			break
 		}
 		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVWUload || x0.AuxInt != 4 {
+		if x0.Op != OpARM64MOVWUload || auxIntToInt32(x0.AuxInt) != 4 {
 			break
 		}
-		s := x0.Aux
+		s := auxToSym(x0.Aux)
 		mem := x0.Args[1]
 		p := x0.Args[0]
 		y1 := o2.Args[1]
@@ -18623,7 +18623,7 @@
 			break
 		}
 		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload || x1.AuxInt != 3 || x1.Aux != s {
+		if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 3 || auxToSym(x1.Aux) != s {
 			break
 		}
 		_ = x1.Args[1]
@@ -18635,7 +18635,7 @@
 			break
 		}
 		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+		if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
 			break
 		}
 		_ = x2.Args[1]
@@ -18647,7 +18647,7 @@
 			break
 		}
 		x3 := y3.Args[0]
-		if x3.Op != OpARM64MOVBUload || x3.AuxInt != 1 || x3.Aux != s {
+		if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 1 || auxToSym(x3.Aux) != s {
 			break
 		}
 		_ = x3.Args[1]
@@ -18693,21 +18693,21 @@
 	// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
 	for {
 		t := v.Type
-		if v.AuxInt != 56 {
+		if auxIntToInt64(v.AuxInt) != 56 {
 			break
 		}
 		o0 := v_0
-		if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+		if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
 			break
 		}
 		_ = o0.Args[1]
 		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+		if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
 			break
 		}
 		_ = o1.Args[1]
 		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+		if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
 			break
 		}
 		_ = o2.Args[1]
@@ -18722,7 +18722,7 @@
 		mem := x0.Args[2]
 		ptr := x0.Args[0]
 		x0_1 := x0.Args[1]
-		if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 4 {
+		if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 4 {
 			break
 		}
 		idx := x0_1.Args[0]
@@ -18739,7 +18739,7 @@
 			break
 		}
 		x1_1 := x1.Args[1]
-		if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 3 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+		if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 3 || idx != x1_1.Args[0] || mem != x1.Args[2] {
 			break
 		}
 		y2 := o1.Args[1]
@@ -18755,7 +18755,7 @@
 			break
 		}
 		x2_1 := x2.Args[1]
-		if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+		if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
 			break
 		}
 		y3 := o0.Args[1]
@@ -18771,7 +18771,7 @@
 			break
 		}
 		x3_1 := x3.Args[1]
-		if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+		if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] {
 			break
 		}
 		y4 := v_1
@@ -18803,16 +18803,16 @@
 	// match: (ORshiftRA (MOVDconst [c]) x [d])
 	// result: (ORconst [c] (SRAconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -18820,14 +18820,14 @@
 	// match: (ORshiftRA x (MOVDconst [c]) [d])
 	// result: (ORconst x [c>>uint64(d)])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ORconst)
-		v.AuxInt = c >> uint64(d)
+		v.AuxInt = int64ToAuxInt(c >> uint64(d))
 		v.AddArg(x)
 		return true
 	}
@@ -18835,13 +18835,13 @@
 	// cond: c==d
 	// result: y
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		y := v_1
 		if y.Op != OpARM64SRAconst {
 			break
 		}
-		c := y.AuxInt
+		c := auxIntToInt64(y.AuxInt)
 		if x != y.Args[0] || !(c == d) {
 			break
 		}
@@ -18857,16 +18857,16 @@
 	// match: (ORshiftRL (MOVDconst [c]) x [d])
 	// result: (ORconst [c] (SRLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64ORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -18874,14 +18874,14 @@
 	// match: (ORshiftRL x (MOVDconst [c]) [d])
 	// result: (ORconst x [int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64ORconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -18889,13 +18889,13 @@
 	// cond: c==d
 	// result: y
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		y := v_1
 		if y.Op != OpARM64SRLconst {
 			break
 		}
-		c := y.AuxInt
+		c := auxIntToInt64(y.AuxInt)
 		if x != y.Args[0] || !(c == d) {
 			break
 		}
@@ -18905,8 +18905,8 @@
 	// match: ( ORshiftRL [c] (SLLconst x [64-c]) x)
 	// result: (RORconst [ c] x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -18914,7 +18914,7 @@
 			break
 		}
 		v.reset(OpARM64RORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(x)
 		return true
 	}
@@ -18923,8 +18923,8 @@
 	// result: (RORWconst [c] x)
 	for {
 		t := v.Type
-		c := v.AuxInt
-		if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -18932,7 +18932,7 @@
 			break
 		}
 		v.reset(OpARM64RORWconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(x)
 		return true
 	}
@@ -18940,22 +18940,22 @@
 	// cond: lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
 	// result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64ANDconst {
 			break
 		}
-		ac := v_0.AuxInt
+		ac := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if v_1.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_1.AuxInt
+		lc := auxIntToInt64(v_1.AuxInt)
 		y := v_1.Args[0]
 		if !(lc > rc && ac == ^((1<<uint(64-lc)-1)<<uint64(lc-rc))) {
 			break
 		}
 		v.reset(OpARM64BFI)
-		v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
 		v.AddArg2(x, y)
 		return true
 	}
@@ -18963,22 +18963,22 @@
 	// cond: lc < rc && ac == ^((1<<uint(64-rc)-1))
 	// result: (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64ANDconst {
 			break
 		}
-		ac := v_0.AuxInt
+		ac := auxIntToInt64(v_0.AuxInt)
 		y := v_0.Args[0]
 		if v_1.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_1.AuxInt
+		lc := auxIntToInt64(v_1.AuxInt)
 		x := v_1.Args[0]
 		if !(lc < rc && ac == ^(1<<uint(64-rc)-1)) {
 			break
 		}
 		v.reset(OpARM64BFXIL)
-		v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
 		v.AddArg2(y, x)
 		return true
 	}
@@ -18989,14 +18989,14 @@
 	// match: (RORWconst [c] (RORWconst [d] x))
 	// result: (RORWconst [(c+d)&31] x)
 	for {
-		c := v.AuxInt
+		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64RORWconst {
 			break
 		}
-		d := v_0.AuxInt
+		d := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		v.reset(OpARM64RORWconst)
-		v.AuxInt = (c + d) & 31
+		v.AuxInt = int64ToAuxInt((c + d) & 31)
 		v.AddArg(x)
 		return true
 	}
@@ -19007,14 +19007,14 @@
 	// match: (RORconst [c] (RORconst [d] x))
 	// result: (RORconst [(c+d)&63] x)
 	for {
-		c := v.AuxInt
+		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64RORconst {
 			break
 		}
-		d := v_0.AuxInt
+		d := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		v.reset(OpARM64RORconst)
-		v.AuxInt = (c + d) & 63
+		v.AuxInt = int64ToAuxInt((c + d) & 63)
 		v.AddArg(x)
 		return true
 	}
@@ -19109,8 +19109,8 @@
 	// cond: 0 < c && c < 64
 	// result: (ANDconst [^(1<<uint(c)-1)] x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SRLconst || v_0.AuxInt != c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
 			break
 		}
 		x := v_0.Args[0]
@@ -19118,7 +19118,7 @@
 			break
 		}
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = ^(1<<uint(c) - 1)
+		v.AuxInt = int64ToAuxInt(^(1<<uint(c) - 1))
 		v.AddArg(x)
 		return true
 	}
@@ -19126,17 +19126,17 @@
 	// cond: isARM64BFMask(sc, ac, 0)
 	// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64ANDconst {
 			break
 		}
-		ac := v_0.AuxInt
+		ac := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, ac, 0)) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
 		v.AddArg(x)
 		return true
 	}
@@ -19144,7 +19144,7 @@
 	// cond: isARM64BFMask(sc, 1<<32-1, 0)
 	// result: (UBFIZ [armBFAuxInt(sc, 32)] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVWUreg {
 			break
 		}
@@ -19153,7 +19153,7 @@
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc, 32)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
 		v.AddArg(x)
 		return true
 	}
@@ -19161,7 +19161,7 @@
 	// cond: isARM64BFMask(sc, 1<<16-1, 0)
 	// result: (UBFIZ [armBFAuxInt(sc, 16)] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVHUreg {
 			break
 		}
@@ -19170,7 +19170,7 @@
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc, 16)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
 		v.AddArg(x)
 		return true
 	}
@@ -19178,7 +19178,7 @@
 	// cond: isARM64BFMask(sc, 1<<8-1, 0)
 	// result: (UBFIZ [armBFAuxInt(sc, 8)] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVBUreg {
 			break
 		}
@@ -19187,25 +19187,25 @@
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc, 8)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SLLconst [sc] (UBFIZ [bfc] x))
-	// cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
-	// result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+	// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFIZ {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) {
+		if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
 		v.AddArg(x)
 		return true
 	}
@@ -19247,17 +19247,17 @@
 	// cond: lc > rc
 	// result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_0.AuxInt
+		lc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(lc > rc) {
 			break
 		}
 		v.reset(OpARM64SBFIZ)
-		v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
 		v.AddArg(x)
 		return true
 	}
@@ -19265,17 +19265,17 @@
 	// cond: lc <= rc
 	// result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_0.AuxInt
+		lc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(lc <= rc) {
 			break
 		}
 		v.reset(OpARM64SBFX)
-		v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
 		v.AddArg(x)
 		return true
 	}
@@ -19283,7 +19283,7 @@
 	// cond: rc < 32
 	// result: (SBFX [armBFAuxInt(rc, 32-rc)] x)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVWreg {
 			break
 		}
@@ -19292,7 +19292,7 @@
 			break
 		}
 		v.reset(OpARM64SBFX)
-		v.AuxInt = armBFAuxInt(rc, 32-rc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
 		v.AddArg(x)
 		return true
 	}
@@ -19300,7 +19300,7 @@
 	// cond: rc < 16
 	// result: (SBFX [armBFAuxInt(rc, 16-rc)] x)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVHreg {
 			break
 		}
@@ -19309,7 +19309,7 @@
 			break
 		}
 		v.reset(OpARM64SBFX)
-		v.AuxInt = armBFAuxInt(rc, 16-rc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
 		v.AddArg(x)
 		return true
 	}
@@ -19317,7 +19317,7 @@
 	// cond: rc < 8
 	// result: (SBFX [armBFAuxInt(rc, 8-rc)] x)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVBreg {
 			break
 		}
@@ -19326,43 +19326,43 @@
 			break
 		}
 		v.reset(OpARM64SBFX)
-		v.AuxInt = armBFAuxInt(rc, 8-rc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRAconst [sc] (SBFIZ [bfc] x))
-	// cond: sc < getARM64BFlsb(bfc)
-	// result: (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+	// cond: sc < bfc.getARM64BFlsb()
+	// result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SBFIZ {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < getARM64BFlsb(bfc)) {
+		if !(sc < bfc.getARM64BFlsb()) {
 			break
 		}
 		v.reset(OpARM64SBFIZ)
-		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRAconst [sc] (SBFIZ [bfc] x))
-	// cond: sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
-	// result: (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+	// cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+	// result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SBFIZ {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+		if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
 			break
 		}
 		v.reset(OpARM64SBFX)
-		v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
 		v.AddArg(x)
 		return true
 	}
@@ -19404,8 +19404,8 @@
 	// cond: 0 < c && c < 64
 	// result: (ANDconst [1<<uint(64-c)-1] x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SLLconst || v_0.AuxInt != c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
 			break
 		}
 		x := v_0.Args[0]
@@ -19413,7 +19413,7 @@
 			break
 		}
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = 1<<uint(64-c) - 1
+		v.AuxInt = int64ToAuxInt(1<<uint(64-c) - 1)
 		v.AddArg(x)
 		return true
 	}
@@ -19421,17 +19421,17 @@
 	// cond: lc > rc
 	// result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_0.AuxInt
+		lc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(lc > rc) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
 		v.AddArg(x)
 		return true
 	}
@@ -19439,17 +19439,17 @@
 	// cond: isARM64BFMask(sc, ac, sc)
 	// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64ANDconst {
 			break
 		}
-		ac := v_0.AuxInt
+		ac := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(isARM64BFMask(sc, ac, sc)) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
 		v.AddArg(x)
 		return true
 	}
@@ -19457,7 +19457,7 @@
 	// cond: isARM64BFMask(sc, 1<<32-1, sc)
 	// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVWUreg {
 			break
 		}
@@ -19466,7 +19466,7 @@
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
 		v.AddArg(x)
 		return true
 	}
@@ -19474,7 +19474,7 @@
 	// cond: isARM64BFMask(sc, 1<<16-1, sc)
 	// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVHUreg {
 			break
 		}
@@ -19483,7 +19483,7 @@
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
 		v.AddArg(x)
 		return true
 	}
@@ -19491,7 +19491,7 @@
 	// cond: isARM64BFMask(sc, 1<<8-1, sc)
 	// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVBUreg {
 			break
 		}
@@ -19500,7 +19500,7 @@
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
 		v.AddArg(x)
 		return true
 	}
@@ -19508,89 +19508,89 @@
 	// cond: lc < rc
 	// result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
 	for {
-		rc := v.AuxInt
+		rc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		lc := v_0.AuxInt
+		lc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
 		if !(lc < rc) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFX [bfc] x))
-	// cond: sc < getARM64BFwidth(bfc)
-	// result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+	// cond: sc < bfc.getARM64BFwidth()
+	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < getARM64BFwidth(bfc)) {
+		if !(sc < bfc.getARM64BFwidth()) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFIZ [bfc] x))
-	// cond: sc == getARM64BFlsb(bfc)
-	// result: (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
+	// cond: sc == bfc.getARM64BFlsb()
+	// result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFIZ {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc == getARM64BFlsb(bfc)) {
+		if !(sc == bfc.getARM64BFlsb()) {
 			break
 		}
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = 1<<uint(getARM64BFwidth(bfc)) - 1
+		v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFIZ [bfc] x))
-	// cond: sc < getARM64BFlsb(bfc)
-	// result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+	// cond: sc < bfc.getARM64BFlsb()
+	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFIZ {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < getARM64BFlsb(bfc)) {
+		if !(sc < bfc.getARM64BFlsb()) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (SRLconst [sc] (UBFIZ [bfc] x))
-	// cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
-	// result: (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+	// cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+	// result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
 	for {
-		sc := v.AuxInt
+		sc := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFIZ {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+		if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
 		v.AddArg(x)
 		return true
 	}
@@ -19807,13 +19807,13 @@
 		if x1.Op != OpARM64SLLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -19826,13 +19826,13 @@
 		if x1.Op != OpARM64SRLconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64SUBshiftRL)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -19845,13 +19845,13 @@
 		if x1.Op != OpARM64SRAconst {
 			break
 		}
-		c := x1.AuxInt
+		c := auxIntToInt64(x1.AuxInt)
 		y := x1.Args[0]
 		if !(clobberIfDead(x1)) {
 			break
 		}
 		v.reset(OpARM64SUBshiftRA)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg2(x0, y)
 		return true
 	}
@@ -19917,14 +19917,14 @@
 	// match: (SUBshiftLL x (MOVDconst [c]) [d])
 	// result: (SUBconst x [int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64SUBconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -19932,17 +19932,17 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SLLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	return false
@@ -19953,14 +19953,14 @@
 	// match: (SUBshiftRA x (MOVDconst [c]) [d])
 	// result: (SUBconst x [c>>uint64(d)])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64SUBconst)
-		v.AuxInt = c >> uint64(d)
+		v.AuxInt = int64ToAuxInt(c >> uint64(d))
 		v.AddArg(x)
 		return true
 	}
@@ -19968,17 +19968,17 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRAconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	return false
@@ -19989,14 +19989,14 @@
 	// match: (SUBshiftRL x (MOVDconst [c]) [d])
 	// result: (SUBconst x [int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64SUBconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -20004,17 +20004,17 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	return false
@@ -20048,13 +20048,13 @@
 			if x1.Op != OpARM64SLLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64TSTshiftLL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -20070,13 +20070,13 @@
 			if x1.Op != OpARM64SRLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64TSTshiftRL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -20092,13 +20092,13 @@
 			if x1.Op != OpARM64SRAconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64TSTshiftRA)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -20166,16 +20166,16 @@
 	// match: (TSTshiftLL (MOVDconst [c]) x [d])
 	// result: (TSTconst [c] (SLLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64TSTconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -20183,14 +20183,14 @@
 	// match: (TSTshiftLL x (MOVDconst [c]) [d])
 	// result: (TSTconst x [int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64TSTconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -20203,16 +20203,16 @@
 	// match: (TSTshiftRA (MOVDconst [c]) x [d])
 	// result: (TSTconst [c] (SRAconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64TSTconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -20220,14 +20220,14 @@
 	// match: (TSTshiftRA x (MOVDconst [c]) [d])
 	// result: (TSTconst x [c>>uint64(d)])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64TSTconst)
-		v.AuxInt = c >> uint64(d)
+		v.AuxInt = int64ToAuxInt(c >> uint64(d))
 		v.AddArg(x)
 		return true
 	}
@@ -20240,16 +20240,16 @@
 	// match: (TSTshiftRL (MOVDconst [c]) x [d])
 	// result: (TSTconst [c] (SRLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64TSTconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -20257,14 +20257,14 @@
 	// match: (TSTshiftRL x (MOVDconst [c]) [d])
 	// result: (TSTconst x [int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64TSTconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -20273,20 +20273,20 @@
 func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (UBFIZ [bfc] (SLLconst [sc] x))
-	// cond: sc < getARM64BFwidth(bfc)
-	// result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+	// cond: sc < bfc.getARM64BFwidth()
+	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
 	for {
-		bfc := v.AuxInt
+		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < getARM64BFwidth(bfc)) {
+		if !(sc < bfc.getARM64BFwidth()) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
 		v.AddArg(x)
 		return true
 	}
@@ -20295,74 +20295,74 @@
 func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (UBFX [bfc] (SRLconst [sc] x))
-	// cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
-	// result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+	// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
 	for {
-		bfc := v.AuxInt
+		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SRLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) {
+		if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] (SLLconst [sc] x))
-	// cond: sc == getARM64BFlsb(bfc)
-	// result: (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
+	// cond: sc == bfc.getARM64BFlsb()
+	// result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
 	for {
-		bfc := v.AuxInt
+		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc == getARM64BFlsb(bfc)) {
+		if !(sc == bfc.getARM64BFlsb()) {
 			break
 		}
 		v.reset(OpARM64ANDconst)
-		v.AuxInt = 1<<uint(getARM64BFwidth(bfc)) - 1
+		v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] (SLLconst [sc] x))
-	// cond: sc < getARM64BFlsb(bfc)
-	// result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+	// cond: sc < bfc.getARM64BFlsb()
+	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
 	for {
-		bfc := v.AuxInt
+		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc < getARM64BFlsb(bfc)) {
+		if !(sc < bfc.getARM64BFlsb()) {
 			break
 		}
 		v.reset(OpARM64UBFX)
-		v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
 		v.AddArg(x)
 		return true
 	}
 	// match: (UBFX [bfc] (SLLconst [sc] x))
-	// cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
-	// result: (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+	// cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+	// result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
 	for {
-		bfc := v.AuxInt
+		bfc := auxIntToArm64BitField(v.AuxInt)
 		if v_0.Op != OpARM64SLLconst {
 			break
 		}
-		sc := v_0.AuxInt
+		sc := auxIntToInt64(v_0.AuxInt)
 		x := v_0.Args[0]
-		if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+		if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
 			break
 		}
 		v.reset(OpARM64UBFIZ)
-		v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
 		v.AddArg(x)
 		return true
 	}
@@ -20657,13 +20657,13 @@
 			if x1.Op != OpARM64SLLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64XORshiftLL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -20679,13 +20679,13 @@
 			if x1.Op != OpARM64SRLconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64XORshiftRL)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -20701,13 +20701,13 @@
 			if x1.Op != OpARM64SRAconst {
 				continue
 			}
-			c := x1.AuxInt
+			c := auxIntToInt64(x1.AuxInt)
 			y := x1.Args[0]
 			if !(clobberIfDead(x1)) {
 				continue
 			}
 			v.reset(OpARM64XORshiftRA)
-			v.AuxInt = c
+			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg2(x0, y)
 			return true
 		}
@@ -20728,14 +20728,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 63 {
+			if auxIntToInt64(v_0_1.AuxInt) != 63 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
@@ -20751,15 +20751,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -20768,11 +20768,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64ROR)
@@ -20798,14 +20798,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 63 {
+			if auxIntToInt64(v_0_1.AuxInt) != 63 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SLL {
@@ -20821,15 +20821,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -20838,11 +20838,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64ROR)
@@ -20866,14 +20866,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 31 {
+			if auxIntToInt64(v_0_1.AuxInt) != 31 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
@@ -20890,15 +20890,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -20907,11 +20907,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64RORW)
@@ -20941,14 +20941,14 @@
 				continue
 			}
 			t := v_0_1.Type
-			if v_0_1.AuxInt != 31 {
+			if auxIntToInt64(v_0_1.AuxInt) != 31 {
 				continue
 			}
 			y := v_0_1.Args[0]
 			if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
 				continue
 			}
-			cc := v_1.Aux
+			cc := auxToCCop(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			if v_1_0.Op != OpARM64SLL {
@@ -20964,15 +20964,15 @@
 			}
 			_ = v_1_0_1.Args[1]
 			v_1_0_1_0 := v_1_0_1.Args[0]
-			if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+			if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_0_1_1 := v_1_0_1.Args[1]
-			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+			if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
 				continue
 			}
 			v_1_1 := v_1.Args[1]
-			if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+			if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
 				continue
 			}
 			v_1_1_0 := v_1_1.Args[0]
@@ -20981,11 +20981,11 @@
 			}
 			_ = v_1_1_0.Args[1]
 			v_1_1_0_0 := v_1_1_0.Args[0]
-			if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+			if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
 				continue
 			}
 			v_1_1_0_1 := v_1_1_0.Args[1]
-			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+			if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
 				continue
 			}
 			v.reset(OpARM64RORW)
@@ -21055,16 +21055,16 @@
 	// match: (XORshiftLL (MOVDconst [c]) x [d])
 	// result: (XORconst [c] (SLLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64XORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -21072,14 +21072,14 @@
 	// match: (XORshiftLL x (MOVDconst [c]) [d])
 	// result: (XORconst x [int64(uint64(c)<<uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64XORconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -21087,24 +21087,24 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SLLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	// match: (XORshiftLL [c] (SRLconst x [64-c]) x)
 	// result: (RORconst [64-c] x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -21112,7 +21112,7 @@
 			break
 		}
 		v.reset(OpARM64RORconst)
-		v.AuxInt = 64 - c
+		v.AuxInt = int64ToAuxInt(64 - c)
 		v.AddArg(x)
 		return true
 	}
@@ -21121,24 +21121,24 @@
 	// result: (RORWconst [32-c] x)
 	for {
 		t := v.Type
-		c := v.AuxInt
+		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
 		if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
 			break
 		}
 		v.reset(OpARM64RORWconst)
-		v.AuxInt = 32 - c
+		v.AuxInt = int64ToAuxInt(32 - c)
 		v.AddArg(x)
 		return true
 	}
 	// match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
 	// result: (REV16W x)
 	for {
-		if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+		if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
 			break
 		}
 		x := v_0.Args[0]
@@ -21152,14 +21152,14 @@
 	// match: (XORshiftLL [c] (SRLconst x [64-c]) x2)
 	// result: (EXTRconst [64-c] x2 x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
 		x2 := v_1
 		v.reset(OpARM64EXTRconst)
-		v.AuxInt = 64 - c
+		v.AuxInt = int64ToAuxInt(64 - c)
 		v.AddArg2(x2, x)
 		return true
 	}
@@ -21168,18 +21168,18 @@
 	// result: (EXTRWconst [32-c] x2 x)
 	for {
 		t := v.Type
-		c := v.AuxInt
+		c := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64UBFX {
 			break
 		}
-		bfc := v_0.AuxInt
+		bfc := auxIntToArm64BitField(v_0.AuxInt)
 		x := v_0.Args[0]
 		x2 := v_1
 		if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
 			break
 		}
 		v.reset(OpARM64EXTRWconst)
-		v.AuxInt = 32 - c
+		v.AuxInt = int64ToAuxInt(32 - c)
 		v.AddArg2(x2, x)
 		return true
 	}
@@ -21192,16 +21192,16 @@
 	// match: (XORshiftRA (MOVDconst [c]) x [d])
 	// result: (XORconst [c] (SRAconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64XORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -21209,14 +21209,14 @@
 	// match: (XORshiftRA x (MOVDconst [c]) [d])
 	// result: (XORconst x [c>>uint64(d)])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64XORconst)
-		v.AuxInt = c >> uint64(d)
+		v.AuxInt = int64ToAuxInt(c >> uint64(d))
 		v.AddArg(x)
 		return true
 	}
@@ -21224,17 +21224,17 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRAconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	return false
@@ -21246,16 +21246,16 @@
 	// match: (XORshiftRL (MOVDconst [c]) x [d])
 	// result: (XORconst [c] (SRLconst <x.Type> x [d]))
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		if v_0.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_0.AuxInt
+		c := auxIntToInt64(v_0.AuxInt)
 		x := v_1
 		v.reset(OpARM64XORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
+		v0.AuxInt = int64ToAuxInt(d)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -21263,14 +21263,14 @@
 	// match: (XORshiftRL x (MOVDconst [c]) [d])
 	// result: (XORconst x [int64(uint64(c)>>uint64(d))])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64MOVDconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		v.reset(OpARM64XORconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
+		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
 		v.AddArg(x)
 		return true
 	}
@@ -21278,24 +21278,24 @@
 	// cond: c==d
 	// result: (MOVDconst [0])
 	for {
-		d := v.AuxInt
+		d := auxIntToInt64(v.AuxInt)
 		x := v_0
 		if v_1.Op != OpARM64SRLconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if x != v_1.Args[0] || !(c == d) {
 			break
 		}
 		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
+		v.AuxInt = int64ToAuxInt(0)
 		return true
 	}
 	// match: (XORshiftRL [c] (SLLconst x [64-c]) x)
 	// result: (RORconst [ c] x)
 	for {
-		c := v.AuxInt
-		if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -21303,7 +21303,7 @@
 			break
 		}
 		v.reset(OpARM64RORconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(x)
 		return true
 	}
@@ -21312,8 +21312,8 @@
 	// result: (RORWconst [c] x)
 	for {
 		t := v.Type
-		c := v.AuxInt
-		if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+		c := auxIntToInt64(v.AuxInt)
+		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
 			break
 		}
 		x := v_0.Args[0]
@@ -21321,7 +21321,7 @@
 			break
 		}
 		v.reset(OpARM64RORWconst)
-		v.AuxInt = c
+		v.AuxInt = int64ToAuxInt(c)
 		v.AddArg(x)
 		return true
 	}
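
For reference while reading the hunks above: the typed-aux helpers that the
generated matchers now call are thin casts which record the Go type each
AuxInt/Aux field is meant to carry, so a mismatch becomes a compile error in
the generated code rather than a silent reinterpretation. A minimal sketch of
their shape, assuming the definitions in rewrite.go (illustrative, not a
verbatim copy of that file):

// Sketch of the typed-aux conversions assumed by the generated code above;
// rewrite.go holds the authoritative definitions.
func auxIntToInt64(i int64) int64 { return i }        // AuxInt is stored as an int64
func int64ToAuxInt(i int64) int64 { return i }
func auxIntToInt32(i int64) int32 { return int32(i) } // e.g. SymOff offsets
func int32ToAuxInt(i int32) int64 { return int64(i) }

func auxIntToArm64BitField(i int64) arm64BitField { return arm64BitField(i) }
func arm64BitFieldToAuxInt(b arm64BitField) int64 { return int64(b) }

func auxToSym(i interface{}) Sym  { s, _ := i.(Sym); return s }
func symToAux(s Sym) interface{}  { return s }
func auxToCCop(cc interface{}) Op { return cc.(Op) } // ARM64 condition codes are Ops
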
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index 63c8f3d..7ead0ff 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -126,6 +126,14 @@
 	return ValAndOff(v.AuxInt)
 }
 
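+// AuxArm64BitField returns v.AuxInt interpreted as an arm64BitField.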
+func (v *Value) AuxArm64BitField() arm64BitField {
+	if opcodeTable[v.Op].auxType != auxARM64BitField {
+		v.Fatalf("op %s doesn't have an ARM64BitField aux field", v.Op)
+	}
+	return arm64BitField(v.AuxInt)
+}
+
 // long form print.  v# = opcode <type> [aux] args [: reg] (names)
 func (v *Value) LongString() string {
 	s := fmt.Sprintf("v%d = %s", v.ID, v.Op)
@@ -176,8 +184,8 @@
 	case auxInt64, auxInt128:
 		return fmt.Sprintf(" [%d]", v.AuxInt)
 	case auxARM64BitField:
-		lsb := getARM64BFlsb(v.AuxInt)
-		width := getARM64BFwidth(v.AuxInt)
+		lsb := v.AuxArm64BitField().getARM64BFlsb()
+		width := v.AuxArm64BitField().getARM64BFwidth()
 		return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width)
 	case auxFloat32, auxFloat64:
 		return fmt.Sprintf(" [%g]", v.AuxFloat())
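
The bfc.getARM64BFlsb()/bfc.getARM64BFwidth() accessors used by the rewritten
conditions pack both bitfield parameters into one small integer. A sketch of
the encoding, assuming the (width | lsb<<8) layout used by the ARM64 rules
(rewrite.go is authoritative):

// arm64BitField packs a bitfield spec into an int16: width in the low
// 8 bits, lsb above it. Sketch only.
type arm64BitField int16

// armBFAuxInt makes the auxInt used by arm64 bitfield ops from lsb and width.
func armBFAuxInt(lsb, width int64) arm64BitField {
	if lsb < 0 || lsb > 63 {
		panic("ARM(64) bit field lsb constant out of range")
	}
	if width < 1 || width > 64 {
		panic("ARM(64) bit field width constant out of range")
	}
	return arm64BitField(width | lsb<<8)
}

// getARM64BFlsb returns the lsb part of the auxInt field of arm64 bitfield ops.
func (bfc arm64BitField) getARM64BFlsb() int64 { return int64(uint64(bfc) >> 8) }

// getARM64BFwidth returns the width part of the auxInt field of arm64 bitfield ops.
func (bfc arm64BitField) getARM64BFwidth() int64 { return int64(bfc) & 0xff }

Under this layout, armBFAuxInt(8, 8) — the REV16W pattern above — encodes to
8<<8 | 8 = 2056, and the two accessors recover lsb=8, width=8 from it.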