| // Copyright 2015 The Go Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style |
| // license that can be found in the LICENSE file. |
| |
| // Lowering arithmetic |
| (Add64 x y) -> (ADDQ x y) |
| (AddPtr x y) && config.PtrSize == 8 -> (ADDQ x y) |
| (AddPtr x y) && config.PtrSize == 4 -> (ADDL x y) |
| (Add32 x y) -> (ADDL x y) |
| (Add16 x y) -> (ADDL x y) |
| (Add8 x y) -> (ADDL x y) |
| (Add32F x y) -> (ADDSS x y) |
| (Add64F x y) -> (ADDSD x y) |
| |
| (Sub64 x y) -> (SUBQ x y) |
| (SubPtr x y) && config.PtrSize == 8 -> (SUBQ x y) |
| (SubPtr x y) && config.PtrSize == 4 -> (SUBL x y) |
| (Sub32 x y) -> (SUBL x y) |
| (Sub16 x y) -> (SUBL x y) |
| (Sub8 x y) -> (SUBL x y) |
| (Sub32F x y) -> (SUBSS x y) |
| (Sub64F x y) -> (SUBSD x y) |
| |
| (Mul64 x y) -> (MULQ x y) |
| (Mul32 x y) -> (MULL x y) |
| (Mul16 x y) -> (MULL x y) |
| (Mul8 x y) -> (MULL x y) |
| (Mul32F x y) -> (MULSS x y) |
| (Mul64F x y) -> (MULSD x y) |
| |
| (Div32F x y) -> (DIVSS x y) |
| (Div64F x y) -> (DIVSD x y) |
| |
| (Div64 x y) -> (Select0 (DIVQ x y)) |
| (Div64u x y) -> (Select0 (DIVQU x y)) |
| (Div32 x y) -> (Select0 (DIVL x y)) |
| (Div32u x y) -> (Select0 (DIVLU x y)) |
| (Div16 x y) -> (Select0 (DIVW x y)) |
| (Div16u x y) -> (Select0 (DIVWU x y)) |
| (Div8 x y) -> (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) |
| (Div8u x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) |
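| // 8-bit divides are widened to 16 bits: the hardware 8-bit divide returns |
| // its remainder in AH, which has no convenient representation here. |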
| |
| (Hmul64 x y) -> (HMULQ x y) |
| (Hmul64u x y) -> (HMULQU x y) |
| (Hmul32 x y) -> (HMULL x y) |
| (Hmul32u x y) -> (HMULLU x y) |
| |
| (Mul64uhilo x y) -> (MULQU2 x y) |
| (Div128u xhi xlo y) -> (DIVQU2 xhi xlo y) |
| |
| (Avg64u x y) -> (AVGQU x y) |
| |
| (Mod64 x y) -> (Select1 (DIVQ x y)) |
| (Mod64u x y) -> (Select1 (DIVQU x y)) |
| (Mod32 x y) -> (Select1 (DIVL x y)) |
| (Mod32u x y) -> (Select1 (DIVLU x y)) |
| (Mod16 x y) -> (Select1 (DIVW x y)) |
| (Mod16u x y) -> (Select1 (DIVWU x y)) |
| (Mod8 x y) -> (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) |
| (Mod8u x y) -> (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) |
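| // The DIVx ops return a (quotient, remainder) tuple; Div takes Select0 |
| // (above) and Mod takes Select1. |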
| |
| (And64 x y) -> (ANDQ x y) |
| (And32 x y) -> (ANDL x y) |
| (And16 x y) -> (ANDL x y) |
| (And8 x y) -> (ANDL x y) |
| |
| (Or64 x y) -> (ORQ x y) |
| (Or32 x y) -> (ORL x y) |
| (Or16 x y) -> (ORL x y) |
| (Or8 x y) -> (ORL x y) |
| |
| (Xor64 x y) -> (XORQ x y) |
| (Xor32 x y) -> (XORL x y) |
| (Xor16 x y) -> (XORL x y) |
| (Xor8 x y) -> (XORL x y) |
| |
| (Neg64 x) -> (NEGQ x) |
| (Neg32 x) -> (NEGL x) |
| (Neg16 x) -> (NEGL x) |
| (Neg8 x) -> (NEGL x) |
| (Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))])) |
| (Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))])) |
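| // Floating-point negation is a sign-bit flip: XORing with -0.0 |
| // (math.Copysign(0, -1)) toggles only the sign bit. |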
| |
| (Com64 x) -> (NOTQ x) |
| (Com32 x) -> (NOTL x) |
| (Com16 x) -> (NOTL x) |
| (Com8 x) -> (NOTL x) |
| |
| // Lowering boolean ops |
| (AndB x y) -> (ANDL x y) |
| (OrB x y) -> (ORL x y) |
| (Not x) -> (XORLconst [1] x) |
| |
| // Lowering pointer arithmetic |
| (OffPtr [off] ptr) && config.PtrSize == 8 && is32Bit(off) -> (ADDQconst [off] ptr) |
| (OffPtr [off] ptr) && config.PtrSize == 8 -> (ADDQ (MOVQconst [off]) ptr) |
| (OffPtr [off] ptr) && config.PtrSize == 4 -> (ADDLconst [off] ptr) |
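| // ADDQconst only accepts a 32-bit immediate, so a 64-bit offset must be |
| // materialized with MOVQconst and added with a full ADDQ. |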
| |
| // Lowering other arithmetic |
| (Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x))) |
| (Ctz32 x) -> (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x))) |
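| // For Ctz32, ORing in bit 32 guarantees that BSFQ always finds a set bit, |
| // so a zero input correctly yields 32 without needing the CMOV used above. |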
| |
| (BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x)))) |
| (BitLen32 x) -> (BitLen64 (MOVLQZX <typ.UInt64> x)) |
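| // BSRQ returns the index of the highest set bit; the CMOV substitutes -1 on |
| // zero input, so after the ADDQconst [1] a zero input yields a bit length of 0. |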
| |
| (Bswap64 x) -> (BSWAPQ x) |
| (Bswap32 x) -> (BSWAPL x) |
| |
| (PopCount64 x) -> (POPCNTQ x) |
| (PopCount32 x) -> (POPCNTL x) |
| (PopCount16 x) -> (POPCNTL (MOVWQZX <typ.UInt32> x)) |
| (PopCount8 x) -> (POPCNTL (MOVBQZX <typ.UInt32> x)) |
| |
| (Sqrt x) -> (SQRTSD x) |
| |
| (RoundToEven x) -> (ROUNDSD [0] x) |
| (Floor x) -> (ROUNDSD [1] x) |
| (Ceil x) -> (ROUNDSD [2] x) |
| (Trunc x) -> (ROUNDSD [3] x) |
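| // The ROUNDSD immediate selects the SSE4.1 rounding mode: |
| // 0 = to nearest (even), 1 = toward -Inf, 2 = toward +Inf, 3 = toward zero. |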
| |
| // Lowering extension |
| // Note: we always extend to 64 bits even though some ops don't need that many result bits. |
| (SignExt8to16 x) -> (MOVBQSX x) |
| (SignExt8to32 x) -> (MOVBQSX x) |
| (SignExt8to64 x) -> (MOVBQSX x) |
| (SignExt16to32 x) -> (MOVWQSX x) |
| (SignExt16to64 x) -> (MOVWQSX x) |
| (SignExt32to64 x) -> (MOVLQSX x) |
| |
| (ZeroExt8to16 x) -> (MOVBQZX x) |
| (ZeroExt8to32 x) -> (MOVBQZX x) |
| (ZeroExt8to64 x) -> (MOVBQZX x) |
| (ZeroExt16to32 x) -> (MOVWQZX x) |
| (ZeroExt16to64 x) -> (MOVWQZX x) |
| (ZeroExt32to64 x) -> (MOVLQZX x) |
| |
| (Slicemask <t> x) -> (SARQconst (NEGQ <t> x) [63]) |
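| // Slicemask(x) is -1 for x > 0 and 0 for x == 0: negating a positive length |
| // sets the sign bit, which SARQconst [63] then smears across the whole word. |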
| |
| // Lowering truncation |
| // Because we ignore high parts of registers, truncates are just copies. |
| (Trunc16to8 x) -> x |
| (Trunc32to8 x) -> x |
| (Trunc32to16 x) -> x |
| (Trunc64to8 x) -> x |
| (Trunc64to16 x) -> x |
| (Trunc64to32 x) -> x |
| |
| // Lowering float <-> int |
| (Cvt32to32F x) -> (CVTSL2SS x) |
| (Cvt32to64F x) -> (CVTSL2SD x) |
| (Cvt64to32F x) -> (CVTSQ2SS x) |
| (Cvt64to64F x) -> (CVTSQ2SD x) |
| |
| (Cvt32Fto32 x) -> (CVTTSS2SL x) |
| (Cvt32Fto64 x) -> (CVTTSS2SQ x) |
| (Cvt64Fto32 x) -> (CVTTSD2SL x) |
| (Cvt64Fto64 x) -> (CVTTSD2SQ x) |
| |
| (Cvt32Fto64F x) -> (CVTSS2SD x) |
| (Cvt64Fto32F x) -> (CVTSD2SS x) |
| |
| (Round32F x) -> x |
| (Round64F x) -> x |
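| // Round32F/Round64F exist for platforms whose registers are wider than the |
| // declared precision; SSE registers are not, so these are no-ops. |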
| |
| // Lowering shifts |
| // Unsigned shifts need to return 0 if the shift amount is >= the width of the shifted value. |
| // result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff) |
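| // CMPxconst y [width] sets the carry flag exactly when y < width (unsigned), |
| // and SBBxcarrymask materializes that flag as a mask: all ones when the carry |
| // is set, all zeros otherwise. ANDing with the mask zeroes out-of-range shifts. |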
| (Lsh64x64 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) |
| (Lsh64x32 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) |
| (Lsh64x16 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) |
| (Lsh64x8 <t> x y) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) |
| |
| (Lsh32x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) |
| (Lsh32x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) |
| (Lsh32x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) |
| (Lsh32x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) |
| |
| (Lsh16x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) |
| (Lsh16x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) |
| (Lsh16x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) |
| (Lsh16x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) |
| |
| (Lsh8x64 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) |
| (Lsh8x32 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) |
| (Lsh8x16 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) |
| (Lsh8x8 <t> x y) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) |
| |
| (Rsh64Ux64 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) |
| (Rsh64Ux32 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) |
| (Rsh64Ux16 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) |
| (Rsh64Ux8 <t> x y) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) |
| |
| (Rsh32Ux64 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) |
| (Rsh32Ux32 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) |
| (Rsh32Ux16 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) |
| (Rsh32Ux8 <t> x y) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) |
| |
| (Rsh16Ux64 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) |
| (Rsh16Ux32 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) |
| (Rsh16Ux16 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) |
| (Rsh16Ux8 <t> x y) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) |
| |
| (Rsh8Ux64 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) |
| (Rsh8Ux32 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) |
| (Rsh8Ux16 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) |
| (Rsh8Ux8 <t> x y) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) |
| |
| // Signed right shifts need to return 0/-1 if the shift amount is >= the width of the shifted value. |
| // We implement this by setting the shift amount to -1 (all ones) if it is >= the width. |
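| // In that case the NOT of the carry mask is all ones, so the OR forces the |
| // amount to -1; the hardware keeps only its low 5 or 6 bits, which is still |
| // >= the operand width, so the shift fills the result with sign bits (0 or -1). |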
| (Rsh64x64 <t> x y) -> (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) |
| (Rsh64x32 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) |
| (Rsh64x16 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) |
| (Rsh64x8 <t> x y) -> (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) |
| |
| (Rsh32x64 <t> x y) -> (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) |
| (Rsh32x32 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) |
| (Rsh32x16 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) |
| (Rsh32x8 <t> x y) -> (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) |
| |
| (Rsh16x64 <t> x y) -> (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) |
| (Rsh16x32 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) |
| (Rsh16x16 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) |
| (Rsh16x8 <t> x y) -> (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) |
| |
| (Rsh8x64 <t> x y) -> (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) |
| (Rsh8x32 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) |
| (Rsh8x16 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) |
| (Rsh8x8 <t> x y) -> (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) |
| |
| // Lowering comparisons |
| (Less64 x y) -> (SETL (CMPQ x y)) |
| (Less32 x y) -> (SETL (CMPL x y)) |
| (Less16 x y) -> (SETL (CMPW x y)) |
| (Less8 x y) -> (SETL (CMPB x y)) |
| (Less64U x y) -> (SETB (CMPQ x y)) |
| (Less32U x y) -> (SETB (CMPL x y)) |
| (Less16U x y) -> (SETB (CMPW x y)) |
| (Less8U x y) -> (SETB (CMPB x y)) |
| // Use SETGF with reversed operands to dodge the NaN case |
| (Less64F x y) -> (SETGF (UCOMISD y x)) |
| (Less32F x y) -> (SETGF (UCOMISS y x)) |
| |
| (Leq64 x y) -> (SETLE (CMPQ x y)) |
| (Leq32 x y) -> (SETLE (CMPL x y)) |
| (Leq16 x y) -> (SETLE (CMPW x y)) |
| (Leq8 x y) -> (SETLE (CMPB x y)) |
| (Leq64U x y) -> (SETBE (CMPQ x y)) |
| (Leq32U x y) -> (SETBE (CMPL x y)) |
| (Leq16U x y) -> (SETBE (CMPW x y)) |
| (Leq8U x y) -> (SETBE (CMPB x y)) |
| // Use SETGEF with reversed operands to dodge the NaN case |
| (Leq64F x y) -> (SETGEF (UCOMISD y x)) |
| (Leq32F x y) -> (SETGEF (UCOMISS y x)) |
| |
| (Greater64 x y) -> (SETG (CMPQ x y)) |
| (Greater32 x y) -> (SETG (CMPL x y)) |
| (Greater16 x y) -> (SETG (CMPW x y)) |
| (Greater8 x y) -> (SETG (CMPB x y)) |
| (Greater64U x y) -> (SETA (CMPQ x y)) |
| (Greater32U x y) -> (SETA (CMPL x y)) |
| (Greater16U x y) -> (SETA (CMPW x y)) |
| (Greater8U x y) -> (SETA (CMPB x y)) |
| // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here. |
| // The bug is accommodated at assembly-generation time. |
| (Greater64F x y) -> (SETGF (UCOMISD x y)) |
| (Greater32F x y) -> (SETGF (UCOMISS x y)) |
| |
| (Geq64 x y) -> (SETGE (CMPQ x y)) |
| (Geq32 x y) -> (SETGE (CMPL x y)) |
| (Geq16 x y) -> (SETGE (CMPW x y)) |
| (Geq8 x y) -> (SETGE (CMPB x y)) |
| (Geq64U x y) -> (SETAE (CMPQ x y)) |
| (Geq32U x y) -> (SETAE (CMPL x y)) |
| (Geq16U x y) -> (SETAE (CMPW x y)) |
| (Geq8U x y) -> (SETAE (CMPB x y)) |
| // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here. |
| // The bug is accommodated at assembly-generation time. |
| (Geq64F x y) -> (SETGEF (UCOMISD x y)) |
| (Geq32F x y) -> (SETGEF (UCOMISS x y)) |
| |
| (Eq64 x y) -> (SETEQ (CMPQ x y)) |
| (Eq32 x y) -> (SETEQ (CMPL x y)) |
| (Eq16 x y) -> (SETEQ (CMPW x y)) |
| (Eq8 x y) -> (SETEQ (CMPB x y)) |
| (EqB x y) -> (SETEQ (CMPB x y)) |
| (EqPtr x y) && config.PtrSize == 8 -> (SETEQ (CMPQ x y)) |
| (EqPtr x y) && config.PtrSize == 4 -> (SETEQ (CMPL x y)) |
| (Eq64F x y) -> (SETEQF (UCOMISD x y)) |
| (Eq32F x y) -> (SETEQF (UCOMISS x y)) |
| |
| (Neq64 x y) -> (SETNE (CMPQ x y)) |
| (Neq32 x y) -> (SETNE (CMPL x y)) |
| (Neq16 x y) -> (SETNE (CMPW x y)) |
| (Neq8 x y) -> (SETNE (CMPB x y)) |
| (NeqB x y) -> (SETNE (CMPB x y)) |
| (NeqPtr x y) && config.PtrSize == 8 -> (SETNE (CMPQ x y)) |
| (NeqPtr x y) && config.PtrSize == 4 -> (SETNE (CMPL x y)) |
| (Neq64F x y) -> (SETNEF (UCOMISD x y)) |
| (Neq32F x y) -> (SETNEF (UCOMISS x y)) |
| |
| (Int64Hi x) -> (SHRQconst [32] x) // needed for amd64p32 |
| |
| // Lowering loads |
| (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) -> (MOVQload ptr mem) |
| (Load <t> ptr mem) && (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) -> (MOVLload ptr mem) |
| (Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem) |
| (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem) |
| (Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem) |
| (Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem) |
| |
| // Lowering stores |
| // These more-specific FP versions of the Store pattern must come before the generic size-based rules below. |
| (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem) |
| (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem) |
| |
| (Store {t} ptr val mem) && t.(*types.Type).Size() == 8 -> (MOVQstore ptr val mem) |
| (Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVLstore ptr val mem) |
| (Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVWstore ptr val mem) |
| (Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem) |
| |
| // Lowering moves |
| (Move [0] _ _ mem) -> mem |
| (Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem) |
| (Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem) |
| (Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem) |
| (Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem) |
| (Move [16] dst src mem) && config.useSSE -> (MOVOstore dst (MOVOload src mem) mem) |
| (Move [16] dst src mem) && !config.useSSE -> |
| (MOVQstore [8] dst (MOVQload [8] src mem) |
| (MOVQstore dst (MOVQload src mem) mem)) |
| (Move [3] dst src mem) -> |
| (MOVBstore [2] dst (MOVBload [2] src mem) |
| (MOVWstore dst (MOVWload src mem) mem)) |
| (Move [5] dst src mem) -> |
| (MOVBstore [4] dst (MOVBload [4] src mem) |
| (MOVLstore dst (MOVLload src mem) mem)) |
| (Move [6] dst src mem) -> |
| (MOVWstore [4] dst (MOVWload [4] src mem) |
| (MOVLstore dst (MOVLload src mem) mem)) |
| (Move [7] dst src mem) -> |
| (MOVLstore [3] dst (MOVLload [3] src mem) |
| (MOVLstore dst (MOVLload src mem) mem)) |
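| // Note: the Move [7] case writes the 4 bytes at offsets 3 and 0, overlapping |
| // by one byte; the overlap is cheaper than a 4+2+1 store sequence. |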
| (Move [s] dst src mem) && s > 8 && s < 16 -> |
| (MOVQstore [s-8] dst (MOVQload [s-8] src mem) |
| (MOVQstore dst (MOVQload src mem) mem)) |
| |
| // Adjust moves to be a multiple of 16 bytes. |
| (Move [s] dst src mem) |
| && s > 16 && s%16 != 0 && s%16 <= 8 -> |
| (Move [s-s%16] |
| (OffPtr <dst.Type> dst [s%16]) |
| (OffPtr <src.Type> src [s%16]) |
| (MOVQstore dst (MOVQload src mem) mem)) |
| (Move [s] dst src mem) |
| && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE -> |
| (Move [s-s%16] |
| (OffPtr <dst.Type> dst [s%16]) |
| (OffPtr <src.Type> src [s%16]) |
| (MOVOstore dst (MOVOload src mem) mem)) |
| (Move [s] dst src mem) |
| && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE -> |
| (Move [s-s%16] |
| (OffPtr <dst.Type> dst [s%16]) |
| (OffPtr <src.Type> src [s%16]) |
| (MOVQstore [8] dst (MOVQload [8] src mem) |
| (MOVQstore dst (MOVQload src mem) mem))) |
| |
| // Medium copying uses Duff's device. |
| (Move [s] dst src mem) |
| && s >= 32 && s <= 16*64 && s%16 == 0 |
| && !config.noDuffDevice -> |
| (DUFFCOPY [14*(64-s/16)] dst src mem) |
| // 14 and 64 are magic constants. 14 is the number of bytes to encode: |
| // MOVUPS (SI), X0 |
| // ADDQ $16, SI |
| // MOVUPS X0, (DI) |
| // ADDQ $16, DI |
| // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy. |
| |
| // Large copying uses REP MOVSQ. |
| (Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 -> |
| (REPMOVSQ dst src (MOVQconst [s/8]) mem) |
| |
| // Lowering Zero instructions |
| (Zero [0] _ mem) -> mem |
| (Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem) |
| (Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem) |
| (Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem) |
| (Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem) |
| |
| (Zero [3] destptr mem) -> |
| (MOVBstoreconst [makeValAndOff(0,2)] destptr |
| (MOVWstoreconst [0] destptr mem)) |
| (Zero [5] destptr mem) -> |
| (MOVBstoreconst [makeValAndOff(0,4)] destptr |
| (MOVLstoreconst [0] destptr mem)) |
| (Zero [6] destptr mem) -> |
| (MOVWstoreconst [makeValAndOff(0,4)] destptr |
| (MOVLstoreconst [0] destptr mem)) |
| (Zero [7] destptr mem) -> |
| (MOVLstoreconst [makeValAndOff(0,3)] destptr |
| (MOVLstoreconst [0] destptr mem)) |
| |
| // Strip off any fractional word zeroing. |
| (Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE -> |
| (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) |
| (MOVQstoreconst [0] destptr mem)) |
| |
| // Zero small numbers of words directly. |
| (Zero [16] destptr mem) && !config.useSSE -> |
| (MOVQstoreconst [makeValAndOff(0,8)] destptr |
| (MOVQstoreconst [0] destptr mem)) |
| (Zero [24] destptr mem) && !config.useSSE -> |
| (MOVQstoreconst [makeValAndOff(0,16)] destptr |
| (MOVQstoreconst [makeValAndOff(0,8)] destptr |
| (MOVQstoreconst [0] destptr mem))) |
| (Zero [32] destptr mem) && !config.useSSE -> |
| (MOVQstoreconst [makeValAndOff(0,24)] destptr |
| (MOVQstoreconst [makeValAndOff(0,16)] destptr |
| (MOVQstoreconst [makeValAndOff(0,8)] destptr |
| (MOVQstoreconst [0] destptr mem)))) |
| |
| (Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE -> |
| (MOVQstoreconst [makeValAndOff(0,s-8)] destptr |
| (MOVQstoreconst [0] destptr mem)) |
| |
| // Adjust zeros to be a multiple of 16 bytes. |
| (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE -> |
| (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) |
| (MOVOstore destptr (MOVOconst [0]) mem)) |
| |
| (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE -> |
| (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) |
| (MOVQstoreconst [0] destptr mem)) |
| |
| (Zero [16] destptr mem) && config.useSSE -> |
| (MOVOstore destptr (MOVOconst [0]) mem) |
| (Zero [32] destptr mem) && config.useSSE -> |
| (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) |
| (MOVOstore destptr (MOVOconst [0]) mem)) |
| (Zero [48] destptr mem) && config.useSSE -> |
| (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) |
| (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) |
| (MOVOstore destptr (MOVOconst [0]) mem))) |
| (Zero [64] destptr mem) && config.useSSE -> |
| (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) |
| (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) |
| (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) |
| (MOVOstore destptr (MOVOconst [0]) mem)))) |
| |
| // Medium zeroing uses Duff's device. |
| (Zero [s] destptr mem) |
| && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice -> |
| (DUFFZERO [s] destptr (MOVOconst [0]) mem) |
| |
| // Large zeroing uses REP STOSQ. |
| (Zero [s] destptr mem) |
| && (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) |
| && s%8 == 0 -> |
| (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) |
| |
| // Lowering constants |
| (Const8 [val]) -> (MOVLconst [val]) |
| (Const16 [val]) -> (MOVLconst [val]) |
| (Const32 [val]) -> (MOVLconst [val]) |
| (Const64 [val]) -> (MOVQconst [val]) |
| (Const32F [val]) -> (MOVSSconst [val]) |
| (Const64F [val]) -> (MOVSDconst [val]) |
| (ConstNil) && config.PtrSize == 8 -> (MOVQconst [0]) |
| (ConstNil) && config.PtrSize == 4 -> (MOVLconst [0]) |
| (ConstBool [b]) -> (MOVLconst [b]) |
| |
| // Lowering calls |
| (StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem) |
| (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem) |
| (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem) |
| |
| // Miscellaneous |
| (Convert <t> x mem) && config.PtrSize == 8 -> (MOVQconvert <t> x mem) |
| (Convert <t> x mem) && config.PtrSize == 4 -> (MOVLconvert <t> x mem) |
| (IsNonNil p) && config.PtrSize == 8 -> (SETNE (TESTQ p p)) |
| (IsNonNil p) && config.PtrSize == 4 -> (SETNE (TESTL p p)) |
| (IsInBounds idx len) && config.PtrSize == 8 -> (SETB (CMPQ idx len)) |
| (IsInBounds idx len) && config.PtrSize == 4 -> (SETB (CMPL idx len)) |
| (IsSliceInBounds idx len) && config.PtrSize == 8 -> (SETBE (CMPQ idx len)) |
| (IsSliceInBounds idx len) && config.PtrSize == 4 -> (SETBE (CMPL idx len)) |
| (NilCheck ptr mem) -> (LoweredNilCheck ptr mem) |
| (GetG mem) -> (LoweredGetG mem) |
| (GetClosurePtr) -> (LoweredGetClosurePtr) |
| (GetCallerPC) -> (LoweredGetCallerPC) |
| (GetCallerSP) -> (LoweredGetCallerSP) |
| (Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base) |
| (Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base) |
| |
| (MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 -> (SETGmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 -> (SETGEmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 -> (SETEQmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 -> (SETNEmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 -> (SETBmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 -> (SETBEmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 -> (SETAmem [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 -> (SETAEmem [off] {sym} ptr x mem) |
| |
| // block rewrites |
| (If (SETL cmp) yes no) -> (LT cmp yes no) |
| (If (SETLE cmp) yes no) -> (LE cmp yes no) |
| (If (SETG cmp) yes no) -> (GT cmp yes no) |
| (If (SETGE cmp) yes no) -> (GE cmp yes no) |
| (If (SETEQ cmp) yes no) -> (EQ cmp yes no) |
| (If (SETNE cmp) yes no) -> (NE cmp yes no) |
| (If (SETB cmp) yes no) -> (ULT cmp yes no) |
| (If (SETBE cmp) yes no) -> (ULE cmp yes no) |
| (If (SETA cmp) yes no) -> (UGT cmp yes no) |
| (If (SETAE cmp) yes no) -> (UGE cmp yes no) |
| |
| // Special case for floating point - LF/LEF not generated |
| (If (SETGF cmp) yes no) -> (UGT cmp yes no) |
| (If (SETGEF cmp) yes no) -> (UGE cmp yes no) |
| (If (SETEQF cmp) yes no) -> (EQF cmp yes no) |
| (If (SETNEF cmp) yes no) -> (NEF cmp yes no) |
| |
| (If cond yes no) -> (NE (TESTB cond cond) yes no) |
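| // A generic boolean is materialized as a 0/1 byte, so test it by ANDing it |
| // with itself (TESTB) and branching on a nonzero result. |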
| |
| // Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here. |
| (AtomicLoad32 ptr mem) -> (MOVLatomicload ptr mem) |
| (AtomicLoad64 ptr mem) -> (MOVQatomicload ptr mem) |
| (AtomicLoadPtr ptr mem) && config.PtrSize == 8 -> (MOVQatomicload ptr mem) |
| (AtomicLoadPtr ptr mem) && config.PtrSize == 4 -> (MOVLatomicload ptr mem) |
| |
| // Atomic stores. We use XCHG to prevent the hardware from reordering a subsequent load. |
| // TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those? |
| (AtomicStore32 ptr val mem) -> (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem)) |
| (AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem)) |
| (AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) |
| (AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) |
| |
| // Atomic exchanges. |
| (AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem) |
| (AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem) |
| |
| // Atomic adds. |
| (AtomicAdd32 ptr val mem) -> (AddTupleFirst32 val (XADDLlock val ptr mem)) |
| (AtomicAdd64 ptr val mem) -> (AddTupleFirst64 val (XADDQlock val ptr mem)) |
| (Select0 <t> (AddTupleFirst32 val tuple)) -> (ADDL val (Select0 <t> tuple)) |
| (Select1 (AddTupleFirst32 _ tuple)) -> (Select1 tuple) |
| (Select0 <t> (AddTupleFirst64 val tuple)) -> (ADDQ val (Select0 <t> tuple)) |
| (Select1 (AddTupleFirst64 _ tuple)) -> (Select1 tuple) |
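| // XADDxlock returns the old memory contents, but AtomicAdd must return the |
| // new value, so AddTupleFirst* adds val back into the tuple's first element. |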
| |
| // Atomic compare and swap. |
| (AtomicCompareAndSwap32 ptr old new_ mem) -> (CMPXCHGLlock ptr old new_ mem) |
| (AtomicCompareAndSwap64 ptr old new_ mem) -> (CMPXCHGQlock ptr old new_ mem) |
| |
| // Atomic memory updates. |
| (AtomicAnd8 ptr val mem) -> (ANDBlock ptr val mem) |
| (AtomicOr8 ptr val mem) -> (ORBlock ptr val mem) |
| |
| // Write barrier. |
| (WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem) |
| |
| // *************************** |
| // Above: lowering rules |
| // Below: optimizations |
| // *************************** |
| // TODO: Should the optimizations be a separate pass? |
| |
| // Fold boolean tests into blocks |
| (NE (TESTB (SETL cmp) (SETL cmp)) yes no) -> (LT cmp yes no) |
| (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE cmp yes no) |
| (NE (TESTB (SETG cmp) (SETG cmp)) yes no) -> (GT cmp yes no) |
| (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE cmp yes no) |
| (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ cmp yes no) |
| (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE cmp yes no) |
| (NE (TESTB (SETB cmp) (SETB cmp)) yes no) -> (ULT cmp yes no) |
| (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no) |
| (NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no) |
| (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no) |
| |
| // Recognize bit tests: a&(1<<b) != 0 for b suitably bounded |
| // Note that ULT and SETB check the carry flag; they are identical to CS and SETCS. |
| // Same, mutatis mutandis, for UGE and SETAE, and CC and SETCC. |
| (NE (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (ULT (BTL x y)) |
| (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (UGE (BTL x y)) |
| (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (ULT (BTQ x y)) |
| (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (UGE (BTQ x y)) |
| (NE (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (ULT (BTLconst [log2(c)] x)) |
| (EQ (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (UGE (BTLconst [log2(c)] x)) |
| (NE (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (ULT (BTQconst [log2(c)] x)) |
| (EQ (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (UGE (BTQconst [log2(c)] x)) |
| (NE (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (ULT (BTQconst [log2(c)] x)) |
| (EQ (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (UGE (BTQconst [log2(c)] x)) |
| (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SETB (BTL x y)) |
| (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SETAE (BTL x y)) |
| (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SETB (BTQ x y)) |
| (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SETAE (BTQ x y)) |
| (SETNE (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETB (BTLconst [log2(c)] x)) |
| (SETEQ (TESTLconst [c] x)) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETAE (BTLconst [log2(c)] x)) |
| (SETNE (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETB (BTQconst [log2(c)] x)) |
| (SETEQ (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x)) |
| (SETNE (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETB (BTQconst [log2(c)] x)) |
| (SETEQ (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x)) |
| // SET..mem variant |
| (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl -> (SETBmem [off] {sym} ptr (BTL x y) mem) |
| (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl -> (SETAEmem [off] {sym} ptr (BTL x y) mem) |
| (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl -> (SETBmem [off] {sym} ptr (BTQ x y) mem) |
| (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQ x y) mem) |
| (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) |
| (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) |
| (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) |
| (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) |
| (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) |
| (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) |
| |
| // Fold boolean negation into SETcc. |
| (XORLconst [1] (SETNE x)) -> (SETEQ x) |
| (XORLconst [1] (SETEQ x)) -> (SETNE x) |
| (XORLconst [1] (SETL x)) -> (SETGE x) |
| (XORLconst [1] (SETGE x)) -> (SETL x) |
| (XORLconst [1] (SETLE x)) -> (SETG x) |
| (XORLconst [1] (SETG x)) -> (SETLE x) |
| (XORLconst [1] (SETB x)) -> (SETAE x) |
| (XORLconst [1] (SETAE x)) -> (SETB x) |
| (XORLconst [1] (SETBE x)) -> (SETA x) |
| (XORLconst [1] (SETA x)) -> (SETBE x) |
| |
| // Convert BTQconst to BTLconst if possible. It has a shorter encoding. |
| (BTQconst [c] x) && c < 32 -> (BTLconst [c] x) |
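| // For bit positions below 32, BTLconst and BTQconst test the same bit of a |
| // register, and the L form avoids the REX.W prefix. |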
| |
| // Special case for floating point - LF/LEF not generated |
| (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no) |
| (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE cmp yes no) |
| (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF cmp yes no) |
| (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no) |
| |
| // Disabled because it interferes with the pattern match above and makes worse code. |
| // (SETNEF x) -> (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x)) |
| // (SETEQF x) -> (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x)) |
| |
| // fold constants into instructions |
| (ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x) |
| (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x) |
| |
| (SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c]) |
| (SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst <v.Type> x [c])) |
| (SUBL x (MOVLconst [c])) -> (SUBLconst x [c]) |
| (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c])) |
| |
| (MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x) |
| (MULL x (MOVLconst [c])) -> (MULLconst [c] x) |
| |
| (ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x) |
| (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x) |
| |
| (ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x) |
| (ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x) |
| |
| (XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x) |
| (XORQconst [c] (XORQconst [d] x)) -> (XORQconst [c ^ d] x) |
| |
| (MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x) |
| (MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x) |
| |
| (ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x) |
| (ORL x (MOVLconst [c])) -> (ORLconst [c] x) |
| |
| (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x) |
| (XORL x (MOVLconst [c])) -> (XORLconst [c] x) |
| |
| (SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x) |
| (SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x) |
| |
| (SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x) |
| (SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x) |
| |
| (SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x) |
| (SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x) |
| |
| (SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x) |
| (SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x) |
| |
| (SHRW x (MOVQconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x) |
| (SHRW x (MOVLconst [c])) && c&31 < 16 -> (SHRWconst [c&31] x) |
| (SHRW _ (MOVQconst [c])) && c&31 >= 16 -> (MOVLconst [0]) |
| (SHRW _ (MOVLconst [c])) && c&31 >= 16 -> (MOVLconst [0]) |
| |
| (SHRB x (MOVQconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x) |
| (SHRB x (MOVLconst [c])) && c&31 < 8 -> (SHRBconst [c&31] x) |
| (SHRB _ (MOVQconst [c])) && c&31 >= 8 -> (MOVLconst [0]) |
| (SHRB _ (MOVLconst [c])) && c&31 >= 8 -> (MOVLconst [0]) |
| |
| (SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x) |
| (SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x) |
| |
| (SARL x (MOVQconst [c])) -> (SARLconst [c&31] x) |
| (SARL x (MOVLconst [c])) -> (SARLconst [c&31] x) |
| |
| (SARW x (MOVQconst [c])) -> (SARWconst [min(c&31,15)] x) |
| (SARW x (MOVLconst [c])) -> (SARWconst [min(c&31,15)] x) |
| |
| (SARB x (MOVQconst [c])) -> (SARBconst [min(c&31,7)] x) |
| (SARB x (MOVLconst [c])) -> (SARBconst [min(c&31,7)] x) |
| |
| // Operations which don't affect the low 6/5 bits of the shift amount are NOPs. |
| (SHLQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SHLQ x y) |
| (SHRQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SHRQ x y) |
| (SARQ x (ADDQconst [c] y)) && c & 63 == 0 -> (SARQ x y) |
| (SHLQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SHLQ x (NEGQ <t> y)) |
| (SHRQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SHRQ x (NEGQ <t> y)) |
| (SARQ x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 -> (SARQ x (NEGQ <t> y)) |
| (SHLQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SHLQ x y) |
| (SHRQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SHRQ x y) |
| (SARQ x (ANDQconst [c] y)) && c & 63 == 63 -> (SARQ x y) |
| (SHLQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SHLQ x (NEGQ <t> y)) |
| (SHRQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SHRQ x (NEGQ <t> y)) |
| (SARQ x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 -> (SARQ x (NEGQ <t> y)) |
| |
| (SHLL x (ADDQconst [c] y)) && c & 31 == 0 -> (SHLL x y) |
| (SHRL x (ADDQconst [c] y)) && c & 31 == 0 -> (SHRL x y) |
| (SARL x (ADDQconst [c] y)) && c & 31 == 0 -> (SARL x y) |
| (SHLL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SHLL x (NEGQ <t> y)) |
| (SHRL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SHRL x (NEGQ <t> y)) |
| (SARL x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 -> (SARL x (NEGQ <t> y)) |
| (SHLL x (ANDQconst [c] y)) && c & 31 == 31 -> (SHLL x y) |
| (SHRL x (ANDQconst [c] y)) && c & 31 == 31 -> (SHRL x y) |
| (SARL x (ANDQconst [c] y)) && c & 31 == 31 -> (SARL x y) |
| (SHLL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SHLL x (NEGQ <t> y)) |
| (SHRL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SHRL x (NEGQ <t> y)) |
| (SARL x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 -> (SARL x (NEGQ <t> y)) |
| |
| (SHLQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SHLQ x y) |
| (SHRQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SHRQ x y) |
| (SARQ x (ADDLconst [c] y)) && c & 63 == 0 -> (SARQ x y) |
| (SHLQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SHLQ x (NEGL <t> y)) |
| (SHRQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SHRQ x (NEGL <t> y)) |
| (SARQ x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 -> (SARQ x (NEGL <t> y)) |
| (SHLQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SHLQ x y) |
| (SHRQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SHRQ x y) |
| (SARQ x (ANDLconst [c] y)) && c & 63 == 63 -> (SARQ x y) |
| (SHLQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SHLQ x (NEGL <t> y)) |
| (SHRQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SHRQ x (NEGL <t> y)) |
| (SARQ x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 -> (SARQ x (NEGL <t> y)) |
| |
| (SHLL x (ADDLconst [c] y)) && c & 31 == 0 -> (SHLL x y) |
| (SHRL x (ADDLconst [c] y)) && c & 31 == 0 -> (SHRL x y) |
| (SARL x (ADDLconst [c] y)) && c & 31 == 0 -> (SARL x y) |
| (SHLL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SHLL x (NEGL <t> y)) |
| (SHRL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SHRL x (NEGL <t> y)) |
| (SARL x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 -> (SARL x (NEGL <t> y)) |
| (SHLL x (ANDLconst [c] y)) && c & 31 == 31 -> (SHLL x y) |
| (SHRL x (ANDLconst [c] y)) && c & 31 == 31 -> (SHRL x y) |
| (SARL x (ANDLconst [c] y)) && c & 31 == 31 -> (SARL x y) |
| (SHLL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SHLL x (NEGL <t> y)) |
| (SHRL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SHRL x (NEGL <t> y)) |
| (SARL x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 -> (SARL x (NEGL <t> y)) |
| |
| // Constant rotate instructions |
| (ADDQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c]) |
| ( ORQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c]) |
| (XORQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c]) |
| |
| (ADDL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c]) |
| ( ORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c]) |
| (XORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c]) |
| |
| (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c]) |
| ( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c]) |
| (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c]) |
| |
| (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c]) |
| ( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c]) |
| (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c]) |
| |
| (ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x) |
| (ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x) |
| (ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x) |
| (ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x) |
| |
| // Non-constant rotates. |
| // We want to issue a rotate when the Go source contains code like |
| // y &= 63 |
| // x << y | x >> (64-y) |
| // The shift rules above convert << to SHLx and >> to SHRx. |
| // SHRx converts its shift argument from 64-y to -y. |
| // A tricky situation occurs when y==0. Then the original code would be: |
| // x << 0 | x >> 64 |
| // But x >> 64 is 0, not x. So there's an additional mask that is ANDed in |
| // to force the second term to 0. We don't need that mask, but we must match |
| // it in order to strip it out. |
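| // For example, when y==0 the comparison sees exactly the width, the carry is |
| // clear, and the mask is all zeros, killing the x>>width term so the result |
| // is just x; for any other masked y the mask is all ones and the AND is a no-op. |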
| (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) -> (ROLQ x y) |
| (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) -> (ROLQ x y) |
| (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) -> (RORQ x y) |
| (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) -> (RORQ x y) |
| |
| (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) -> (ROLL x y) |
| (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) -> (ROLL x y) |
| (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) -> (RORL x y) |
| (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) -> (RORL x y) |
| |
| // Help with rotate detection |
| (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) -> (FlagLT_ULT) |
| (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) -> (FlagLT_ULT) |
| |
| (ORL (SHLL x (ANDQconst y [15])) |
| (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) |
| (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) |
| && v.Type.Size() == 2 |
| -> (ROLW x y) |
| (ORL (SHLL x (ANDLconst y [15])) |
| (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) |
| (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])))) |
| && v.Type.Size() == 2 |
| -> (ROLW x y) |
| (ORL (SHRW x (ANDQconst y [15])) |
| (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) |
| && v.Type.Size() == 2 |
| -> (RORW x y) |
| (ORL (SHRW x (ANDLconst y [15])) |
| (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) |
| && v.Type.Size() == 2 |
| -> (RORW x y) |
| |
| (ORL (SHLL x (ANDQconst y [ 7])) |
| (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) |
| (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])))) |
| && v.Type.Size() == 1 |
| -> (ROLB x y) |
| (ORL (SHLL x (ANDLconst y [ 7])) |
| (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) |
| (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])))) |
| && v.Type.Size() == 1 |
| -> (ROLB x y) |
| (ORL (SHRB x (ANDQconst y [ 7])) |
| (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) |
| && v.Type.Size() == 1 |
| -> (RORB x y) |
| (ORL (SHRB x (ANDLconst y [ 7])) |
| (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) |
| && v.Type.Size() == 1 |
| -> (RORB x y) |
| |
| // rotate left negative = rotate right |
| (ROLQ x (NEGQ y)) -> (RORQ x y) |
| (ROLQ x (NEGL y)) -> (RORQ x y) |
| (ROLL x (NEGQ y)) -> (RORL x y) |
| (ROLL x (NEGL y)) -> (RORL x y) |
| (ROLW x (NEGQ y)) -> (RORW x y) |
| (ROLW x (NEGL y)) -> (RORW x y) |
| (ROLB x (NEGQ y)) -> (RORB x y) |
| (ROLB x (NEGL y)) -> (RORB x y) |
| |
| // rotate right negative = rotate left |
| (RORQ x (NEGQ y)) -> (ROLQ x y) |
| (RORQ x (NEGL y)) -> (ROLQ x y) |
| (RORL x (NEGQ y)) -> (ROLL x y) |
| (RORL x (NEGL y)) -> (ROLL x y) |
| (RORW x (NEGQ y)) -> (ROLW x y) |
| (RORW x (NEGL y)) -> (ROLW x y) |
| (RORB x (NEGQ y)) -> (ROLB x y) |
| (RORB x (NEGL y)) -> (ROLB x y) |
| |
| // rotate by constants |
| (ROLQ x (MOVQconst [c])) -> (ROLQconst [c&63] x) |
| (ROLQ x (MOVLconst [c])) -> (ROLQconst [c&63] x) |
| (ROLL x (MOVQconst [c])) -> (ROLLconst [c&31] x) |
| (ROLL x (MOVLconst [c])) -> (ROLLconst [c&31] x) |
| (ROLW x (MOVQconst [c])) -> (ROLWconst [c&15] x) |
| (ROLW x (MOVLconst [c])) -> (ROLWconst [c&15] x) |
| (ROLB x (MOVQconst [c])) -> (ROLBconst [c&7 ] x) |
| (ROLB x (MOVLconst [c])) -> (ROLBconst [c&7 ] x) |
| |
| (RORQ x (MOVQconst [c])) -> (ROLQconst [(-c)&63] x) |
| (RORQ x (MOVLconst [c])) -> (ROLQconst [(-c)&63] x) |
| (RORL x (MOVQconst [c])) -> (ROLLconst [(-c)&31] x) |
| (RORL x (MOVLconst [c])) -> (ROLLconst [(-c)&31] x) |
| (RORW x (MOVQconst [c])) -> (ROLWconst [(-c)&15] x) |
| (RORW x (MOVLconst [c])) -> (ROLWconst [(-c)&15] x) |
| (RORB x (MOVQconst [c])) -> (ROLBconst [(-c)&7 ] x) |
| (RORB x (MOVLconst [c])) -> (ROLBconst [(-c)&7 ] x) |
| |
| // Constant shift simplifications |
| (SHLQconst x [0]) -> x |
| (SHRQconst x [0]) -> x |
| (SARQconst x [0]) -> x |
| |
| (SHLLconst x [0]) -> x |
| (SHRLconst x [0]) -> x |
| (SARLconst x [0]) -> x |
| |
| (SHRWconst x [0]) -> x |
| (SARWconst x [0]) -> x |
| |
| (SHRBconst x [0]) -> x |
| (SARBconst x [0]) -> x |
| |
| (ROLQconst x [0]) -> x |
| (ROLLconst x [0]) -> x |
| (ROLWconst x [0]) -> x |
| (ROLBconst x [0]) -> x |
| |
| // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) |
| // because the x86 instructions are defined to use all 5 bits of the shift even |
| // for the small shifts. I don't think we'll ever generate a weird shift (e.g. |
| // (SHRW x (MOVLconst [24]))), but just in case. |
| |
| (CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c]) |
| (CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c])) |
| (CMPL x (MOVLconst [c])) -> (CMPLconst x [c]) |
| (CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c])) |
| (CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))]) |
| (CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))])) |
| (CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))]) |
| (CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))])) |
| |
| // Using MOVZX instead of AND is cheaper. |
| (ANDLconst [0xFF] x) -> (MOVBQZX x) |
| (ANDLconst [0xFFFF] x) -> (MOVWQZX x) |
| (ANDQconst [0xFF] x) -> (MOVBQZX x) |
| (ANDQconst [0xFFFF] x) -> (MOVWQZX x) |
| (ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x) |
| |
| // strength reduction |
| // Assumes the following instruction costs, from https://gmplib.org/~tege/x86-timing.pdf: |
| // 1 - addq, shlq, leaq, negq |
| // 3 - imulq |
| // This limits the rewrites to two instructions. |
| // TODO: 27, 81 |
| (MULQconst [-1] x) -> (NEGQ x) |
| (MULQconst [0] _) -> (MOVQconst [0]) |
| (MULQconst [1] x) -> x |
| (MULQconst [3] x) -> (LEAQ2 x x) |
| (MULQconst [5] x) -> (LEAQ4 x x) |
| (MULQconst [7] x) -> (LEAQ8 (NEGQ <v.Type> x) x) |
| (MULQconst [9] x) -> (LEAQ8 x x) |
| (MULQconst [11] x) -> (LEAQ2 x (LEAQ4 <v.Type> x x)) |
| (MULQconst [13] x) -> (LEAQ4 x (LEAQ2 <v.Type> x x)) |
| (MULQconst [21] x) -> (LEAQ4 x (LEAQ4 <v.Type> x x)) |
| (MULQconst [25] x) -> (LEAQ8 x (LEAQ2 <v.Type> x x)) |
| (MULQconst [37] x) -> (LEAQ4 x (LEAQ8 <v.Type> x x)) |
| (MULQconst [41] x) -> (LEAQ8 x (LEAQ4 <v.Type> x x)) |
| (MULQconst [73] x) -> (LEAQ8 x (LEAQ8 <v.Type> x x)) |
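| // For example, MULQconst [11]: (LEAQ4 <v.Type> x x) computes x+4*x = 5*x, |
| // and (LEAQ2 x (LEAQ4 <v.Type> x x)) computes x+2*(5*x) = 11*x. |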
| |
| (MULQconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) |
| (MULQconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) |
| (MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) |
| (MULQconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) |
| (MULQconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) |
| (MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) |
| (MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) |
| (MULQconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) |
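| // For example, MULQconst [24]: (LEAQ2 <v.Type> x x) computes 3*x, and the |
| // SHLQconst [3] then multiplies that by 8 to give 24*x. |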
| |
| // combine add/shift into LEAQ |
| (ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y) |
| (ADDQ x (SHLQconst [2] y)) -> (LEAQ4 x y) |
| (ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y) |
| (ADDQ x (ADDQ y y)) -> (LEAQ2 x y) |
| (ADDQ x (ADDQ x y)) -> (LEAQ2 y x) |
| |
| // combine ADDQ/ADDQconst into LEAQ1 |
| (ADDQconst [c] (ADDQ x y)) -> (LEAQ1 [c] x y) |
| (ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y) |
| |
| // fold ADDQ into LEAQ |
| (ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x) |
| (LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x) |
| (LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y) |
| (ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y) |
| |
| // fold ADDQconst into LEAQx |
| (ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y) |
| (ADDQconst [c] (LEAQ2 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ2 [c+d] {s} x y) |
| (ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y) |
| (ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y) |
| (LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y) |
| (LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y) |
| (LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y) |
| (LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y) |
| (LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB -> (LEAQ4 [c+4*d] {s} x y) |
| (LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y) |
| (LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y) |
| |
| // fold shifts into LEAQx |
| (LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y) |
| (LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y) |
| (LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y) |
| (LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y) |
| (LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y) |
| (LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y) |
| |
| // reverse ordering of compare instruction |
| (SETL (InvertFlags x)) -> (SETG x) |
| (SETG (InvertFlags x)) -> (SETL x) |
| (SETB (InvertFlags x)) -> (SETA x) |
| (SETA (InvertFlags x)) -> (SETB x) |
| (SETLE (InvertFlags x)) -> (SETGE x) |
| (SETGE (InvertFlags x)) -> (SETLE x) |
| (SETBE (InvertFlags x)) -> (SETAE x) |
| (SETAE (InvertFlags x)) -> (SETBE x) |
| (SETEQ (InvertFlags x)) -> (SETEQ x) |
| (SETNE (InvertFlags x)) -> (SETNE x) |
| |
| (SETLmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGmem [off] {sym} ptr x mem) |
| (SETGmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLmem [off] {sym} ptr x mem) |
| (SETBmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAmem [off] {sym} ptr x mem) |
| (SETAmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBmem [off] {sym} ptr x mem) |
| (SETLEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGEmem [off] {sym} ptr x mem) |
| (SETGEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLEmem [off] {sym} ptr x mem) |
| (SETBEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAEmem [off] {sym} ptr x mem) |
| (SETAEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBEmem [off] {sym} ptr x mem) |
| (SETEQmem [off] {sym} ptr (InvertFlags x) mem) -> (SETEQmem [off] {sym} ptr x mem) |
| (SETNEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETNEmem [off] {sym} ptr x mem) |
| |
| // sign extended loads |
| // Note: The combined instruction must end up in the same block |
| // as the original load. If not, we end up making a value with |
| // memory type live in two different blocks, which can lead to |
| // multiple memory values alive simultaneously. |
| // Make sure we don't combine these ops if the load has another use. |
| // This prevents a single load from being split into multiple loads |
| // which then might return different values. See test/atomicload.go. |
| (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) |
| (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) |
| (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) |
| (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) |
| (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) |
| (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) |
| (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem) |
| (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) |
| (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) |
| (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) |
| |
| (MOVLQZX x) && zeroUpper32Bits(x,3) -> x |
| |
| (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) |
| (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) |
| (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) |
| (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) |
| (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) |
| |
| // replace a load from the same location as a preceding store with a zero/sign extension (or a copy in the full-width case) |
| (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQZX x) |
| (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQZX x) |
| (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQZX x) |
| (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x |
| (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBQSX x) |
| (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWQSX x) |
| (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVLQSX x) |
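| // The narrow cases must re-extend the forwarded value because only its low |
| // bits are meaningful; the full-width MOVQload case can forward x unchanged. |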
| |
| // Fold extensions and ANDs together. |
| (MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x) |
| (MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x) |
| (MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x) |
| (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x) |
| (MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x) |
| (MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x) |
| |
| // Don't extend before storing |
| (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem) |
| (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) -> (MOVWstore [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) -> (MOVBstore [off] {sym} ptr x mem) |
| (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) -> (MOVLstore [off] {sym} ptr x mem) |
| (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) -> (MOVWstore [off] {sym} ptr x mem) |
| (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) -> (MOVBstore [off] {sym} ptr x mem) |
| |
| // fold constants into memory operations |
| // Note that this is not always a good idea because if not all the uses of |
| // the ADDQconst get eliminated, we still have to compute the ADDQconst and we now |
| // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one. |
| // Nevertheless, let's do it! |
| (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem) |
| (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem) |
| (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem) |
| (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem) |
| (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSSload [off1+off2] {sym} ptr mem) |
| (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVSDload [off1+off2] {sym} ptr mem) |
| (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVOload [off1+off2] {sym} ptr mem) |
| |
| (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem) |
| (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {sym} ptr val mem) |
| (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem) |
| (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem) |
| (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSSstore [off1+off2] {sym} ptr val mem) |
| (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVSDstore [off1+off2] {sym} ptr val mem) |
| (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVOstore [off1+off2] {sym} ptr val mem) |
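| // A typical source of such an ADDQconst is a field access; an illustrative |
| // sketch with hypothetical names: |
| // |
| //	type T struct{ a, b int64 } |
| // |
| //	func get(p *T) int64 { return p.b } |
| // |
| // The load from (ADDQconst [8] p) folds into a single (MOVQload [8] p mem). |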
| |
| // Fold constants into stores. |
| (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> |
| (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) |
| (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> |
| (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) |
| (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> |
| (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) |
| (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> |
| (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) |
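| // For example (illustrative only): |
| // |
| //	func clear(p *int64) { *p = 0 } |
| // |
| // stores the constant directly, as (MOVQstoreconst [makeValAndOff(0,0)] ptr mem), |
| // instead of first materializing 0 in a register. |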
| |
| // Fold address offsets into constant stores. |
| (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> |
| (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) |
| (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> |
| (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) |
| (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> |
| (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) |
| (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> |
| (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) |
| |
| // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows |
| // what variables are being read/written by the ops. |
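| // For example, a load of the second word of a global g arrives as |
| // |
| //	(MOVQload [8] (LEAQ [0] {g} SB) mem) |
| // |
| // and is rewritten to (MOVQload [8] {g} SB mem), making the read of g visible |
| // on the load itself. |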
| (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| |
| (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| |
| (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| |
| (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> |
| (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) |
| (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> |
| (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) |
| (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> |
| (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) |
| (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> |
| (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) |
| |
| // generating indexed loads and stores |
| (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
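| // For example (an illustrative sketch): |
| // |
| //	func get(a []int64, i int) int64 { return a[i] } |
| // |
| // Whether the element address arrives as a LEAQ8 or as an ADDQ of a |
| // SHLQconst, the rules in this section fold it into a single |
| // (MOVQloadidx8 ptr idx mem). |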
| |
| (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| |
| // An address computed as a plain (ADDQ ptr idx) becomes an unscaled indexed access. |
| (MOVBload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx1 [off] {sym} ptr idx mem) |
| (MOVWload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx1 [off] {sym} ptr idx mem) |
| (MOVLload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVLloadidx1 [off] {sym} ptr idx mem) |
| (MOVQload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVQloadidx1 [off] {sym} ptr idx mem) |
| (MOVSSload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSSloadidx1 [off] {sym} ptr idx mem) |
| (MOVSDload [off] {sym} (ADDQ ptr idx) mem) && ptr.Op != OpSB -> (MOVSDloadidx1 [off] {sym} ptr idx mem) |
| (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx1 [off] {sym} ptr idx val mem) |
| (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx1 [off] {sym} ptr idx val mem) |
| (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVLstoreidx1 [off] {sym} ptr idx val mem) |
| (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVQstoreidx1 [off] {sym} ptr idx val mem) |
| (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSSstoreidx1 [off] {sym} ptr idx val mem) |
| (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB -> (MOVSDstoreidx1 [off] {sym} ptr idx val mem) |
| |
| (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> |
| (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> |
| (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> |
| (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> |
| (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> |
| (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> |
| (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) |
| (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) && canMergeSym(sym1, sym2) -> |
| (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) |
| |
| (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVBstoreconstidx1 [x] {sym} ptr idx mem) |
| (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVWstoreconstidx1 [x] {sym} ptr idx mem) |
| (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVLstoreconstidx1 [x] {sym} ptr idx mem) |
| (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) -> (MOVQstoreconstidx1 [x] {sym} ptr idx mem) |
| |
| // combine SHLQconst into indexed loads and stores |
| (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem) |
| (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem) |
| (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVLloadidx8 [c] {sym} ptr idx mem) |
| (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQloadidx8 [c] {sym} ptr idx mem) |
| (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVSSloadidx4 [c] {sym} ptr idx mem) |
| (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVSDloadidx8 [c] {sym} ptr idx mem) |
| (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem) |
| (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem) |
| (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVLstoreidx8 [c] {sym} ptr idx val mem) |
| (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVQstoreidx8 [c] {sym} ptr idx val mem) |
| (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVSSstoreidx4 [c] {sym} ptr idx val mem) |
| (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVSDstoreidx8 [c] {sym} ptr idx val mem) |
| (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWstoreconstidx2 [c] {sym} ptr idx mem) |
| (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLstoreconstidx4 [c] {sym} ptr idx mem) |
| (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem) |
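| // These are sound because SHLQconst [k] idx computes idx << k == idx * 2^k, |
| // so a scale-1 access of a pre-shifted index addresses the same memory as a |
| // scale-2^k access of the unshifted index: |
| // |
| //	ptr + (idx<<3) + c  ==  ptr + 8*idx + c |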
| |
| // combine ADDQconst into indexed loads and stores |
| (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem) |
| (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem) |
| (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx8 [c+d] {sym} ptr idx mem) |
| (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem) |
| (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem) |
| (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem) |
| |
| (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) |
| (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) |
| (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx8 [c+d] {sym} ptr idx val mem) |
| (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) |
| (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) |
| (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) |
| |
| (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) |
| (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) |
| (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVLloadidx8 [c+8*d] {sym} ptr idx mem) |
| (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) |
| (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) |
| (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) |
| (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) |
| |
| (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) |
| (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) |
| (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem) |
| (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) |
| (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) |
| (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) |
| (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) |
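| // Note that a constant added to the index of a scaled access is multiplied |
| // by the scale, since the effective address is |
| // |
| //	ptr + scale*(idx+d) + c  ==  ptr + scale*idx + (c + scale*d) |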
| |
| (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> |
| (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> |
| (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> |
| (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> |
| (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> |
| (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> |
| (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> |
| (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| |
| (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> |
| (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> |
| (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(2*c) -> |
| (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) |
| (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> |
| (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(4*c) -> |
| (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) |
| (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> |
| (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) |
| (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(8*c) -> |
| (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) |
| |
| // fold LEAQs together |
| (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) |
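| // e.g. (LEAQ [8] (LEAQ [16] {s} x)) folds to (LEAQ [24] {s} x). |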
| |
| // LEAQ into LEAQ1 |
| (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> |
| (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| |
| // LEAQ1 into LEAQ |
| (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| |
| // LEAQ into LEAQ[248] |
| (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> |
| (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> |
| (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> |
| (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| |
| // LEAQ[248] into LEAQ |
| (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> |
| (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| |
| // Absorb InvertFlags into branches. |
| (LT (InvertFlags cmp) yes no) -> (GT cmp yes no) |
| (GT (InvertFlags cmp) yes no) -> (LT cmp yes no) |
| (LE (InvertFlags cmp) yes no) -> (GE cmp yes no) |
| (GE (InvertFlags cmp) yes no) -> (LE cmp yes no) |
| (ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no) |
| (UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no) |
| (ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no) |
| (UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no) |
| (EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no) |
| (NE (InvertFlags cmp) yes no) -> (NE cmp yes no) |
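| // EQ and NE keep their sense: swapping the operands of a comparison flips |
| // the ordered conditions but preserves (in)equality, so InvertFlags can |
| // simply be dropped. |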
| |
| // Constant comparisons. |
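| // Comparing two constants folds the comparison away entirely, leaving a flags |
| // constant that records both the signed and the unsigned ordering. For |
| // example, comparing x = -1 against y = 1 yields FlagLT_UGT: -1 < 1 signed, |
| // but 0xffffffffffffffff > 1 unsigned. |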
| (CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ) |
| (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT) |
| (CMPQconst (MOVQconst [x]) [y]) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT) |
| (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT) |
| (CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT) |
| (CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ) |
| (CMPLconst (MOVLconst [x]) [y]) && int32(x)<int32(y) && uint32(x)<uint32(y) -> (FlagLT_ULT) |
| (CMPLconst (MOVLconst [x]) |