// autogenerated from gen/AMD64.rules: do not edit!
// generated with: cd gen; go run *.go

package ssa

import "math"

var _ = math.MinInt8 // in case not otherwise used
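// rewriteValueAMD64 dispatches v to the rewrite routine generated for its
// opcode and reports whether any rule rewrote v in place.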
func rewriteValueAMD64(v *Value, config *Config) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v, config)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v, config)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v, config)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v, config)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v, config)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v, config)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v, config)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v, config)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v, config)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v, config)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v, config)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v, config)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v, config)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v, config)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v, config)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v, config)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v, config)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v, config)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v, config)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v, config)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v, config)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v, config)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v, config)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v, config)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v, config)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v, config)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v, config)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v, config)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v, config)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v, config)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v, config)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v, config)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v, config)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v, config)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v, config)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v, config)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v, config)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v, config)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v, config)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v, config)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v, config)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v, config)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v, config)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v, config)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v, config)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v, config)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v, config)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v, config)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v, config)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v, config)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v, config)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v, config)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v, config)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v, config)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v, config)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v, config)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v, config)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v, config)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v, config)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v, config)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v, config)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16(v, config)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32(v, config)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F(v, config)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64(v, config)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F(v, config)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8(v, config)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr(v, config)
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v, config)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16(v, config)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32(v, config)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64(v, config)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8(v, config)
	case OpAndB:
		return rewriteValueAMD64_OpAndB(v, config)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v, config)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v, config)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v, config)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v, config)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v, config)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v, config)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v, config)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v, config)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v, config)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v, config)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v, config)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u(v, config)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32(v, config)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64(v, config)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall(v, config)
	case OpCom16:
		return rewriteValueAMD64_OpCom16(v, config)
	case OpCom32:
		return rewriteValueAMD64_OpCom32(v, config)
	case OpCom64:
		return rewriteValueAMD64_OpCom64(v, config)
	case OpCom8:
		return rewriteValueAMD64_OpCom8(v, config)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v, config)
	case OpConst32:
		return rewriteValueAMD64_OpConst32(v, config)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F(v, config)
	case OpConst64:
		return rewriteValueAMD64_OpConst64(v, config)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F(v, config)
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v, config)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v, config)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v, config)
	case OpConvert:
		return rewriteValueAMD64_OpConvert(v, config)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v, config)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v, config)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32(v, config)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64(v, config)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F(v, config)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F(v, config)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F(v, config)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32(v, config)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F(v, config)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64(v, config)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F(v, config)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F(v, config)
	case OpDeferCall:
		return rewriteValueAMD64_OpDeferCall(v, config)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u(v, config)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v, config)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v, config)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v, config)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F(v, config)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v, config)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v, config)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F(v, config)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v, config)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v, config)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v, config)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v, config)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v, config)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v, config)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v, config)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v, config)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v, config)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v, config)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v, config)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16(v, config)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U(v, config)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32(v, config)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F(v, config)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U(v, config)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64(v, config)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F(v, config)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U(v, config)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8(v, config)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U(v, config)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr(v, config)
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v, config)
	case OpGoCall:
		return rewriteValueAMD64_OpGoCall(v, config)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16(v, config)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U(v, config)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32(v, config)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F(v, config)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U(v, config)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64(v, config)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F(v, config)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U(v, config)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8(v, config)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U(v, config)
	case OpHmul16:
		return rewriteValueAMD64_OpHmul16(v, config)
	case OpHmul16u:
		return rewriteValueAMD64_OpHmul16u(v, config)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32(v, config)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u(v, config)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64(v, config)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u(v, config)
	case OpHmul8:
		return rewriteValueAMD64_OpHmul8(v, config)
	case OpHmul8u:
		return rewriteValueAMD64_OpHmul8u(v, config)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi(v, config)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall(v, config)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v, config)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v, config)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v, config)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v, config)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v, config)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v, config)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v, config)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v, config)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v, config)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v, config)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v, config)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v, config)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v, config)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v, config)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v, config)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v, config)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v, config)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v, config)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v, config)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v, config)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v, config)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v, config)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v, config)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v, config)
	case OpLrot16:
		return rewriteValueAMD64_OpLrot16(v, config)
	case OpLrot32:
		return rewriteValueAMD64_OpLrot32(v, config)
	case OpLrot64:
		return rewriteValueAMD64_OpLrot64(v, config)
	case OpLrot8:
		return rewriteValueAMD64_OpLrot8(v, config)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v, config)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v, config)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v, config)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v, config)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v, config)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v, config)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v, config)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v, config)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v, config)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v, config)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v, config)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v, config)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v, config)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v, config)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v, config)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v, config)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v, config)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v, config)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v, config)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v, config)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v, config)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v, config)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v, config)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v, config)
	case OpMove:
		return rewriteValueAMD64_OpMove(v, config)
	case OpMul16:
		return rewriteValueAMD64_OpMul16(v, config)
	case OpMul32:
		return rewriteValueAMD64_OpMul32(v, config)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F(v, config)
	case OpMul64:
		return rewriteValueAMD64_OpMul64(v, config)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F(v, config)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo(v, config)
	case OpMul8:
		return rewriteValueAMD64_OpMul8(v, config)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16(v, config)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32(v, config)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v, config)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64(v, config)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v, config)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8(v, config)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v, config)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v, config)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v, config)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v, config)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v, config)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v, config)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v, config)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v, config)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck(v, config)
	case OpNot:
		return rewriteValueAMD64_OpNot(v, config)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v, config)
	case OpOr16:
		return rewriteValueAMD64_OpOr16(v, config)
	case OpOr32:
		return rewriteValueAMD64_OpOr32(v, config)
	case OpOr64:
		return rewriteValueAMD64_OpOr64(v, config)
	case OpOr8:
		return rewriteValueAMD64_OpOr8(v, config)
	case OpOrB:
		return rewriteValueAMD64_OpOrB(v, config)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v, config)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v, config)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v, config)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v, config)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v, config)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v, config)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v, config)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v, config)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v, config)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v, config)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v, config)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v, config)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v, config)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v, config)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v, config)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v, config)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v, config)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v, config)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v, config)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v, config)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v, config)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v, config)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v, config)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v, config)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v, config)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v, config)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v, config)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v, config)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v, config)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v, config)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v, config)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v, config)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v, config)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v, config)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32(v, config)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64(v, config)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64(v, config)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16(v, config)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32(v, config)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64(v, config)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v, config)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt(v, config)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall(v, config)
	case OpStore:
		return rewriteValueAMD64_OpStore(v, config)
	case OpSub16:
		return rewriteValueAMD64_OpSub16(v, config)
	case OpSub32:
		return rewriteValueAMD64_OpSub32(v, config)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F(v, config)
	case OpSub64:
		return rewriteValueAMD64_OpSub64(v, config)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F(v, config)
	case OpSub8:
		return rewriteValueAMD64_OpSub8(v, config)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr(v, config)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8(v, config)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16(v, config)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8(v, config)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16(v, config)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32(v, config)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8(v, config)
	case OpXor16:
		return rewriteValueAMD64_OpXor16(v, config)
	case OpXor32:
		return rewriteValueAMD64_OpXor32(v, config)
	case OpXor64:
		return rewriteValueAMD64_OpXor64(v, config)
	case OpXor8:
		return rewriteValueAMD64_OpXor8(v, config)
	case OpZero:
		return rewriteValueAMD64_OpZero(v, config)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32(v, config)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64(v, config)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64(v, config)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16(v, config)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32(v, config)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64(v, config)
	}
	return false
}
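// rewriteValueAMD64_OpAMD64ADDL folds a constant operand into ADDLconst and
// rewrites an addition of a negation, (ADDL x (NEGL y)), as (SUBL x y).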
func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
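// rewriteValueAMD64_OpAMD64ADDLconst simplifies 32-bit constant adds: adding
// zero is dropped, chained constants are combined, and a constant add folds
// into a LEAL displacement while the combined offset still fits in 32 bits.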
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
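// rewriteValueAMD64_OpAMD64ADDQ folds 32-bit-representable constants into
// ADDQconst, strength-reduces adds of shifted or duplicated operands into
// scaled LEAQ forms (e.g. (ADDQ x (SHLQconst [3] y)) becomes (LEAQ8 x y)),
// and rewrites an addition of a negation as SUBQ.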
func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQconst [c] y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} x) y)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
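// rewriteValueAMD64_OpAMD64ADDQconst folds constant adds into LEAQ, LEAQ1,
// LEAQ2, LEAQ4, and LEAQ8 displacements when the combined offset fits in 32
// bits, drops adds of zero, and evaluates adds of known constants.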
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
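// rewriteValueAMD64_OpAMD64ANDL folds a constant operand into ANDLconst and
// simplifies (ANDL x x) to x.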
func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
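// rewriteValueAMD64_OpAMD64ANDLconst merges chained constant masks, lowers
// the 0xFF and 0xFFFF masks to zero-extensions, and evaluates masks whose
// low 32 bits are all zeros or all ones.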
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
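// rewriteValueAMD64_OpAMD64ANDQ folds a 32-bit-representable constant
// operand into ANDQconst and simplifies (ANDQ x x) to x.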
func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
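// rewriteValueAMD64_OpAMD64ANDQconst merges chained constant masks, lowers
// the 0xFF, 0xFFFF, and 0xFFFFFFFF masks to zero-extensions, and evaluates
// masks that are all zeros or all ones.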
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
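// rewriteValueAMD64_OpAMD64CMPB moves a constant operand into CMPBconst,
// inserting InvertFlags when the constant was on the left.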
func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
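// rewriteValueAMD64_OpAMD64CMPBconst evaluates byte comparisons with
// statically known outcomes to flag constants and turns comparisons with
// zero into TESTB forms.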
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
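// rewriteValueAMD64_OpAMD64CMPL moves a constant operand into CMPLconst,
// inserting InvertFlags when the constant was on the left.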
func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
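// rewriteValueAMD64_OpAMD64CMPLconst evaluates 32-bit comparisons with
// statically known outcomes (constant operands, or shifted and masked
// operands with bounded ranges) to flag constants, and turns comparisons
// with zero into TESTL forms.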
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
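// rewriteValueAMD64_OpAMD64CMPQ moves a 32-bit-representable constant
// operand into CMPQconst, inserting InvertFlags when the constant was on
// the left.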
func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
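// rewriteValueAMD64_OpAMD64CMPQconst evaluates 64-bit comparisons with
// statically known outcomes (constant operands, or zero-extended, shifted,
// and masked operands with bounded ranges) to flag constants, and turns
// comparisons with zero into TESTQ forms.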
| func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x==y |
| // result: (FlagEQ) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x == y) { |
| break |
| } |
| v.reset(OpAMD64FlagEQ) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x<y && uint64(x)<uint64(y) |
| // result: (FlagLT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x < y && uint64(x) < uint64(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x<y && uint64(x)>uint64(y) |
| // result: (FlagLT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x < y && uint64(x) > uint64(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_UGT) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x>y && uint64(x)<uint64(y) |
| // result: (FlagGT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x > y && uint64(x) < uint64(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x>y && uint64(x)>uint64(y) |
| // result: (FlagGT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x > y && uint64(x) > uint64(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_UGT) |
| return true |
| } |
| // match: (CMPQconst (MOVBQZX _) [c]) |
| // cond: 0xFF < c |
| // result: (FlagLT_ULT) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVBQZX { |
| break |
| } |
| if !(0xFF < c) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVWQZX _) [c]) |
| // cond: 0xFFFF < c |
| // result: (FlagLT_ULT) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVWQZX { |
| break |
| } |
| if !(0xFFFF < c) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVLQZX _) [c]) |
| // cond: 0xFFFFFFFF < c |
| // result: (FlagLT_ULT) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLQZX { |
| break |
| } |
| if !(0xFFFFFFFF < c) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (SHRQconst _ [c]) [n]) |
| // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHRQconst { |
| break |
| } |
| c := v_0.AuxInt |
| if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (ANDQconst _ [m]) [n]) |
| // cond: 0 <= m && m < n |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDQconst { |
| break |
| } |
| m := v_0.AuxInt |
| if !(0 <= m && m < n) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
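| // Comparisons against zero become TEST instructions: TEST performs an AND |
| // and sets the same flags a compare with zero would, without encoding an |
| // immediate, and (TESTQ x x) is the standard way to set flags from x |
| // alone. When the compared value is itself an ANDQ or ANDQconst, the AND |
| // and the compare fuse into a single TESTQ or TESTQconst. |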
| // match: (CMPQconst (ANDQ x y) [0]) |
| // cond: |
| // result: (TESTQ x y) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDQ { |
| break |
| } |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| v.reset(OpAMD64TESTQ) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (CMPQconst (ANDQconst [c] x) [0]) |
| // cond: |
| // result: (TESTQconst [c] x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDQconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64TESTQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPQconst x [0]) |
| // cond: |
| // result: (TESTQ x x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64TESTQ) |
| v.AddArg(x) |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
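| // CMPW applies the same scheme at 16-bit width. Constants pass through |
| // int64(int16(c)) so the AuxInt holds the sign-extended 16-bit value, and |
| // the constant-on-the-left case again swaps operands via InvertFlags. |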
| func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPW x (MOVLconst [c])) |
| // cond: |
| // result: (CMPWconst x [int64(int16(c))]) |
| for { |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_1.AuxInt |
| v.reset(OpAMD64CMPWconst) |
| v.AuxInt = int64(int16(c)) |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPW (MOVLconst [c]) x) |
| // cond: |
| // result: (InvertFlags (CMPWconst x [int64(int16(c))])) |
| for { |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v.Args[1] |
| v.reset(OpAMD64InvertFlags) |
| v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) |
| v0.AuxInt = int64(int16(c)) |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)==int16(y) |
| // result: (FlagEQ) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) == int16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagEQ) |
| return true |
| } |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)<int16(y) && uint16(x)<uint16(y) |
| // result: (FlagLT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)<int16(y) && uint16(x)>uint16(y) |
| // result: (FlagLT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_UGT) |
| return true |
| } |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)>int16(y) && uint16(x)<uint16(y) |
| // result: (FlagGT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_ULT) |
| return true |
| } |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)>int16(y) && uint16(x)>uint16(y) |
| // result: (FlagGT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_UGT) |
| return true |
| } |
| // match: (CMPWconst (ANDLconst _ [m]) [n]) |
| // cond: 0 <= int16(m) && int16(m) < int16(n) |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| m := v_0.AuxInt |
| if !(0 <= int16(m) && int16(m) < int16(n)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPWconst (ANDL x y) [0]) |
| // cond: |
| // result: (TESTW x y) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDL { |
| break |
| } |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| v.reset(OpAMD64TESTW) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (CMPWconst (ANDLconst [c] x) [0]) |
| // cond: |
| // result: (TESTWconst [int64(int16(c))] x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64TESTWconst) |
| v.AuxInt = int64(int16(c)) |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPWconst x [0]) |
| // cond: |
| // result: (TESTW x x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64TESTW) |
| v.AddArg(x) |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
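| // For the locked compare-and-swap ops the only rewrite is address folding: |
| // an ADDQconst applied to the pointer merges into the instruction's |
| // offset, provided the combined offset still fits in 32 bits. |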
| func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) |
| // cond: is32Bit(off1+off2) |
| // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) |
| for { |
| off1 := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| off2 := v_0.AuxInt |
| ptr := v_0.Args[0] |
| old := v.Args[1] |
| new_ := v.Args[2] |
| mem := v.Args[3] |
| if !(is32Bit(off1 + off2)) { |
| break |
| } |
| v.reset(OpAMD64CMPXCHGLlock) |
| v.AuxInt = off1 + off2 |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(old) |
| v.AddArg(new_) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) |
| // cond: is32Bit(off1+off2) |
| // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) |
| for { |
| off1 := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| off2 := v_0.AuxInt |
| ptr := v_0.Args[0] |
| old := v.Args[1] |
| new_ := v.Args[2] |
| mem := v.Args[3] |
| if !(is32Bit(off1 + off2)) { |
| break |
| } |
| v.reset(OpAMD64CMPXCHGQlock) |
| v.AuxInt = off1 + off2 |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(old) |
| v.AddArg(new_) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
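| // The LEA rules fold address arithmetic into x86 addressing modes. An |
| // ADDQconst (or ADDLconst) operand merges into the 32-bit displacement, an |
| // ADDQ of two values becomes a two-register LEAQ1, and nested LEAs combine |
| // their offsets and symbols through mergeSym. The x.Op != OpSB conditions |
| // keep the SB pseudo-register (the static base) out of operand positions |
| // it cannot occupy in the final addressing mode. |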
| func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (LEAL [c] {s} (ADDLconst [d] x)) |
| // cond: is32Bit(c+d) |
| // result: (LEAL [c+d] {s} x) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDLconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAL) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (LEAQ [c] {s} (ADDQconst [d] x)) |
| // cond: is32Bit(c+d) |
| // result: (LEAQ [c+d] {s} x) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| return true |
| } |
| // match: (LEAQ [c] {s} (ADDQ x y)) |
| // cond: x.Op != OpSB && y.Op != OpSB |
| // result: (LEAQ1 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(x.Op != OpSB && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ1 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ2 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ4 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ8 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
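| // LEAQ1, LEAQ2, LEAQ4, and LEAQ8 are LEAs with index scale factors 1, 2, |
| // 4, and 8. A constant shift of the index upgrades the scale: x + (y<<1) |
| // is the same address as x + 2*y, so (LEAQ1 [c] {s} x (SHLQconst [1] y)) |
| // becomes (LEAQ2 [c] {s} x y), and shifts by 2 and 3 likewise yield LEAQ4 |
| // and LEAQ8. When the shifted operand is on the left, the arguments swap |
| // so that the scaled value lands in the index slot. |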
| func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) |
| // cond: is32Bit(c+d) && x.Op != OpSB |
| // result: (LEAQ1 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(c+d) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} x (ADDQconst [d] y)) |
| // cond: is32Bit(c+d) && y.Op != OpSB |
| // result: (LEAQ1 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_1.AuxInt |
| y := v_1.Args[0] |
| if !(is32Bit(c+d) && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) |
| // cond: |
| // result: (LEAQ2 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 1 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) |
| // cond: |
| // result: (LEAQ2 [c] {s} y x) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 1 { |
| break |
| } |
| x := v_0.Args[0] |
| y := v.Args[1] |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(y) |
| v.AddArg(x) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) |
| // cond: |
| // result: (LEAQ4 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 2 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) |
| // cond: |
| // result: (LEAQ4 [c] {s} y x) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 2 { |
| break |
| } |
| x := v_0.Args[0] |
| y := v.Args[1] |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(y) |
| v.AddArg(x) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) |
| // cond: |
| // result: (LEAQ8 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 3 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y) |
| // cond: |
| // result: (LEAQ8 [c] {s} y x) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 3 { |
| break |
| } |
| x := v_0.Args[0] |
| y := v.Args[1] |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(y) |
| v.AddArg(x) |
| return true |
| } |
| // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB |
| // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB |
| // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_1.AuxInt |
| sym2 := v_1.Aux |
| y := v_1.Args[0] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
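| // In the scaled forms, a constant added to the index is multiplied by the |
| // scale as it moves into the displacement: c + x + 2*(y+d) equals |
| // (c+2*d) + x + 2*y, hence the c+2*d below (and c+4*d, c+8*d for LEAQ4 and |
| // LEAQ8). A constant added to the unscaled base folds in as plain c+d. |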
| func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) |
| // cond: is32Bit(c+d) && x.Op != OpSB |
| // result: (LEAQ2 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(c+d) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) |
| // cond: is32Bit(c+2*d) && y.Op != OpSB |
| // result: (LEAQ2 [c+2*d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_1.AuxInt |
| y := v_1.Args[0] |
| if !(is32Bit(c+2*d) && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c + 2*d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) |
| // cond: |
| // result: (LEAQ4 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 1 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) |
| // cond: |
| // result: (LEAQ8 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 2 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB |
| // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) |
| // cond: is32Bit(c+d) && x.Op != OpSB |
| // result: (LEAQ4 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(c+d) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) |
| // cond: is32Bit(c+4*d) && y.Op != OpSB |
| // result: (LEAQ4 [c+4*d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_1.AuxInt |
| y := v_1.Args[0] |
| if !(is32Bit(c+4*d) && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c + 4*d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) |
| // cond: |
| // result: (LEAQ8 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 1 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB |
| // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) |
| // cond: is32Bit(c+d) && x.Op != OpSB |
| // result: (LEAQ8 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(c+d) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) |
| // cond: is32Bit(c+8*d) && y.Op != OpSB |
| // result: (LEAQ8 [c+8*d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_1.AuxInt |
| y := v_1.Args[0] |
| if !(is32Bit(c+8*d) && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c + 8*d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB |
| // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
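| // MOVBQSX sign-extends a byte to 64 bits. When its operand is a load with |
| // exactly one use, the extension folds into the load: a MOVBQSXload is |
| // created in the load's own block (the @x.Block in the result) and |
| // clobber(x) marks the original load for removal. Loading a wider value |
| // and sign-extending its low byte reads the same byte of memory on |
| // little-endian x86, which is why MOVWload, MOVLload, and MOVQload |
| // operands are all handled the same way. |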
| func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVBload { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| mem := x.Args[1] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(mem) |
| return true |
| } |
| // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVWload { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| mem := x.Args[1] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(mem) |
| return true |
| } |
| // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVLload { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| mem := x.Args[1] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(mem) |
| return true |
| } |
| // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVQload { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| mem := x.Args[1] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(mem) |
| return true |
| } |
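| // Sign extension is a no-op when the sign bit is known clear: a mask whose |
| // bit 7 is zero guarantees a non-negative byte, so the MOVBQSX collapses |
| // into the mask itself, narrowed to its low seven bits (c & 0x7f). |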
| // match: (MOVBQSX (ANDLconst [c] x)) |
| // cond: c & 0x80 == 0 |
| // result: (ANDLconst [c & 0x7f] x) |
| for { |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| if !(c&0x80 == 0) { |
| break |
| } |
| v.reset(OpAMD64ANDLconst) |
| v.AuxInt = c & 0x7f |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| base := v_0.Args[0] |
| mem := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBQSXload) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(base) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
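| // MOVBQZX mirrors the MOVBQSX rules with zero-extending loads, also |
| // covering the indexed MOVBloadidx1 form, and zero extension of a masked |
| // value reduces to masking with c & 0xff. |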
| func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVBload { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| mem := x.Args[1] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(mem) |
| return true |
| } |
| // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVWload { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| mem := x.Args[1] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(mem) |
| return true |
| } |
| // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVLload { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| mem := x.Args[1] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(mem) |
| return true |
| } |
| // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVQload { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| mem := x.Args[1] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(mem) |
| return true |
| } |
| // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) |
| for { |
| x := v.Args[0] |
| if x.Op != OpAMD64MOVBloadidx1 { |
| break |
| } |
| off := x.AuxInt |
| sym := x.Aux |
| ptr := x.Args[0] |
| idx := x.Args[1] |
| mem := x.Args[2] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| b = x.Block |
| v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type) |
| v.reset(OpCopy) |
| v.AddArg(v0) |
| v0.AuxInt = off |
| v0.Aux = sym |
| v0.AddArg(ptr) |
| v0.AddArg(idx) |
| v0.AddArg(mem) |
| return true |
| } |
| // match: (MOVBQZX (ANDLconst [c] x)) |
| // cond: |
| // result: (ANDLconst [c & 0xff] x) |
| for { |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64ANDLconst) |
| v.AuxInt = c & 0xff |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
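| // The MOVBload rules implement store-to-load forwarding and address |
| // folding: a byte load that observes a byte store to the same pointer, |
| // offset, and symbol returns the stored value directly; ADDQconst, LEAQ, |
| // LEAL, and ADDLconst operands fold into the load's offset and symbol; and |
| // an ADDQ or LEAQ1 address turns the load into its indexed MOVBloadidx1 |
| // form. |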
| func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) |
| // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) |
| // result: x |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| ptr := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVBstore { |
| break |
| } |
| off2 := v_1.AuxInt |
| sym2 := v_1.Aux |
| ptr2 := v_1.Args[0] |
| x := v_1.Args[1] |
| if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { |
| break |
| } |
| v.reset(OpCopy) |
| v.Type = x.Type |
| v.AddArg(x) |
| return true |
| } |
| // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) |
| // cond: is32Bit(off1+off2) |
| // result: (MOVBload [off1+off2] {sym} ptr mem) |
| for { |
| off1 := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| off2 := v_0.AuxInt |
| ptr := v_0.Args[0] |
| mem := v.Args[1] |
| if !(is32Bit(off1 + off2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBload) |
| v.AuxInt = off1 + off2 |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| base := v_0.Args[0] |
| mem := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBload) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(base) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ1 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| ptr := v_0.Args[0] |
| idx := v_0.Args[1] |
| mem := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBloadidx1) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(ptr) |
| v.AddArg(idx) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) |
| // cond: ptr.Op != OpSB |
| // result: (MOVBloadidx1 [off] {sym} ptr idx mem) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| ptr := v_0.Args[0] |
| idx := v_0.Args[1] |
| mem := v.Args[1] |
| if !(ptr.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64MOVBloadidx1) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(idx) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) |
| // cond: canMergeSym(sym1, sym2) |
| // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAL { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| base := v_0.Args[0] |
| mem := v.Args[1] |
| if !(canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBload) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(base) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) |
| // cond: is32Bit(off1+off2) |
| // result: (MOVBload [off1+off2] {sym} ptr mem) |
| for { |
| off1 := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDLconst { |
| break |
| } |
| off2 := v_0.AuxInt |
| ptr := v_0.Args[0] |
| mem := v.Args[1] |
| if !(is32Bit(off1 + off2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBload) |
| v.AuxInt = off1 + off2 |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
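| // In the indexed load, pointer and index contribute symmetrically to the |
| // address, so an ADDQconst on either argument folds into the displacement. |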
| func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) |
| // cond: |
| // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) |
| for { |
| c := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| ptr := v_0.Args[0] |
| idx := v.Args[1] |
| mem := v.Args[2] |
| v.reset(OpAMD64MOVBloadidx1) |
| v.AuxInt = c + d |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(idx) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) |
| // cond: |
| // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) |
| for { |
| c := v.AuxInt |
| sym := v.Aux |
| ptr := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_1.AuxInt |
| idx := v_1.Args[0] |
| mem := v.Args[2] |
| v.reset(OpAMD64MOVBloadidx1) |
| v.AuxInt = c + d |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(idx) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
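| // The MOVBstore rules mirror the load rules and additionally combine |
| // adjacent byte stores. Because only the low byte is stored, a MOVBQSX or |
| // MOVBQZX wrapped around the stored value is redundant and is stripped. |
| // The long patterns below recognize a run of single-byte stores that write |
| // a value's bytes to consecutive addresses and replace them with one wider |
| // store, inserting a BSWAP when the bytes were written in big-endian |
| // order. |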
| func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) |
| // cond: |
| // result: (MOVBstore [off] {sym} ptr x mem) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| ptr := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVBQSX { |
| break |
| } |
| x := v_1.Args[0] |
| mem := v.Args[2] |
| v.reset(OpAMD64MOVBstore) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(x) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) |
| // cond: |
| // result: (MOVBstore [off] {sym} ptr x mem) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| ptr := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVBQZX { |
| break |
| } |
| x := v_1.Args[0] |
| mem := v.Args[2] |
| v.reset(OpAMD64MOVBstore) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(x) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) |
| // cond: is32Bit(off1+off2) |
| // result: (MOVBstore [off1+off2] {sym} ptr val mem) |
| for { |
| off1 := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| off2 := v_0.AuxInt |
| ptr := v_0.Args[0] |
| val := v.Args[1] |
| mem := v.Args[2] |
| if !(is32Bit(off1 + off2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstore) |
| v.AuxInt = off1 + off2 |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(val) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) |
| // cond: validOff(off) |
| // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| ptr := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_1.AuxInt |
| mem := v.Args[2] |
| if !(validOff(off)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstoreconst) |
| v.AuxInt = makeValAndOff(int64(int8(c)), off) |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| base := v_0.Args[0] |
| val := v.Args[1] |
| mem := v.Args[2] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstore) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(base) |
| v.AddArg(val) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ1 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| ptr := v_0.Args[0] |
| idx := v_0.Args[1] |
| val := v.Args[1] |
| mem := v.Args[2] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstoreidx1) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(ptr) |
| v.AddArg(idx) |
| v.AddArg(val) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) |
| // cond: ptr.Op != OpSB |
| // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| ptr := v_0.Args[0] |
| idx := v_0.Args[1] |
| val := v.Args[1] |
| mem := v.Args[2] |
| if !(ptr.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64MOVBstoreidx1) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(idx) |
| v.AddArg(val) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) |
| // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) |
| // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) |
| for { |
| i := v.AuxInt |
| s := v.Aux |
| p := v.Args[0] |
| w := v.Args[1] |
| x2 := v.Args[2] |
| if x2.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x2.AuxInt != i-1 { |
| break |
| } |
| if x2.Aux != s { |
| break |
| } |
| if p != x2.Args[0] { |
| break |
| } |
| x2_1 := x2.Args[1] |
| if x2_1.Op != OpAMD64SHRLconst { |
| break |
| } |
| if x2_1.AuxInt != 8 { |
| break |
| } |
| if w != x2_1.Args[0] { |
| break |
| } |
| x1 := x2.Args[2] |
| if x1.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x1.AuxInt != i-2 { |
| break |
| } |
| if x1.Aux != s { |
| break |
| } |
| if p != x1.Args[0] { |
| break |
| } |
| x1_1 := x1.Args[1] |
| if x1_1.Op != OpAMD64SHRLconst { |
| break |
| } |
| if x1_1.AuxInt != 16 { |
| break |
| } |
| if w != x1_1.Args[0] { |
| break |
| } |
| x0 := x1.Args[2] |
| if x0.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x0.AuxInt != i-3 { |
| break |
| } |
| if x0.Aux != s { |
| break |
| } |
| if p != x0.Args[0] { |
| break |
| } |
| x0_1 := x0.Args[1] |
| if x0_1.Op != OpAMD64SHRLconst { |
| break |
| } |
| if x0_1.AuxInt != 24 { |
| break |
| } |
| if w != x0_1.Args[0] { |
| break |
| } |
| mem := x0.Args[2] |
| if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { |
| break |
| } |
| v.reset(OpAMD64MOVLstore) |
| v.AuxInt = i - 3 |
| v.Aux = s |
| v.AddArg(p) |
| v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, w.Type) |
| v0.AddArg(w) |
| v.AddArg(v0) |
| v.AddArg(mem) |
| return true |
| } |
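| // The 8-byte analogue: eight byte stores of w's bytes, most significant at |
| // i-7 down to least significant at i, collapse into a single MOVQstore of |
| // BSWAPQ(w). |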
| // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) |
| // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) |
| // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) |
| for { |
| i := v.AuxInt |
| s := v.Aux |
| p := v.Args[0] |
| w := v.Args[1] |
| x6 := v.Args[2] |
| if x6.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x6.AuxInt != i-1 { |
| break |
| } |
| if x6.Aux != s { |
| break |
| } |
| if p != x6.Args[0] { |
| break |
| } |
| x6_1 := x6.Args[1] |
| if x6_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| if x6_1.AuxInt != 8 { |
| break |
| } |
| if w != x6_1.Args[0] { |
| break |
| } |
| x5 := x6.Args[2] |
| if x5.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x5.AuxInt != i-2 { |
| break |
| } |
| if x5.Aux != s { |
| break |
| } |
| if p != x5.Args[0] { |
| break |
| } |
| x5_1 := x5.Args[1] |
| if x5_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| if x5_1.AuxInt != 16 { |
| break |
| } |
| if w != x5_1.Args[0] { |
| break |
| } |
| x4 := x5.Args[2] |
| if x4.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x4.AuxInt != i-3 { |
| break |
| } |
| if x4.Aux != s { |
| break |
| } |
| if p != x4.Args[0] { |
| break |
| } |
| x4_1 := x4.Args[1] |
| if x4_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| if x4_1.AuxInt != 24 { |
| break |
| } |
| if w != x4_1.Args[0] { |
| break |
| } |
| x3 := x4.Args[2] |
| if x3.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x3.AuxInt != i-4 { |
| break |
| } |
| if x3.Aux != s { |
| break |
| } |
| if p != x3.Args[0] { |
| break |
| } |
| x3_1 := x3.Args[1] |
| if x3_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| if x3_1.AuxInt != 32 { |
| break |
| } |
| if w != x3_1.Args[0] { |
| break |
| } |
| x2 := x3.Args[2] |
| if x2.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x2.AuxInt != i-5 { |
| break |
| } |
| if x2.Aux != s { |
| break |
| } |
| if p != x2.Args[0] { |
| break |
| } |
| x2_1 := x2.Args[1] |
| if x2_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| if x2_1.AuxInt != 40 { |
| break |
| } |
| if w != x2_1.Args[0] { |
| break |
| } |
| x1 := x2.Args[2] |
| if x1.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x1.AuxInt != i-6 { |
| break |
| } |
| if x1.Aux != s { |
| break |
| } |
| if p != x1.Args[0] { |
| break |
| } |
| x1_1 := x1.Args[1] |
| if x1_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| if x1_1.AuxInt != 48 { |
| break |
| } |
| if w != x1_1.Args[0] { |
| break |
| } |
| x0 := x1.Args[2] |
| if x0.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x0.AuxInt != i-7 { |
| break |
| } |
| if x0.Aux != s { |
| break |
| } |
| if p != x0.Args[0] { |
| break |
| } |
| x0_1 := x0.Args[1] |
| if x0_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| if x0_1.AuxInt != 56 { |
| break |
| } |
| if w != x0_1.Args[0] { |
| break |
| } |
| mem := x0.Args[2] |
| if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { |
| break |
| } |
| v.reset(OpAMD64MOVQstore) |
| v.AuxInt = i - 7 |
| v.Aux = s |
| v.AddArg(p) |
| v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, w.Type) |
| v0.AddArg(w) |
| v.AddArg(v0) |
| v.AddArg(mem) |
| return true |
| } |
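| // Two adjacent byte stores holding consecutive bytes of the same value (w |
| // and w>>8, or more generally w>>(j-8) and w>>j) merge into a single |
| // MOVWstore of the less-shifted value. |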
| // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: (MOVWstore [i-1] {s} p w mem) |
| for { |
| i := v.AuxInt |
| s := v.Aux |
| p := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| if v_1.AuxInt != 8 { |
| break |
| } |
| w := v_1.Args[0] |
| x := v.Args[2] |
| if x.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x.AuxInt != i-1 { |
| break |
| } |
| if x.Aux != s { |
| break |
| } |
| if p != x.Args[0] { |
| break |
| } |
| if w != x.Args[1] { |
| break |
| } |
| mem := x.Args[2] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| v.reset(OpAMD64MOVWstore) |
| v.AuxInt = i - 1 |
| v.Aux = s |
| v.AddArg(p) |
| v.AddArg(w) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) |
| // cond: x.Uses == 1 && clobber(x) |
| // result: (MOVWstore [i-1] {s} p w0 mem) |
| for { |
| i := v.AuxInt |
| s := v.Aux |
| p := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| j := v_1.AuxInt |
| w := v_1.Args[0] |
| x := v.Args[2] |
| if x.Op != OpAMD64MOVBstore { |
| break |
| } |
| if x.AuxInt != i-1 { |
| break |
| } |
| if x.Aux != s { |
| break |
| } |
| if p != x.Args[0] { |
| break |
| } |
| w0 := x.Args[1] |
| if w0.Op != OpAMD64SHRQconst { |
| break |
| } |
| if w0.AuxInt != j-8 { |
| break |
| } |
| if w != w0.Args[0] { |
| break |
| } |
| mem := x.Args[2] |
| if !(x.Uses == 1 && clobber(x)) { |
| break |
| } |
| v.reset(OpAMD64MOVWstore) |
| v.AuxInt = i - 1 |
| v.Aux = s |
| v.AddArg(p) |
| v.AddArg(w0) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) |
| // cond: canMergeSym(sym1, sym2) |
| // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAL { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| base := v_0.Args[0] |
| val := v.Args[1] |
| mem := v.Args[2] |
| if !(canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstore) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(base) |
| v.AddArg(val) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) |
| // cond: is32Bit(off1+off2) |
| // result: (MOVBstore [off1+off2] {sym} ptr val mem) |
| for { |
| off1 := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDLconst { |
| break |
| } |
| off2 := v_0.AuxInt |
| ptr := v_0.Args[0] |
| val := v.Args[1] |
| mem := v.Args[2] |
| if !(is32Bit(off1 + off2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstore) |
| v.AuxInt = off1 + off2 |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(val) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
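| // MOVBstoreconst packs both the stored value and the store offset into one |
| // ValAndOff AuxInt, so folding an address constant adjusts only the offset |
| // half (ValAndOff(sc).add(off), guarded by canAdd). The last pattern below |
| // merges two adjacent constant byte stores into one MOVWstoreconst whose |
| // 16-bit value is assembled from the two byte constants. |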
| func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { |
| b := v.Block |
| _ = b |
| // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) |
| // cond: ValAndOff(sc).canAdd(off) |
| // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) |
| for { |
| sc := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| off := v_0.AuxInt |
| ptr := v_0.Args[0] |
| mem := v.Args[1] |
| if !(ValAndOff(sc).canAdd(off)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstoreconst) |
| v.AuxInt = ValAndOff(sc).add(off) |
| v.Aux = s |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) |
| // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) |
| // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) |
| for { |
| sc := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off := v_0.AuxInt |
| sym2 := v_0.Aux |
| ptr := v_0.Args[0] |
| mem := v.Args[1] |
| if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstoreconst) |
| v.AuxInt = ValAndOff(sc).add(off) |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) |
| // cond: canMergeSym(sym1, sym2) |
| // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) |
| for { |
| x := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ1 { |
| break |
| } |
| off := v_0.AuxInt |
| sym2 := v_0.Aux |
| ptr := v_0.Args[0] |
| idx := v_0.Args[1] |
| mem := v.Args[1] |
| if !(canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64MOVBstoreconstidx1) |
| v.AuxInt = ValAndOff(x).add(off) |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(ptr) |
| v.AddArg(idx) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) |
| // cond: |
| // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) |
| for { |
| x := v.AuxInt |
| sym := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| ptr := v_0.Args[0] |
| idx := v_0.Args[1] |
| mem := v.Args[1] |
| v.reset(OpAMD64MOVBstoreconstidx1) |
| v.AuxInt = x |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(idx) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) |
| // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) |
| // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| p := v.Args[0] |
| x := v.Args[1] |
| |