// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used

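// rewriteValueAMD64 dispatches on v's opcode and tries the rewrite rules
// generated for that opcode, reporting whether any rule rewrote v.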
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDLconstmem:
		return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v)
	case OpAMD64ADDLmem:
		return rewriteValueAMD64_OpAMD64ADDLmem_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDQconstmem:
		return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v)
	case OpAMD64ADDQmem:
		return rewriteValueAMD64_OpAMD64ADDQmem_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDmem:
		return rewriteValueAMD64_OpAMD64ADDSDmem_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSmem:
		return rewriteValueAMD64_OpAMD64ADDSSmem_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLmem:
		return rewriteValueAMD64_OpAMD64ANDLmem_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQmem:
		return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDmem:
		return rewriteValueAMD64_OpAMD64MULSDmem_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSmem:
		return rewriteValueAMD64_OpAMD64MULSSmem_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLmem:
		return rewriteValueAMD64_OpAMD64ORLmem_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQmem:
		return rewriteValueAMD64_OpAMD64ORQmem_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEmem:
		return rewriteValueAMD64_OpAMD64SETAEmem_0(v)
	case OpAMD64SETAmem:
		return rewriteValueAMD64_OpAMD64SETAmem_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEmem:
		return rewriteValueAMD64_OpAMD64SETBEmem_0(v)
	case OpAMD64SETBmem:
		return rewriteValueAMD64_OpAMD64SETBmem_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETEQmem:
		return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEmem:
		return rewriteValueAMD64_OpAMD64SETGEmem_0(v)
	case OpAMD64SETGmem:
		return rewriteValueAMD64_OpAMD64SETGmem_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEmem:
		return rewriteValueAMD64_OpAMD64SETLEmem_0(v)
	case OpAMD64SETLmem:
		return rewriteValueAMD64_OpAMD64SETLmem_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SETNEmem:
		return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLmem:
		return rewriteValueAMD64_OpAMD64SUBLmem_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQmem:
		return rewriteValueAMD64_OpAMD64SUBQmem_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDmem:
		return rewriteValueAMD64_OpAMD64SUBSDmem_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSmem:
		return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLmem:
		return rewriteValueAMD64_OpAMD64XORLmem_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQmem:
		return rewriteValueAMD64_OpAMD64XORQmem_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpCeil:
		return rewriteValueAMD64_OpCeil_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpWB:
		return rewriteValueAMD64_OpWB_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
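// Each rewriteValueAMD64_Op*_N function below is generated from the rules
// in gen/AMD64.rules; rules for an op are emitted in chunks of ten (the
// _0, _10, ... suffixes chained with || in the dispatcher above). Every
// rule is a match/cond/result triple. For illustration, the first ADDL
// rule below corresponds to a rules-file entry of roughly the form
// (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x).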
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
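// The _10 chunk holds the load-folding rules: when canMergeLoad reports
// that the MOVLload feeding the add can be subsumed, the pair is rewritten
// to the combined ADDLmem op, and clobber marks the old load as dead.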
| func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool { |
| // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ADDLmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| l := v.Args[1] |
| if l.Op != OpAMD64MOVLload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ADDLmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (ADDL l:(MOVLload [off] {sym} ptr mem) x) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ADDLmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| l := v.Args[0] |
| if l.Op != OpAMD64MOVLload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| x := v.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ADDLmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { |
| // match: (ADDLconst [c] x) |
| // cond: int32(c)==0 |
| // result: x |
| for { |
| c := v.AuxInt |
| x := v.Args[0] |
| if !(int32(c) == 0) { |
| break |
| } |
| v.reset(OpCopy) |
| v.Type = x.Type |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDLconst [c] (MOVLconst [d])) |
| // cond: |
| // result: (MOVLconst [int64(int32(c+d))]) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| d := v_0.AuxInt |
| v.reset(OpAMD64MOVLconst) |
| v.AuxInt = int64(int32(c + d)) |
| return true |
| } |
| // match: (ADDLconst [c] (ADDLconst [d] x)) |
| // cond: |
| // result: (ADDLconst [int64(int32(c+d))] x) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDLconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64ADDLconst) |
| v.AuxInt = int64(int32(c + d)) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDLconst [c] (LEAL [d] {s} x)) |
| // cond: is32Bit(c+d) |
| // result: (LEAL [c+d] {s} x) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAL { |
| break |
| } |
| d := v_0.AuxInt |
| s := v_0.Aux |
| x := v_0.Args[0] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAL) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| typ := &b.Func.Config.Types |
| _ = typ |
| // match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) |
| // cond: |
| // result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x)) |
| for { |
| valOff := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[1] |
| ptr := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVSSstore { |
| break |
| } |
| if v_1.AuxInt != ValAndOff(valOff).Off() { |
| break |
| } |
| if v_1.Aux != sym { |
| break |
| } |
| _ = v_1.Args[2] |
| if ptr != v_1.Args[0] { |
| break |
| } |
| x := v_1.Args[1] |
| v.reset(OpAMD64ADDLconst) |
| v.AuxInt = ValAndOff(valOff).Val() |
| v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| typ := &b.Func.Config.Types |
| _ = typ |
| // match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) |
| // cond: |
| // result: (ADDL x (MOVLf2i y)) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[2] |
| x := v.Args[0] |
| ptr := v.Args[1] |
| v_2 := v.Args[2] |
| if v_2.Op != OpAMD64MOVSSstore { |
| break |
| } |
| if v_2.AuxInt != off { |
| break |
| } |
| if v_2.Aux != sym { |
| break |
| } |
| _ = v_2.Args[2] |
| if ptr != v_2.Args[0] { |
| break |
| } |
| y := v_2.Args[1] |
| v.reset(OpAMD64ADDL) |
| v.AddArg(x) |
| v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) |
| v0.AddArg(y) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { |
| // match: (ADDQ x (MOVQconst [c])) |
| // cond: is32Bit(c) |
| // result: (ADDQconst [c] x) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVQconst { |
| break |
| } |
| c := v_1.AuxInt |
| if !(is32Bit(c)) { |
| break |
| } |
| v.reset(OpAMD64ADDQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQ (MOVQconst [c]) x) |
| // cond: is32Bit(c) |
| // result: (ADDQconst [c] x) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v.Args[1] |
| if !(is32Bit(c)) { |
| break |
| } |
| v.reset(OpAMD64ADDQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d])) |
| // cond: d==64-c |
| // result: (ROLQconst x [c]) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHRQconst { |
| break |
| } |
| d := v_1.AuxInt |
| if x != v_1.Args[0] { |
| break |
| } |
| if !(d == 64-c) { |
| break |
| } |
| v.reset(OpAMD64ROLQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c])) |
| // cond: d==64-c |
| // result: (ROLQconst x [c]) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHRQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| c := v_1.AuxInt |
| if x != v_1.Args[0] { |
| break |
| } |
| if !(d == 64-c) { |
| break |
| } |
| v.reset(OpAMD64ROLQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQ x (SHLQconst [3] y)) |
| // cond: |
| // result: (LEAQ8 x y) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 3 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ8) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ (SHLQconst [3] y) x) |
| // cond: |
| // result: (LEAQ8 x y) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 3 { |
| break |
| } |
| y := v_0.Args[0] |
| x := v.Args[1] |
| v.reset(OpAMD64LEAQ8) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ x (SHLQconst [2] y)) |
| // cond: |
| // result: (LEAQ4 x y) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 2 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ4) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ (SHLQconst [2] y) x) |
| // cond: |
| // result: (LEAQ4 x y) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 2 { |
| break |
| } |
| y := v_0.Args[0] |
| x := v.Args[1] |
| v.reset(OpAMD64LEAQ4) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ x (SHLQconst [1] y)) |
| // cond: |
| // result: (LEAQ2 x y) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 1 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ (SHLQconst [1] y) x) |
| // cond: |
| // result: (LEAQ2 x y) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 1 { |
| break |
| } |
| y := v_0.Args[0] |
| x := v.Args[1] |
| v.reset(OpAMD64LEAQ2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool { |
| // match: (ADDQ x (ADDQ y y)) |
| // cond: |
| // result: (LEAQ2 x y) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQ { |
| break |
| } |
| _ = v_1.Args[1] |
| y := v_1.Args[0] |
| if y != v_1.Args[1] { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ (ADDQ y y) x) |
| // cond: |
| // result: (LEAQ2 x y) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| _ = v_0.Args[1] |
| y := v_0.Args[0] |
| if y != v_0.Args[1] { |
| break |
| } |
| x := v.Args[1] |
| v.reset(OpAMD64LEAQ2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ x (ADDQ x y)) |
| // cond: |
| // result: (LEAQ2 y x) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQ { |
| break |
| } |
| _ = v_1.Args[1] |
| if x != v_1.Args[0] { |
| break |
| } |
| y := v_1.Args[1] |
| v.reset(OpAMD64LEAQ2) |
| v.AddArg(y) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQ x (ADDQ y x)) |
| // cond: |
| // result: (LEAQ2 y x) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQ { |
| break |
| } |
| _ = v_1.Args[1] |
| y := v_1.Args[0] |
| if x != v_1.Args[1] { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AddArg(y) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQ (ADDQ x y) x) |
| // cond: |
| // result: (LEAQ2 y x) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if x != v.Args[1] { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AddArg(y) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQ (ADDQ y x) x) |
| // cond: |
| // result: (LEAQ2 y x) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| _ = v_0.Args[1] |
| y := v_0.Args[0] |
| x := v_0.Args[1] |
| if x != v.Args[1] { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AddArg(y) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQ (ADDQconst [c] x) y) |
| // cond: |
| // result: (LEAQ1 [c] x y) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| y := v.Args[1] |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ y (ADDQconst [c] x)) |
| // cond: |
| // result: (LEAQ1 [c] x y) |
| for { |
| _ = v.Args[1] |
| y := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| c := v_1.AuxInt |
| x := v_1.Args[0] |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ x (LEAQ [c] {s} y)) |
| // cond: x.Op != OpSB && y.Op != OpSB |
| // result: (LEAQ1 [c] {s} x y) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64LEAQ { |
| break |
| } |
| c := v_1.AuxInt |
| s := v_1.Aux |
| y := v_1.Args[0] |
| if !(x.Op != OpSB && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ (LEAQ [c] {s} y) x) |
| // cond: x.Op != OpSB && y.Op != OpSB |
| // result: (LEAQ1 [c] {s} x y) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| c := v_0.AuxInt |
| s := v_0.Aux |
| y := v_0.Args[0] |
| x := v.Args[1] |
| if !(x.Op != OpSB && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { |
| // match: (ADDQ x (NEGQ y)) |
| // cond: |
| // result: (SUBQ x y) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64NEGQ { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64SUBQ) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ (NEGQ y) x) |
| // cond: |
| // result: (SUBQ x y) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64NEGQ { |
| break |
| } |
| y := v_0.Args[0] |
| x := v.Args[1] |
| v.reset(OpAMD64SUBQ) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem)) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ADDQmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| l := v.Args[1] |
| if l.Op != OpAMD64MOVQload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ADDQmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ADDQmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| l := v.Args[0] |
| if l.Op != OpAMD64MOVQload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| x := v.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ADDQmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { |
| // match: (ADDQconst [c] (ADDQ x y)) |
| // cond: |
| // result: (LEAQ1 [c] x y) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQconst [c] (LEAQ [d] {s} x)) |
| // cond: is32Bit(c+d) |
| // result: (LEAQ [c+d] {s} x) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| d := v_0.AuxInt |
| s := v_0.Aux |
| x := v_0.Args[0] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQconst [c] (LEAQ1 [d] {s} x y)) |
| // cond: is32Bit(c+d) |
| // result: (LEAQ1 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ1 { |
| break |
| } |
| d := v_0.AuxInt |
| s := v_0.Aux |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) |
| // cond: is32Bit(c+d) |
| // result: (LEAQ2 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ2 { |
| break |
| } |
| d := v_0.AuxInt |
| s := v_0.Aux |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) |
| // cond: is32Bit(c+d) |
| // result: (LEAQ4 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ4 { |
| break |
| } |
| d := v_0.AuxInt |
| s := v_0.Aux |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) |
| // cond: is32Bit(c+d) |
| // result: (LEAQ8 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ8 { |
| break |
| } |
| d := v_0.AuxInt |
| s := v_0.Aux |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (ADDQconst [0] x) |
| // cond: |
| // result: x |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpCopy) |
| v.Type = x.Type |
| v.AddArg(x) |
| return true |
| } |
| // match: (ADDQconst [c] (MOVQconst [d])) |
| // cond: |
| // result: (MOVQconst [c+d]) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| d := v_0.AuxInt |
| v.reset(OpAMD64MOVQconst) |
| v.AuxInt = c + d |
| return true |
| } |
| // match: (ADDQconst [c] (ADDQconst [d] x)) |
| // cond: is32Bit(c+d) |
| // result: (ADDQconst [c+d] x) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64ADDQconst) |
| v.AuxInt = c + d |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| typ := &b.Func.Config.Types |
| _ = typ |
| // match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) |
| // cond: |
| // result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x)) |
| for { |
| valOff := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[1] |
| ptr := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVSDstore { |
| break |
| } |
| if v_1.AuxInt != ValAndOff(valOff).Off() { |
| break |
| } |
| if v_1.Aux != sym { |
| break |
| } |
| _ = v_1.Args[2] |
| if ptr != v_1.Args[0] { |
| break |
| } |
| x := v_1.Args[1] |
| v.reset(OpAMD64ADDQconst) |
| v.AuxInt = ValAndOff(valOff).Val() |
| v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| typ := &b.Func.Config.Types |
| _ = typ |
| // match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) |
| // cond: |
| // result: (ADDQ x (MOVQf2i y)) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[2] |
| x := v.Args[0] |
| ptr := v.Args[1] |
| v_2 := v.Args[2] |
| if v_2.Op != OpAMD64MOVSDstore { |
| break |
| } |
| if v_2.AuxInt != off { |
| break |
| } |
| if v_2.Aux != sym { |
| break |
| } |
| _ = v_2.Args[2] |
| if ptr != v_2.Args[0] { |
| break |
| } |
| y := v_2.Args[1] |
| v.reset(OpAMD64ADDQ) |
| v.AddArg(x) |
| v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) |
| v0.AddArg(y) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { |
| // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ADDSDmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| l := v.Args[1] |
| if l.Op != OpAMD64MOVSDload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ADDSDmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ADDSDmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| l := v.Args[0] |
| if l.Op != OpAMD64MOVSDload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| x := v.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ADDSDmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| typ := &b.Func.Config.Types |
| _ = typ |
| // match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) |
| // cond: |
| // result: (ADDSD x (MOVQi2f y)) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[2] |
| x := v.Args[0] |
| ptr := v.Args[1] |
| v_2 := v.Args[2] |
| if v_2.Op != OpAMD64MOVQstore { |
| break |
| } |
| if v_2.AuxInt != off { |
| break |
| } |
| if v_2.Aux != sym { |
| break |
| } |
| _ = v_2.Args[2] |
| if ptr != v_2.Args[0] { |
| break |
| } |
| y := v_2.Args[1] |
| v.reset(OpAMD64ADDSD) |
| v.AddArg(x) |
| v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) |
| v0.AddArg(y) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { |
| // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ADDSSmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| l := v.Args[1] |
| if l.Op != OpAMD64MOVSSload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ADDSSmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ADDSSmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| l := v.Args[0] |
| if l.Op != OpAMD64MOVSSload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| x := v.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ADDSSmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| typ := &b.Func.Config.Types |
| _ = typ |
| // match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) |
| // cond: |
| // result: (ADDSS x (MOVLi2f y)) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[2] |
| x := v.Args[0] |
| ptr := v.Args[1] |
| v_2 := v.Args[2] |
| if v_2.Op != OpAMD64MOVLstore { |
| break |
| } |
| if v_2.AuxInt != off { |
| break |
| } |
| if v_2.Aux != sym { |
| break |
| } |
| _ = v_2.Args[2] |
| if ptr != v_2.Args[0] { |
| break |
| } |
| y := v_2.Args[1] |
| v.reset(OpAMD64ADDSS) |
| v.AddArg(x) |
| v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) |
| v0.AddArg(y) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { |
| // match: (ANDL x (MOVLconst [c])) |
| // cond: |
| // result: (ANDLconst [c] x) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_1.AuxInt |
| v.reset(OpAMD64ANDLconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDL (MOVLconst [c]) x) |
| // cond: |
| // result: (ANDLconst [c] x) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v.Args[1] |
| v.reset(OpAMD64ANDLconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDL x x) |
| // cond: |
| // result: x |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| if x != v.Args[1] { |
| break |
| } |
| v.reset(OpCopy) |
| v.Type = x.Type |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ANDLmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| l := v.Args[1] |
| if l.Op != OpAMD64MOVLload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ANDLmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ANDLmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| l := v.Args[0] |
| if l.Op != OpAMD64MOVLload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| x := v.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ANDLmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { |
| // match: (ANDLconst [c] (ANDLconst [d] x)) |
| // cond: |
// result: (ANDLconst [c&d] x)
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64ANDLconst) |
| v.AuxInt = c & d |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDLconst [0xFF] x) |
| // cond: |
| // result: (MOVBQZX x) |
| for { |
| if v.AuxInt != 0xFF { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64MOVBQZX) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDLconst [0xFFFF] x) |
| // cond: |
| // result: (MOVWQZX x) |
| for { |
| if v.AuxInt != 0xFFFF { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64MOVWQZX) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDLconst [c] _) |
| // cond: int32(c)==0 |
| // result: (MOVLconst [0]) |
| for { |
| c := v.AuxInt |
| if !(int32(c) == 0) { |
| break |
| } |
| v.reset(OpAMD64MOVLconst) |
| v.AuxInt = 0 |
| return true |
| } |
| // match: (ANDLconst [c] x) |
| // cond: int32(c)==-1 |
| // result: x |
| for { |
| c := v.AuxInt |
| x := v.Args[0] |
| if !(int32(c) == -1) { |
| break |
| } |
| v.reset(OpCopy) |
| v.Type = x.Type |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDLconst [c] (MOVLconst [d])) |
| // cond: |
| // result: (MOVLconst [c&d]) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| d := v_0.AuxInt |
| v.reset(OpAMD64MOVLconst) |
| v.AuxInt = c & d |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| typ := &b.Func.Config.Types |
| _ = typ |
| // match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) |
| // cond: |
| // result: (ANDL x (MOVLf2i y)) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[2] |
| x := v.Args[0] |
| ptr := v.Args[1] |
| v_2 := v.Args[2] |
| if v_2.Op != OpAMD64MOVSSstore { |
| break |
| } |
| if v_2.AuxInt != off { |
| break |
| } |
| if v_2.Aux != sym { |
| break |
| } |
| _ = v_2.Args[2] |
| if ptr != v_2.Args[0] { |
| break |
| } |
| y := v_2.Args[1] |
| v.reset(OpAMD64ANDL) |
| v.AddArg(x) |
| v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) |
| v0.AddArg(y) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { |
| // match: (ANDQ x (MOVQconst [c])) |
| // cond: is32Bit(c) |
| // result: (ANDQconst [c] x) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVQconst { |
| break |
| } |
| c := v_1.AuxInt |
| if !(is32Bit(c)) { |
| break |
| } |
| v.reset(OpAMD64ANDQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDQ (MOVQconst [c]) x) |
| // cond: is32Bit(c) |
| // result: (ANDQconst [c] x) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v.Args[1] |
| if !(is32Bit(c)) { |
| break |
| } |
| v.reset(OpAMD64ANDQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDQ x x) |
| // cond: |
| // result: x |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| if x != v.Args[1] { |
| break |
| } |
| v.reset(OpCopy) |
| v.Type = x.Type |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem)) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ANDQmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| l := v.Args[1] |
| if l.Op != OpAMD64MOVQload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ANDQmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| // match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x) |
| // cond: canMergeLoad(v, l, x) && clobber(l) |
| // result: (ANDQmem x [off] {sym} ptr mem) |
| for { |
| _ = v.Args[1] |
| l := v.Args[0] |
| if l.Op != OpAMD64MOVQload { |
| break |
| } |
| off := l.AuxInt |
| sym := l.Aux |
| _ = l.Args[1] |
| ptr := l.Args[0] |
| mem := l.Args[1] |
| x := v.Args[1] |
| if !(canMergeLoad(v, l, x) && clobber(l)) { |
| break |
| } |
| v.reset(OpAMD64ANDQmem) |
| v.AuxInt = off |
| v.Aux = sym |
| v.AddArg(x) |
| v.AddArg(ptr) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { |
| // match: (ANDQconst [c] (ANDQconst [d] x)) |
| // cond: |
// result: (ANDQconst [c&d] x)
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64ANDQconst) |
| v.AuxInt = c & d |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDQconst [0xFF] x) |
| // cond: |
| // result: (MOVBQZX x) |
| for { |
| if v.AuxInt != 0xFF { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64MOVBQZX) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDQconst [0xFFFF] x) |
| // cond: |
| // result: (MOVWQZX x) |
| for { |
| if v.AuxInt != 0xFFFF { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64MOVWQZX) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDQconst [0xFFFFFFFF] x) |
| // cond: |
| // result: (MOVLQZX x) |
| for { |
| if v.AuxInt != 0xFFFFFFFF { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64MOVLQZX) |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDQconst [0] _) |
| // cond: |
| // result: (MOVQconst [0]) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v.reset(OpAMD64MOVQconst) |
| v.AuxInt = 0 |
| return true |
| } |
| // match: (ANDQconst [-1] x) |
| // cond: |
| // result: x |
| for { |
| if v.AuxInt != -1 { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpCopy) |
| v.Type = x.Type |
| v.AddArg(x) |
| return true |
| } |
| // match: (ANDQconst [c] (MOVQconst [d])) |
| // cond: |
| // result: (MOVQconst [c&d]) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| d := v_0.AuxInt |
| v.reset(OpAMD64MOVQconst) |
| v.AuxInt = c & d |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| typ := &b.Func.Config.Types |
| _ = typ |
| // match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) |
| // cond: |
| // result: (ANDQ x (MOVQf2i y)) |
| for { |
| off := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[2] |
| x := v.Args[0] |
| ptr := v.Args[1] |
| v_2 := v.Args[2] |
| if v_2.Op != OpAMD64MOVSDstore { |
| break |
| } |
| if v_2.AuxInt != off { |
| break |
| } |
| if v_2.Aux != sym { |
| break |
| } |
| _ = v_2.Args[2] |
| if ptr != v_2.Args[0] { |
| break |
| } |
| y := v_2.Args[1] |
| v.reset(OpAMD64ANDQ) |
| v.AddArg(x) |
| v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) |
| v0.AddArg(y) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) |
| // cond: |
| // result: (BSFQ (ORQconst <t> [1<<8] x)) |
| for { |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ORQconst { |
| break |
| } |
| t := v_0.Type |
| if v_0.AuxInt != 1<<8 { |
| break |
| } |
| v_0_0 := v_0.Args[0] |
| if v_0_0.Op != OpAMD64MOVBQZX { |
| break |
| } |
| x := v_0_0.Args[0] |
| v.reset(OpAMD64BSFQ) |
| v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) |
| v0.AuxInt = 1 << 8 |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) |
| // cond: |
| // result: (BSFQ (ORQconst <t> [1<<16] x)) |
| for { |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ORQconst { |
| break |
| } |
| t := v_0.Type |
| if v_0.AuxInt != 1<<16 { |
| break |
| } |
| v_0_0 := v_0.Args[0] |
| if v_0_0.Op != OpAMD64MOVWQZX { |
| break |
| } |
| x := v_0_0.Args[0] |
| v.reset(OpAMD64BSFQ) |
| v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) |
| v0.AuxInt = 1 << 16 |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool { |
| // match: (BTQconst [c] x) |
| // cond: c < 32 |
| // result: (BTLconst [c] x) |
| for { |
| c := v.AuxInt |
| x := v.Args[0] |
| if !(c < 32) { |
| break |
| } |
| v.reset(OpAMD64BTLconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool { |
| // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) |
| // cond: c != 0 |
| // result: x |
| for { |
| _ = v.Args[2] |
| x := v.Args[0] |
| v_2 := v.Args[2] |
| if v_2.Op != OpSelect1 { |
| break |
| } |
| v_2_0 := v_2.Args[0] |
| if v_2_0.Op != OpAMD64BSFQ { |
| break |
| } |
| v_2_0_0 := v_2_0.Args[0] |
| if v_2_0_0.Op != OpAMD64ORQconst { |
| break |
| } |
| c := v_2_0_0.AuxInt |
| if !(c != 0) { |
| break |
| } |
| v.reset(OpCopy) |
| v.Type = x.Type |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPB x (MOVLconst [c])) |
| // cond: |
| // result: (CMPBconst x [int64(int8(c))]) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_1.AuxInt |
| v.reset(OpAMD64CMPBconst) |
| v.AuxInt = int64(int8(c)) |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPB (MOVLconst [c]) x) |
| // cond: |
| // result: (InvertFlags (CMPBconst x [int64(int8(c))])) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v.Args[1] |
| v.reset(OpAMD64InvertFlags) |
| v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) |
| v0.AuxInt = int64(int8(c)) |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool { |
| // match: (CMPBconst (MOVLconst [x]) [y]) |
| // cond: int8(x)==int8(y) |
| // result: (FlagEQ) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int8(x) == int8(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagEQ) |
| return true |
| } |
| // match: (CMPBconst (MOVLconst [x]) [y]) |
| // cond: int8(x)<int8(y) && uint8(x)<uint8(y) |
| // result: (FlagLT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPBconst (MOVLconst [x]) [y]) |
| // cond: int8(x)<int8(y) && uint8(x)>uint8(y) |
| // result: (FlagLT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_UGT) |
| return true |
| } |
| // match: (CMPBconst (MOVLconst [x]) [y]) |
| // cond: int8(x)>int8(y) && uint8(x)<uint8(y) |
| // result: (FlagGT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_ULT) |
| return true |
| } |
| // match: (CMPBconst (MOVLconst [x]) [y]) |
| // cond: int8(x)>int8(y) && uint8(x)>uint8(y) |
| // result: (FlagGT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_UGT) |
| return true |
| } |
| // match: (CMPBconst (ANDLconst _ [m]) [n]) |
| // cond: 0 <= int8(m) && int8(m) < int8(n) |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| m := v_0.AuxInt |
| if !(0 <= int8(m) && int8(m) < int8(n)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPBconst (ANDL x y) [0]) |
| // cond: |
| // result: (TESTB x y) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDL { |
| break |
| } |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| v.reset(OpAMD64TESTB) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (CMPBconst (ANDLconst [c] x) [0]) |
| // cond: |
| // result: (TESTBconst [int64(int8(c))] x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64TESTBconst) |
| v.AuxInt = int64(int8(c)) |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPBconst x [0]) |
| // cond: |
| // result: (TESTB x x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64TESTB) |
| v.AddArg(x) |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPL x (MOVLconst [c])) |
| // cond: |
| // result: (CMPLconst x [c]) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_1.AuxInt |
| v.reset(OpAMD64CMPLconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPL (MOVLconst [c]) x) |
| // cond: |
| // result: (InvertFlags (CMPLconst x [c])) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v.Args[1] |
| v.reset(OpAMD64InvertFlags) |
| v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) |
| v0.AuxInt = c |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool { |
| // match: (CMPLconst (MOVLconst [x]) [y]) |
| // cond: int32(x)==int32(y) |
| // result: (FlagEQ) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int32(x) == int32(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagEQ) |
| return true |
| } |
| // match: (CMPLconst (MOVLconst [x]) [y]) |
| // cond: int32(x)<int32(y) && uint32(x)<uint32(y) |
| // result: (FlagLT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPLconst (MOVLconst [x]) [y]) |
| // cond: int32(x)<int32(y) && uint32(x)>uint32(y) |
| // result: (FlagLT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_UGT) |
| return true |
| } |
| // match: (CMPLconst (MOVLconst [x]) [y]) |
| // cond: int32(x)>int32(y) && uint32(x)<uint32(y) |
| // result: (FlagGT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_ULT) |
| return true |
| } |
| // match: (CMPLconst (MOVLconst [x]) [y]) |
| // cond: int32(x)>int32(y) && uint32(x)>uint32(y) |
| // result: (FlagGT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_UGT) |
| return true |
| } |
| // match: (CMPLconst (SHRLconst _ [c]) [n]) |
| // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHRLconst { |
| break |
| } |
| c := v_0.AuxInt |
| if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPLconst (ANDLconst _ [m]) [n]) |
| // cond: 0 <= int32(m) && int32(m) < int32(n) |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| m := v_0.AuxInt |
| if !(0 <= int32(m) && int32(m) < int32(n)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPLconst (ANDL x y) [0]) |
| // cond: |
| // result: (TESTL x y) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDL { |
| break |
| } |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| v.reset(OpAMD64TESTL) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (CMPLconst (ANDLconst [c] x) [0]) |
| // cond: |
| // result: (TESTLconst [c] x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64TESTLconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPLconst x [0]) |
| // cond: |
| // result: (TESTL x x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64TESTL) |
| v.AddArg(x) |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPQ x (MOVQconst [c])) |
| // cond: is32Bit(c) |
| // result: (CMPQconst x [c]) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVQconst { |
| break |
| } |
| c := v_1.AuxInt |
| if !(is32Bit(c)) { |
| break |
| } |
| v.reset(OpAMD64CMPQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPQ (MOVQconst [c]) x) |
| // cond: is32Bit(c) |
| // result: (InvertFlags (CMPQconst x [c])) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v.Args[1] |
| if !(is32Bit(c)) { |
| break |
| } |
| v.reset(OpAMD64InvertFlags) |
| v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) |
| v0.AuxInt = c |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool { |
| // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) |
| // cond: |
| // result: (FlagLT_ULT) |
| for { |
| if v.AuxInt != 32 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64NEGQ { |
| break |
| } |
| v_0_0 := v_0.Args[0] |
| if v_0_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| if v_0_0.AuxInt != -16 { |
| break |
| } |
| v_0_0_0 := v_0_0.Args[0] |
| if v_0_0_0.Op != OpAMD64ANDQconst { |
| break |
| } |
| if v_0_0_0.AuxInt != 15 { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
// match: (CMPQconst (NEGQ (ADDQconst [-8] (ANDQconst [7] _))) [32])
| // cond: |
| // result: (FlagLT_ULT) |
| for { |
| if v.AuxInt != 32 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64NEGQ { |
| break |
| } |
| v_0_0 := v_0.Args[0] |
| if v_0_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| if v_0_0.AuxInt != -8 { |
| break |
| } |
| v_0_0_0 := v_0_0.Args[0] |
| if v_0_0_0.Op != OpAMD64ANDQconst { |
| break |
| } |
| if v_0_0_0.AuxInt != 7 { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x==y |
| // result: (FlagEQ) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x == y) { |
| break |
| } |
| v.reset(OpAMD64FlagEQ) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x<y && uint64(x)<uint64(y) |
| // result: (FlagLT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x < y && uint64(x) < uint64(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x<y && uint64(x)>uint64(y) |
| // result: (FlagLT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x < y && uint64(x) > uint64(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_UGT) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x>y && uint64(x)<uint64(y) |
| // result: (FlagGT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x > y && uint64(x) < uint64(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVQconst [x]) [y]) |
| // cond: x>y && uint64(x)>uint64(y) |
| // result: (FlagGT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVQconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(x > y && uint64(x) > uint64(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_UGT) |
| return true |
| } |
| // match: (CMPQconst (MOVBQZX _) [c]) |
| // cond: 0xFF < c |
| // result: (FlagLT_ULT) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVBQZX { |
| break |
| } |
| if !(0xFF < c) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVWQZX _) [c]) |
| // cond: 0xFFFF < c |
| // result: (FlagLT_ULT) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVWQZX { |
| break |
| } |
| if !(0xFFFF < c) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (MOVLQZX _) [c]) |
| // cond: 0xFFFFFFFF < c |
| // result: (FlagLT_ULT) |
| for { |
| c := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLQZX { |
| break |
| } |
| if !(0xFFFFFFFF < c) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool { |
| // match: (CMPQconst (SHRQconst _ [c]) [n]) |
| // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHRQconst { |
| break |
| } |
| c := v_0.AuxInt |
| if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (ANDQconst _ [m]) [n]) |
| // cond: 0 <= m && m < n |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDQconst { |
| break |
| } |
| m := v_0.AuxInt |
| if !(0 <= m && m < n) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (ANDLconst _ [m]) [n]) |
| // cond: 0 <= m && m < n |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| m := v_0.AuxInt |
| if !(0 <= m && m < n) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPQconst (ANDQ x y) [0]) |
| // cond: |
| // result: (TESTQ x y) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDQ { |
| break |
| } |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| v.reset(OpAMD64TESTQ) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (CMPQconst (ANDQconst [c] x) [0]) |
| // cond: |
| // result: (TESTQconst [c] x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDQconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64TESTQconst) |
| v.AuxInt = c |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPQconst x [0]) |
| // cond: |
| // result: (TESTQ x x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64TESTQ) |
| v.AddArg(x) |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { |
| b := v.Block |
| _ = b |
| // match: (CMPW x (MOVLconst [c])) |
| // cond: |
| // result: (CMPWconst x [int64(int16(c))]) |
| for { |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_1.AuxInt |
| v.reset(OpAMD64CMPWconst) |
| v.AuxInt = int64(int16(c)) |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPW (MOVLconst [c]) x) |
| // cond: |
| // result: (InvertFlags (CMPWconst x [int64(int16(c))])) |
| for { |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v.Args[1] |
| v.reset(OpAMD64InvertFlags) |
| v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) |
| v0.AuxInt = int64(int16(c)) |
| v0.AddArg(x) |
| v.AddArg(v0) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool { |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)==int16(y) |
| // result: (FlagEQ) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) == int16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagEQ) |
| return true |
| } |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)<int16(y) && uint16(x)<uint16(y) |
| // result: (FlagLT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)<int16(y) && uint16(x)>uint16(y) |
| // result: (FlagLT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_UGT) |
| return true |
| } |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)>int16(y) && uint16(x)<uint16(y) |
| // result: (FlagGT_ULT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_ULT) |
| return true |
| } |
| // match: (CMPWconst (MOVLconst [x]) [y]) |
| // cond: int16(x)>int16(y) && uint16(x)>uint16(y) |
| // result: (FlagGT_UGT) |
| for { |
| y := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64MOVLconst { |
| break |
| } |
| x := v_0.AuxInt |
| if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { |
| break |
| } |
| v.reset(OpAMD64FlagGT_UGT) |
| return true |
| } |
| // match: (CMPWconst (ANDLconst _ [m]) [n]) |
| // cond: 0 <= int16(m) && int16(m) < int16(n) |
| // result: (FlagLT_ULT) |
| for { |
| n := v.AuxInt |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| m := v_0.AuxInt |
| if !(0 <= int16(m) && int16(m) < int16(n)) { |
| break |
| } |
| v.reset(OpAMD64FlagLT_ULT) |
| return true |
| } |
| // match: (CMPWconst (ANDL x y) [0]) |
| // cond: |
| // result: (TESTW x y) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDL { |
| break |
| } |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| v.reset(OpAMD64TESTW) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (CMPWconst (ANDLconst [c] x) [0]) |
| // cond: |
| // result: (TESTWconst [int64(int16(c))] x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ANDLconst { |
| break |
| } |
| c := v_0.AuxInt |
| x := v_0.Args[0] |
| v.reset(OpAMD64TESTWconst) |
| v.AuxInt = int64(int16(c)) |
| v.AddArg(x) |
| return true |
| } |
| // match: (CMPWconst x [0]) |
| // cond: |
| // result: (TESTW x x) |
| for { |
| if v.AuxInt != 0 { |
| break |
| } |
| x := v.Args[0] |
| v.reset(OpAMD64TESTW) |
| v.AddArg(x) |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool { |
| // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) |
| // cond: is32Bit(off1+off2) |
| // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) |
| for { |
| off1 := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[3] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| off2 := v_0.AuxInt |
| ptr := v_0.Args[0] |
| old := v.Args[1] |
| new_ := v.Args[2] |
| mem := v.Args[3] |
| if !(is32Bit(off1 + off2)) { |
| break |
| } |
| v.reset(OpAMD64CMPXCHGLlock) |
| v.AuxInt = off1 + off2 |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(old) |
| v.AddArg(new_) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool { |
| // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) |
| // cond: is32Bit(off1+off2) |
| // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) |
| for { |
| off1 := v.AuxInt |
| sym := v.Aux |
| _ = v.Args[3] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| off2 := v_0.AuxInt |
| ptr := v_0.Args[0] |
| old := v.Args[1] |
| new_ := v.Args[2] |
| mem := v.Args[3] |
| if !(is32Bit(off1 + off2)) { |
| break |
| } |
| v.reset(OpAMD64CMPXCHGQlock) |
| v.AuxInt = off1 + off2 |
| v.Aux = sym |
| v.AddArg(ptr) |
| v.AddArg(old) |
| v.AddArg(new_) |
| v.AddArg(mem) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool { |
| // match: (LEAL [c] {s} (ADDLconst [d] x)) |
| // cond: is32Bit(c+d) |
| // result: (LEAL [c+d] {s} x) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDLconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAL) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool { |
| // match: (LEAQ [c] {s} (ADDQconst [d] x)) |
| // cond: is32Bit(c+d) |
| // result: (LEAQ [c+d] {s} x) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| if !(is32Bit(c + d)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| return true |
| } |
| // match: (LEAQ [c] {s} (ADDQ x y)) |
| // cond: x.Op != OpSB && y.Op != OpSB |
| // result: (LEAQ1 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQ { |
| break |
| } |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(x.Op != OpSB && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ1 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ2 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ4 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) |
| // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ8 { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| _ = v_0.Args[1] |
| x := v_0.Args[0] |
| y := v_0.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { |
| break |
| } |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { |
| // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) |
| // cond: is32Bit(c+d) && x.Op != OpSB |
| // result: (LEAQ1 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(c+d) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} y (ADDQconst [d] x)) |
| // cond: is32Bit(c+d) && x.Op != OpSB |
| // result: (LEAQ1 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| y := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_1.AuxInt |
| x := v_1.Args[0] |
| if !(is32Bit(c+d) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) |
| // cond: |
| // result: (LEAQ2 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 1 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x) |
| // cond: |
| // result: (LEAQ2 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 1 { |
| break |
| } |
| y := v_0.Args[0] |
| x := v.Args[1] |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) |
| // cond: |
| // result: (LEAQ4 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 2 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x) |
| // cond: |
| // result: (LEAQ4 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 2 { |
| break |
| } |
| y := v_0.Args[0] |
| x := v.Args[1] |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) |
| // cond: |
| // result: (LEAQ8 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 3 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x) |
| // cond: |
| // result: (LEAQ8 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_0.AuxInt != 3 { |
| break |
| } |
| y := v_0.Args[0] |
| x := v.Args[1] |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB |
| // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x)) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB |
| // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| _ = v.Args[1] |
| y := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_1.AuxInt |
| sym2 := v_1.Aux |
| x := v_1.Args[0] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ1) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool { |
| // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) |
| // cond: is32Bit(c+d) && x.Op != OpSB |
| // result: (LEAQ2 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(c+d) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) |
| // cond: is32Bit(c+2*d) && y.Op != OpSB |
| // result: (LEAQ2 [c+2*d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_1.AuxInt |
| y := v_1.Args[0] |
| if !(is32Bit(c+2*d) && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = c + 2*d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) |
| // cond: |
| // result: (LEAQ4 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 1 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) |
| // cond: |
| // result: (LEAQ8 [c] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64SHLQconst { |
| break |
| } |
| if v_1.AuxInt != 2 { |
| break |
| } |
| y := v_1.Args[0] |
| v.reset(OpAMD64LEAQ8) |
| v.AuxInt = c |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) |
| // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB |
| // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) |
| for { |
| off1 := v.AuxInt |
| sym1 := v.Aux |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64LEAQ { |
| break |
| } |
| off2 := v_0.AuxInt |
| sym2 := v_0.Aux |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ2) |
| v.AuxInt = off1 + off2 |
| v.Aux = mergeSym(sym1, sym2) |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| return false |
| } |
| func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool { |
| // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) |
| // cond: is32Bit(c+d) && x.Op != OpSB |
| // result: (LEAQ4 [c+d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| v_0 := v.Args[0] |
| if v_0.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_0.AuxInt |
| x := v_0.Args[0] |
| y := v.Args[1] |
| if !(is32Bit(c+d) && x.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c + d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) |
| // cond: is32Bit(c+4*d) && y.Op != OpSB |
| // result: (LEAQ4 [c+4*d] {s} x y) |
| for { |
| c := v.AuxInt |
| s := v.Aux |
| _ = v.Args[1] |
| x := v.Args[0] |
| v_1 := v.Args[1] |
| if v_1.Op != OpAMD64ADDQconst { |
| break |
| } |
| d := v_1.AuxInt |
| y := v_1.Args[0] |
| if !(is32Bit(c+4*d) && y.Op != OpSB) { |
| break |
| } |
| v.reset(OpAMD64LEAQ4) |
| v.AuxInt = c + 4*d |
| v.Aux = s |
| v.AddArg(x) |
| v.AddArg(y) |
| return true |
| } |
| // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) |
| // cond: |
| // result: (LEAQ8 [c] {s} x y) |
| for { |
| c := v.AuxInt |
|