[dev.ssa] cmd/compile: refactor out rulegen value parsing
Previously, genMatch0 and genResult0 contained
lots of duplication: locating the op, parsing
the value, validation, etc.
Parsing and validation were mixed in with code gen.
Extract a helper, parseValue. It is responsible
for parsing the value, locating the op, and doing
shared validation.
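For reference, the new helper's signature, as added below:

    func parseValue(val string, arch arch, loc string) (op opData,
            oparch string, typ string, auxint string, aux string,
            args []string)

It returns the resolved op plus the unparsed typ/auxint/aux
restrictions and arg strings, so the match and result generators
consume the same parse.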
As a bonus (and possibly as my original motivation),
make op selection pay attention to the number
of args present.
This allows arch-specific ops to share a name
with generic ops as long as there is no ambiguity.
It also detects and reports unresolved ambiguity,
unlike before, where it would simply always
pick the generic op, with no warning.
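For example (hypothetical op names): if the generic table defines
a one-arg Foo and an architecture defines a two-arg Foo, the two
no longer collide:

    (Foo x)   -> ...  // resolves to the generic OpFoo
    (Foo x y) -> ...  // resolves to the arch-specific Op<arch>Foo

If both tables match the same name and arity, rulegen now fails
with "matches for op ... found in both generic and <arch>" instead
of silently choosing the generic op.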
Also use parseValue when generating the top-level
op dispatch, to ensure its opinion about ops
matches genMatch0 and genResult0.
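Concretely, rule registration now goes through the same helper
(from the hunk below):

    match, _, _ := r.parse()
    op, oparch, _, _, _, _ := parseValue(match, arch, loc)
    opname := fmt.Sprintf("Op%s%s", oparch, op.name)
    oprules[opname] = append(oprules[opname], r)

so the dispatch switch keys on the fully resolved Op name.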
The order of statements in the generated code used
to depend on the exact rule. It is now somewhat
independent of the rule. That is the source
of some of the generated code changes in this CL.
See rewritedec64 and rewritegeneric for examples.
It is a one-time change.
The op dispatch switch and functions used to be
sorted by opname without architecture. The sort
now includes the architecture, leading to further
generated code changes.
See rewriteARM and rewriteAMD64 for examples.
Again, it is a one-time change.
There are no functional changes.
Change-Id: I22c989183ad5651741ebdc0566349c5fd6c6b23c
Reviewed-on: https://go-review.googlesource.com/24649
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index 0947e65..0cb428b 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -117,15 +117,17 @@
if unbalanced(rule) {
continue
}
- op := strings.Split(rule, " ")[0][1:]
- if op[len(op)-1] == ')' {
- op = op[:len(op)-1] // rule has only opcode, e.g. (ConstNil) -> ...
- }
+
loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno)
- if isBlock(op, arch) {
- blockrules[op] = append(blockrules[op], Rule{rule: rule, loc: loc})
+ r := Rule{rule: rule, loc: loc}
+ if rawop := strings.Split(rule, " ")[0][1:]; isBlock(rawop, arch) {
+ blockrules[rawop] = append(blockrules[rawop], r)
} else {
- oprules[op] = append(oprules[op], Rule{rule: rule, loc: loc})
+ // Do fancier value op matching.
+ match, _, _ := r.parse()
+ op, oparch, _, _, _, _ := parseValue(match, arch, loc)
+ opname := fmt.Sprintf("Op%s%s", oparch, op.name)
+ oprules[opname] = append(oprules[opname], r)
}
rule = ""
ruleLineno = 0
@@ -157,8 +159,8 @@
fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name)
fmt.Fprintf(w, "switch v.Op {\n")
for _, op := range ops {
- fmt.Fprintf(w, "case %s:\n", opName(op, arch))
- fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, opName(op, arch))
+ fmt.Fprintf(w, "case %s:\n", op)
+ fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, op)
}
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return false\n")
@@ -167,7 +169,7 @@
// Generate a routine per op. Note that we don't make one giant routine
// because it is too big for some compilers.
for _, op := range ops {
- fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, opName(op, arch))
+ fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, op)
fmt.Fprintln(w, "b := v.Block")
fmt.Fprintln(w, "_ = b")
var canFail bool
@@ -334,141 +336,108 @@
}
canFail := false
- // split body up into regions. Split by spaces/tabs, except those
- // contained in () or {}.
- s := split(match[1 : len(match)-1]) // remove parens, then split
-
- // Find op record
- var op opData
- for _, x := range genericOps {
- if x.name == s[0] {
- op = x
- break
- }
- }
- for _, x := range arch.ops {
- if x.name == s[0] {
- op = x
- break
- }
- }
- if op.name == "" {
- log.Fatalf("%s: unknown op %s", loc, s[0])
- }
+ op, oparch, typ, auxint, aux, args := parseValue(match, arch, loc)
// check op
if !top {
- fmt.Fprintf(w, "if %s.Op != %s {\nbreak\n}\n", v, opName(s[0], arch))
+ fmt.Fprintf(w, "if %s.Op != Op%s%s {\nbreak\n}\n", v, oparch, op.name)
canFail = true
}
- // check type/aux/args
- argnum := 0
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction
- t := a[1 : len(a)-1] // remove <>
- if !isVariable(t) {
- // code. We must match the results of this code.
- fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
+ if typ != "" {
+ if !isVariable(typ) {
+ // code. We must match the results of this code.
+ fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[typ]; ok {
+ // must match previous variable
+ fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
canFail = true
} else {
- // variable
- if _, ok := m[t]; ok {
- // must match previous variable
- fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
- canFail = true
- } else {
- m[t] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Type\n", t, v)
- }
+ m[typ] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Type\n", typ, v)
}
- } else if a[0] == '[' {
- // auxint restriction
- switch op.aux {
- case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove []
- if !isVariable(x) {
- // code
- fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
+ }
+ }
+
+ if auxint != "" {
+ if !isVariable(auxint) {
+ // code
+ fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[auxint]; ok {
+ fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
canFail = true
} else {
- // variable
- if _, ok := m[x]; ok {
- fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
- canFail = true
- } else {
- m[x] = struct{}{}
- fmt.Fprintf(w, "%s := %s.AuxInt\n", x, v)
- }
+ m[auxint] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.AuxInt\n", auxint, v)
}
- } else if a[0] == '{' {
- // aux restriction
- switch op.aux {
- case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove {}
- if !isVariable(x) {
- // code
- fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
+ }
+ }
+
+ if aux != "" {
+
+ if !isVariable(aux) {
+ // code
+ fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
+ canFail = true
+ } else {
+ // variable
+ if _, ok := m[aux]; ok {
+ fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
canFail = true
} else {
- // variable
- if _, ok := m[x]; ok {
- fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
- canFail = true
- } else {
- m[x] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Aux\n", x, v)
- }
+ m[aux] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Aux\n", aux, v)
}
- } else if a == "_" {
- argnum++
- } else if !strings.Contains(a, "(") {
+ }
+ }
+
+ for i, arg := range args {
+ if arg == "_" {
+ continue
+ }
+ if !strings.Contains(arg, "(") {
// leaf variable
- if _, ok := m[a]; ok {
+ if _, ok := m[arg]; ok {
// variable already has a definition. Check whether
// the old definition and the new definition match.
// For example, (add x x). Equality is just pointer equality
// on Values (so cse is important to do before lowering).
- fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", a, v, argnum)
+ fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", arg, v, i)
canFail = true
} else {
// remember that this variable references the given value
- m[a] = struct{}{}
- fmt.Fprintf(w, "%s := %s.Args[%d]\n", a, v, argnum)
+ m[arg] = struct{}{}
+ fmt.Fprintf(w, "%s := %s.Args[%d]\n", arg, v, i)
}
- argnum++
+ continue
+ }
+ // compound sexpr
+ var argname string
+ colon := strings.Index(arg, ":")
+ openparen := strings.Index(arg, "(")
+ if colon >= 0 && openparen >= 0 && colon < openparen {
+ // rule-specified name
+ argname = arg[:colon]
+ arg = arg[colon+1:]
} else {
- // compound sexpr
- var argname string
- colon := strings.Index(a, ":")
- openparen := strings.Index(a, "(")
- if colon >= 0 && openparen >= 0 && colon < openparen {
- // rule-specified name
- argname = a[:colon]
- a = a[colon+1:]
- } else {
- // autogenerated name
- argname = fmt.Sprintf("%s_%d", v, argnum)
- }
- fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, argnum)
- if genMatch0(w, arch, a, argname, m, false, loc) {
- canFail = true
- }
- argnum++
+ // autogenerated name
+ argname = fmt.Sprintf("%s_%d", v, i)
+ }
+ fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, i)
+ if genMatch0(w, arch, arg, argname, m, false, loc) {
+ canFail = true
}
}
+
if op.argLength == -1 {
- fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, argnum)
+ fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, len(args))
canFail = true
- } else if int(op.argLength) != argnum {
- log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
}
return canFail
}
@@ -500,105 +469,44 @@
return result
}
- s := split(result[1 : len(result)-1]) // remove parens, then split
-
- // Find op record
- var op opData
- for _, x := range genericOps {
- if x.name == s[0] {
- op = x
- break
- }
- }
- for _, x := range arch.ops {
- if x.name == s[0] {
- op = x
- break
- }
- }
- if op.name == "" {
- log.Fatalf("%s: unknown op %s", loc, s[0])
- }
+ op, oparch, typ, auxint, aux, args := parseValue(result, arch, loc)
// Find the type of the variable.
- var opType string
- var typeOverride bool
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction
- opType = a[1 : len(a)-1] // remove <>
- typeOverride = true
- break
- }
+ typeOverride := typ != ""
+ if typ == "" && op.typ != "" {
+ typ = typeName(op.typ)
}
- if opType == "" {
- // find default type, if any
- for _, op := range arch.ops {
- if op.name == s[0] && op.typ != "" {
- opType = typeName(op.typ)
- break
- }
- }
- }
- if opType == "" {
- for _, op := range genericOps {
- if op.name == s[0] && op.typ != "" {
- opType = typeName(op.typ)
- break
- }
- }
- }
+
var v string
if top && !move {
v = "v"
- fmt.Fprintf(w, "v.reset(%s)\n", opName(s[0], arch))
+ fmt.Fprintf(w, "v.reset(Op%s%s)\n", oparch, op.name)
if typeOverride {
- fmt.Fprintf(w, "v.Type = %s\n", opType)
+ fmt.Fprintf(w, "v.Type = %s\n", typ)
}
} else {
- if opType == "" {
- log.Fatalf("sub-expression %s (op=%s) must have a type", result, s[0])
+ if typ == "" {
+ log.Fatalf("sub-expression %s (op=Op%s%s) must have a type", result, oparch, op.name)
}
v = fmt.Sprintf("v%d", *alloc)
*alloc++
- fmt.Fprintf(w, "%s := b.NewValue0(v.Line, %s, %s)\n", v, opName(s[0], arch), opType)
+ fmt.Fprintf(w, "%s := b.NewValue0(v.Line, Op%s%s, %s)\n", v, oparch, op.name, typ)
if move && top {
// Rewrite original into a copy
fmt.Fprintf(w, "v.reset(OpCopy)\n")
fmt.Fprintf(w, "v.AddArg(%s)\n", v)
}
}
- argnum := 0
- for _, a := range s[1:] {
- if a[0] == '<' {
- // type restriction, handled above
- } else if a[0] == '[' {
- // auxint restriction
- switch op.aux {
- case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove []
- fmt.Fprintf(w, "%s.AuxInt = %s\n", v, x)
- } else if a[0] == '{' {
- // aux restriction
- switch op.aux {
- case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
- default:
- log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
- }
- x := a[1 : len(a)-1] // remove {}
- fmt.Fprintf(w, "%s.Aux = %s\n", v, x)
- } else {
- // regular argument (sexpr or variable)
- x := genResult0(w, arch, a, alloc, false, move, loc)
- fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
- argnum++
- }
+
+ if auxint != "" {
+ fmt.Fprintf(w, "%s.AuxInt = %s\n", v, auxint)
}
- if op.argLength != -1 && int(op.argLength) != argnum {
- log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
+ if aux != "" {
+ fmt.Fprintf(w, "%s.Aux = %s\n", v, aux)
+ }
+ for _, arg := range args {
+ x := genResult0(w, arch, arg, alloc, false, move, loc)
+ fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
}
return v
@@ -666,16 +574,102 @@
return false
}
-// opName converts from an op name specified in a rule file to an Op enum.
-// if the name matches a generic op, returns "Op" plus the specified name.
-// Otherwise, returns "Op" plus arch name plus op name.
-func opName(name string, arch arch) string {
- for _, op := range genericOps {
- if op.name == name {
- return "Op" + name
+// parseValue parses a parenthesized value from a rule.
+// The value can be from the match or the result side.
+// It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args.
+// oparch is the architecture that op is located in, or "" for generic.
+func parseValue(val string, arch arch, loc string) (op opData, oparch string, typ string, auxint string, aux string, args []string) {
+ val = val[1 : len(val)-1] // remove ()
+
+ // Split val up into regions.
+ // Split by spaces/tabs, except those contained in (), {}, [], or <>.
+ s := split(val)
+
+ // Extract restrictions and args.
+ for _, a := range s[1:] {
+ switch a[0] {
+ case '<':
+ typ = a[1 : len(a)-1] // remove <>
+ case '[':
+ auxint = a[1 : len(a)-1] // remove []
+ case '{':
+ aux = a[1 : len(a)-1] // remove {}
+ default:
+ args = append(args, a)
}
}
- return "Op" + arch.name + name
+
+ // Resolve the op.
+
+ // match reports whether x is a good op to select.
+ // If strict is true, rule generation might succeed.
+ // If strict is false, rule generation has failed,
+ // but we're trying to generate a useful error.
+ // Doing strict=true then strict=false allows
+ // precise op matching while retaining good error messages.
+ match := func(x opData, strict bool, archname string) bool {
+ if x.name != s[0] {
+ return false
+ }
+ if x.argLength != -1 && int(x.argLength) != len(args) {
+ if strict {
+ return false
+ } else {
+ log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s[0], archname, x.argLength, len(args))
+ }
+ }
+ return true
+ }
+
+ for _, x := range genericOps {
+ if match(x, true, "generic") {
+ op = x
+ break
+ }
+ }
+ if arch.name != "generic" {
+ for _, x := range arch.ops {
+ if match(x, true, arch.name) {
+ if op.name != "" {
+ log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name)
+ }
+ op = x
+ oparch = arch.name
+ break
+ }
+ }
+ }
+
+ if op.name == "" {
+ // Failed to find the op.
+ // Run through everything again with strict=false
+ // to generate useful diagnostic messages before failing.
+ for _, x := range genericOps {
+ match(x, false, "generic")
+ }
+ for _, x := range arch.ops {
+ match(x, false, arch.name)
+ }
+ log.Fatalf("%s: unknown op %s", loc, s)
+ }
+
+ // Sanity check aux, auxint.
+ if auxint != "" {
+ switch op.aux {
+ case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
+ default:
+ log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
+ }
+ }
+ if aux != "" {
+ switch op.aux {
+ case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
+ default:
+ log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+ }
+ }
+
+ return
}
func blockName(name string, arch arch) string {
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index a6ded59..d54a9cb 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -20,6 +20,184 @@
return rewriteValue386_Op386ANDL(v, config)
case Op386ANDLconst:
return rewriteValue386_Op386ANDLconst(v, config)
+ case Op386CMPB:
+ return rewriteValue386_Op386CMPB(v, config)
+ case Op386CMPBconst:
+ return rewriteValue386_Op386CMPBconst(v, config)
+ case Op386CMPL:
+ return rewriteValue386_Op386CMPL(v, config)
+ case Op386CMPLconst:
+ return rewriteValue386_Op386CMPLconst(v, config)
+ case Op386CMPW:
+ return rewriteValue386_Op386CMPW(v, config)
+ case Op386CMPWconst:
+ return rewriteValue386_Op386CMPWconst(v, config)
+ case Op386LEAL:
+ return rewriteValue386_Op386LEAL(v, config)
+ case Op386LEAL1:
+ return rewriteValue386_Op386LEAL1(v, config)
+ case Op386LEAL2:
+ return rewriteValue386_Op386LEAL2(v, config)
+ case Op386LEAL4:
+ return rewriteValue386_Op386LEAL4(v, config)
+ case Op386LEAL8:
+ return rewriteValue386_Op386LEAL8(v, config)
+ case Op386MOVBLSX:
+ return rewriteValue386_Op386MOVBLSX(v, config)
+ case Op386MOVBLSXload:
+ return rewriteValue386_Op386MOVBLSXload(v, config)
+ case Op386MOVBLZX:
+ return rewriteValue386_Op386MOVBLZX(v, config)
+ case Op386MOVBload:
+ return rewriteValue386_Op386MOVBload(v, config)
+ case Op386MOVBloadidx1:
+ return rewriteValue386_Op386MOVBloadidx1(v, config)
+ case Op386MOVBstore:
+ return rewriteValue386_Op386MOVBstore(v, config)
+ case Op386MOVBstoreconst:
+ return rewriteValue386_Op386MOVBstoreconst(v, config)
+ case Op386MOVBstoreconstidx1:
+ return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
+ case Op386MOVBstoreidx1:
+ return rewriteValue386_Op386MOVBstoreidx1(v, config)
+ case Op386MOVLload:
+ return rewriteValue386_Op386MOVLload(v, config)
+ case Op386MOVLloadidx1:
+ return rewriteValue386_Op386MOVLloadidx1(v, config)
+ case Op386MOVLloadidx4:
+ return rewriteValue386_Op386MOVLloadidx4(v, config)
+ case Op386MOVLstore:
+ return rewriteValue386_Op386MOVLstore(v, config)
+ case Op386MOVLstoreconst:
+ return rewriteValue386_Op386MOVLstoreconst(v, config)
+ case Op386MOVLstoreconstidx1:
+ return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
+ case Op386MOVLstoreconstidx4:
+ return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
+ case Op386MOVLstoreidx1:
+ return rewriteValue386_Op386MOVLstoreidx1(v, config)
+ case Op386MOVLstoreidx4:
+ return rewriteValue386_Op386MOVLstoreidx4(v, config)
+ case Op386MOVSDload:
+ return rewriteValue386_Op386MOVSDload(v, config)
+ case Op386MOVSDloadidx1:
+ return rewriteValue386_Op386MOVSDloadidx1(v, config)
+ case Op386MOVSDloadidx8:
+ return rewriteValue386_Op386MOVSDloadidx8(v, config)
+ case Op386MOVSDstore:
+ return rewriteValue386_Op386MOVSDstore(v, config)
+ case Op386MOVSDstoreidx1:
+ return rewriteValue386_Op386MOVSDstoreidx1(v, config)
+ case Op386MOVSDstoreidx8:
+ return rewriteValue386_Op386MOVSDstoreidx8(v, config)
+ case Op386MOVSSload:
+ return rewriteValue386_Op386MOVSSload(v, config)
+ case Op386MOVSSloadidx1:
+ return rewriteValue386_Op386MOVSSloadidx1(v, config)
+ case Op386MOVSSloadidx4:
+ return rewriteValue386_Op386MOVSSloadidx4(v, config)
+ case Op386MOVSSstore:
+ return rewriteValue386_Op386MOVSSstore(v, config)
+ case Op386MOVSSstoreidx1:
+ return rewriteValue386_Op386MOVSSstoreidx1(v, config)
+ case Op386MOVSSstoreidx4:
+ return rewriteValue386_Op386MOVSSstoreidx4(v, config)
+ case Op386MOVWLSX:
+ return rewriteValue386_Op386MOVWLSX(v, config)
+ case Op386MOVWLSXload:
+ return rewriteValue386_Op386MOVWLSXload(v, config)
+ case Op386MOVWLZX:
+ return rewriteValue386_Op386MOVWLZX(v, config)
+ case Op386MOVWload:
+ return rewriteValue386_Op386MOVWload(v, config)
+ case Op386MOVWloadidx1:
+ return rewriteValue386_Op386MOVWloadidx1(v, config)
+ case Op386MOVWloadidx2:
+ return rewriteValue386_Op386MOVWloadidx2(v, config)
+ case Op386MOVWstore:
+ return rewriteValue386_Op386MOVWstore(v, config)
+ case Op386MOVWstoreconst:
+ return rewriteValue386_Op386MOVWstoreconst(v, config)
+ case Op386MOVWstoreconstidx1:
+ return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
+ case Op386MOVWstoreconstidx2:
+ return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
+ case Op386MOVWstoreidx1:
+ return rewriteValue386_Op386MOVWstoreidx1(v, config)
+ case Op386MOVWstoreidx2:
+ return rewriteValue386_Op386MOVWstoreidx2(v, config)
+ case Op386MULL:
+ return rewriteValue386_Op386MULL(v, config)
+ case Op386MULLconst:
+ return rewriteValue386_Op386MULLconst(v, config)
+ case Op386NEGL:
+ return rewriteValue386_Op386NEGL(v, config)
+ case Op386NOTL:
+ return rewriteValue386_Op386NOTL(v, config)
+ case Op386ORL:
+ return rewriteValue386_Op386ORL(v, config)
+ case Op386ORLconst:
+ return rewriteValue386_Op386ORLconst(v, config)
+ case Op386ROLBconst:
+ return rewriteValue386_Op386ROLBconst(v, config)
+ case Op386ROLLconst:
+ return rewriteValue386_Op386ROLLconst(v, config)
+ case Op386ROLWconst:
+ return rewriteValue386_Op386ROLWconst(v, config)
+ case Op386SARB:
+ return rewriteValue386_Op386SARB(v, config)
+ case Op386SARBconst:
+ return rewriteValue386_Op386SARBconst(v, config)
+ case Op386SARL:
+ return rewriteValue386_Op386SARL(v, config)
+ case Op386SARLconst:
+ return rewriteValue386_Op386SARLconst(v, config)
+ case Op386SARW:
+ return rewriteValue386_Op386SARW(v, config)
+ case Op386SARWconst:
+ return rewriteValue386_Op386SARWconst(v, config)
+ case Op386SBBL:
+ return rewriteValue386_Op386SBBL(v, config)
+ case Op386SBBLcarrymask:
+ return rewriteValue386_Op386SBBLcarrymask(v, config)
+ case Op386SETA:
+ return rewriteValue386_Op386SETA(v, config)
+ case Op386SETAE:
+ return rewriteValue386_Op386SETAE(v, config)
+ case Op386SETB:
+ return rewriteValue386_Op386SETB(v, config)
+ case Op386SETBE:
+ return rewriteValue386_Op386SETBE(v, config)
+ case Op386SETEQ:
+ return rewriteValue386_Op386SETEQ(v, config)
+ case Op386SETG:
+ return rewriteValue386_Op386SETG(v, config)
+ case Op386SETGE:
+ return rewriteValue386_Op386SETGE(v, config)
+ case Op386SETL:
+ return rewriteValue386_Op386SETL(v, config)
+ case Op386SETLE:
+ return rewriteValue386_Op386SETLE(v, config)
+ case Op386SETNE:
+ return rewriteValue386_Op386SETNE(v, config)
+ case Op386SHLL:
+ return rewriteValue386_Op386SHLL(v, config)
+ case Op386SHRB:
+ return rewriteValue386_Op386SHRB(v, config)
+ case Op386SHRL:
+ return rewriteValue386_Op386SHRL(v, config)
+ case Op386SHRW:
+ return rewriteValue386_Op386SHRW(v, config)
+ case Op386SUBL:
+ return rewriteValue386_Op386SUBL(v, config)
+ case Op386SUBLcarry:
+ return rewriteValue386_Op386SUBLcarry(v, config)
+ case Op386SUBLconst:
+ return rewriteValue386_Op386SUBLconst(v, config)
+ case Op386XORL:
+ return rewriteValue386_Op386XORL(v, config)
+ case Op386XORLconst:
+ return rewriteValue386_Op386XORLconst(v, config)
case OpAdd16:
return rewriteValue386_OpAdd16(v, config)
case OpAdd32:
@@ -48,18 +226,6 @@
return rewriteValue386_OpAndB(v, config)
case OpBswap32:
return rewriteValue386_OpBswap32(v, config)
- case Op386CMPB:
- return rewriteValue386_Op386CMPB(v, config)
- case Op386CMPBconst:
- return rewriteValue386_Op386CMPBconst(v, config)
- case Op386CMPL:
- return rewriteValue386_Op386CMPL(v, config)
- case Op386CMPLconst:
- return rewriteValue386_Op386CMPLconst(v, config)
- case Op386CMPW:
- return rewriteValue386_Op386CMPW(v, config)
- case Op386CMPWconst:
- return rewriteValue386_Op386CMPWconst(v, config)
case OpClosureCall:
return rewriteValue386_OpClosureCall(v, config)
case OpCom16:
@@ -186,16 +352,6 @@
return rewriteValue386_OpIsNonNil(v, config)
case OpIsSliceInBounds:
return rewriteValue386_OpIsSliceInBounds(v, config)
- case Op386LEAL:
- return rewriteValue386_Op386LEAL(v, config)
- case Op386LEAL1:
- return rewriteValue386_Op386LEAL1(v, config)
- case Op386LEAL2:
- return rewriteValue386_Op386LEAL2(v, config)
- case Op386LEAL4:
- return rewriteValue386_Op386LEAL4(v, config)
- case Op386LEAL8:
- return rewriteValue386_Op386LEAL8(v, config)
case OpLeq16:
return rewriteValue386_OpLeq16(v, config)
case OpLeq16U:
@@ -260,94 +416,6 @@
return rewriteValue386_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValue386_OpLsh8x8(v, config)
- case Op386MOVBLSX:
- return rewriteValue386_Op386MOVBLSX(v, config)
- case Op386MOVBLSXload:
- return rewriteValue386_Op386MOVBLSXload(v, config)
- case Op386MOVBLZX:
- return rewriteValue386_Op386MOVBLZX(v, config)
- case Op386MOVBload:
- return rewriteValue386_Op386MOVBload(v, config)
- case Op386MOVBloadidx1:
- return rewriteValue386_Op386MOVBloadidx1(v, config)
- case Op386MOVBstore:
- return rewriteValue386_Op386MOVBstore(v, config)
- case Op386MOVBstoreconst:
- return rewriteValue386_Op386MOVBstoreconst(v, config)
- case Op386MOVBstoreconstidx1:
- return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
- case Op386MOVBstoreidx1:
- return rewriteValue386_Op386MOVBstoreidx1(v, config)
- case Op386MOVLload:
- return rewriteValue386_Op386MOVLload(v, config)
- case Op386MOVLloadidx1:
- return rewriteValue386_Op386MOVLloadidx1(v, config)
- case Op386MOVLloadidx4:
- return rewriteValue386_Op386MOVLloadidx4(v, config)
- case Op386MOVLstore:
- return rewriteValue386_Op386MOVLstore(v, config)
- case Op386MOVLstoreconst:
- return rewriteValue386_Op386MOVLstoreconst(v, config)
- case Op386MOVLstoreconstidx1:
- return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
- case Op386MOVLstoreconstidx4:
- return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
- case Op386MOVLstoreidx1:
- return rewriteValue386_Op386MOVLstoreidx1(v, config)
- case Op386MOVLstoreidx4:
- return rewriteValue386_Op386MOVLstoreidx4(v, config)
- case Op386MOVSDload:
- return rewriteValue386_Op386MOVSDload(v, config)
- case Op386MOVSDloadidx1:
- return rewriteValue386_Op386MOVSDloadidx1(v, config)
- case Op386MOVSDloadidx8:
- return rewriteValue386_Op386MOVSDloadidx8(v, config)
- case Op386MOVSDstore:
- return rewriteValue386_Op386MOVSDstore(v, config)
- case Op386MOVSDstoreidx1:
- return rewriteValue386_Op386MOVSDstoreidx1(v, config)
- case Op386MOVSDstoreidx8:
- return rewriteValue386_Op386MOVSDstoreidx8(v, config)
- case Op386MOVSSload:
- return rewriteValue386_Op386MOVSSload(v, config)
- case Op386MOVSSloadidx1:
- return rewriteValue386_Op386MOVSSloadidx1(v, config)
- case Op386MOVSSloadidx4:
- return rewriteValue386_Op386MOVSSloadidx4(v, config)
- case Op386MOVSSstore:
- return rewriteValue386_Op386MOVSSstore(v, config)
- case Op386MOVSSstoreidx1:
- return rewriteValue386_Op386MOVSSstoreidx1(v, config)
- case Op386MOVSSstoreidx4:
- return rewriteValue386_Op386MOVSSstoreidx4(v, config)
- case Op386MOVWLSX:
- return rewriteValue386_Op386MOVWLSX(v, config)
- case Op386MOVWLSXload:
- return rewriteValue386_Op386MOVWLSXload(v, config)
- case Op386MOVWLZX:
- return rewriteValue386_Op386MOVWLZX(v, config)
- case Op386MOVWload:
- return rewriteValue386_Op386MOVWload(v, config)
- case Op386MOVWloadidx1:
- return rewriteValue386_Op386MOVWloadidx1(v, config)
- case Op386MOVWloadidx2:
- return rewriteValue386_Op386MOVWloadidx2(v, config)
- case Op386MOVWstore:
- return rewriteValue386_Op386MOVWstore(v, config)
- case Op386MOVWstoreconst:
- return rewriteValue386_Op386MOVWstoreconst(v, config)
- case Op386MOVWstoreconstidx1:
- return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
- case Op386MOVWstoreconstidx2:
- return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
- case Op386MOVWstoreidx1:
- return rewriteValue386_Op386MOVWstoreidx1(v, config)
- case Op386MOVWstoreidx2:
- return rewriteValue386_Op386MOVWstoreidx2(v, config)
- case Op386MULL:
- return rewriteValue386_Op386MULL(v, config)
- case Op386MULLconst:
- return rewriteValue386_Op386MULLconst(v, config)
case OpMod16:
return rewriteValue386_OpMod16(v, config)
case OpMod16u:
@@ -374,10 +442,6 @@
return rewriteValue386_OpMul64F(v, config)
case OpMul8:
return rewriteValue386_OpMul8(v, config)
- case Op386NEGL:
- return rewriteValue386_Op386NEGL(v, config)
- case Op386NOTL:
- return rewriteValue386_Op386NOTL(v, config)
case OpNeg16:
return rewriteValue386_OpNeg16(v, config)
case OpNeg32:
@@ -406,10 +470,6 @@
return rewriteValue386_OpNilCheck(v, config)
case OpNot:
return rewriteValue386_OpNot(v, config)
- case Op386ORL:
- return rewriteValue386_Op386ORL(v, config)
- case Op386ORLconst:
- return rewriteValue386_Op386ORLconst(v, config)
case OpOffPtr:
return rewriteValue386_OpOffPtr(v, config)
case OpOr16:
@@ -420,12 +480,6 @@
return rewriteValue386_OpOr8(v, config)
case OpOrB:
return rewriteValue386_OpOrB(v, config)
- case Op386ROLBconst:
- return rewriteValue386_Op386ROLBconst(v, config)
- case Op386ROLLconst:
- return rewriteValue386_Op386ROLLconst(v, config)
- case Op386ROLWconst:
- return rewriteValue386_Op386ROLWconst(v, config)
case OpRsh16Ux16:
return rewriteValue386_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
@@ -474,56 +528,6 @@
return rewriteValue386_OpRsh8x64(v, config)
case OpRsh8x8:
return rewriteValue386_OpRsh8x8(v, config)
- case Op386SARB:
- return rewriteValue386_Op386SARB(v, config)
- case Op386SARBconst:
- return rewriteValue386_Op386SARBconst(v, config)
- case Op386SARL:
- return rewriteValue386_Op386SARL(v, config)
- case Op386SARLconst:
- return rewriteValue386_Op386SARLconst(v, config)
- case Op386SARW:
- return rewriteValue386_Op386SARW(v, config)
- case Op386SARWconst:
- return rewriteValue386_Op386SARWconst(v, config)
- case Op386SBBL:
- return rewriteValue386_Op386SBBL(v, config)
- case Op386SBBLcarrymask:
- return rewriteValue386_Op386SBBLcarrymask(v, config)
- case Op386SETA:
- return rewriteValue386_Op386SETA(v, config)
- case Op386SETAE:
- return rewriteValue386_Op386SETAE(v, config)
- case Op386SETB:
- return rewriteValue386_Op386SETB(v, config)
- case Op386SETBE:
- return rewriteValue386_Op386SETBE(v, config)
- case Op386SETEQ:
- return rewriteValue386_Op386SETEQ(v, config)
- case Op386SETG:
- return rewriteValue386_Op386SETG(v, config)
- case Op386SETGE:
- return rewriteValue386_Op386SETGE(v, config)
- case Op386SETL:
- return rewriteValue386_Op386SETL(v, config)
- case Op386SETLE:
- return rewriteValue386_Op386SETLE(v, config)
- case Op386SETNE:
- return rewriteValue386_Op386SETNE(v, config)
- case Op386SHLL:
- return rewriteValue386_Op386SHLL(v, config)
- case Op386SHRB:
- return rewriteValue386_Op386SHRB(v, config)
- case Op386SHRL:
- return rewriteValue386_Op386SHRL(v, config)
- case Op386SHRW:
- return rewriteValue386_Op386SHRW(v, config)
- case Op386SUBL:
- return rewriteValue386_Op386SUBL(v, config)
- case Op386SUBLcarry:
- return rewriteValue386_Op386SUBLcarry(v, config)
- case Op386SUBLconst:
- return rewriteValue386_Op386SUBLconst(v, config)
case OpSignExt16to32:
return rewriteValue386_OpSignExt16to32(v, config)
case OpSignExt8to16:
@@ -560,10 +564,6 @@
return rewriteValue386_OpTrunc32to16(v, config)
case OpTrunc32to8:
return rewriteValue386_OpTrunc32to8(v, config)
- case Op386XORL:
- return rewriteValue386_Op386XORL(v, config)
- case Op386XORLconst:
- return rewriteValue386_Op386XORLconst(v, config)
case OpXor16:
return rewriteValue386_OpXor16(v, config)
case OpXor32:
@@ -1181,216 +1181,6 @@
}
return false
}
-func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32F x y)
- // cond:
- // result: (ADDSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32carry x y)
- // cond:
- // result: (ADDLcarry x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDLcarry)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32withcarry x y c)
- // cond:
- // result: (ADCL x y c)
- for {
- x := v.Args[0]
- y := v.Args[1]
- c := v.Args[2]
- v.reset(Op386ADCL)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(c)
- return true
- }
-}
-func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64F x y)
- // cond:
- // result: (ADDSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add8 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AddPtr x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Addr {sym} base)
- // cond:
- // result: (LEAL {sym} base)
- for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(Op386LEAL)
- v.Aux = sym
- v.AddArg(base)
- return true
- }
-}
-func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And16 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And32 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And8 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AndB x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Bswap32 x)
- // cond:
- // result: (BSWAPL x)
- for {
- x := v.Args[0]
- v.reset(Op386BSWAPL)
- v.AddArg(x)
- return true
- }
-}
func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1405,8 +1195,8 @@
}
c := v_1.AuxInt
v.reset(Op386CMPBconst)
- v.AddArg(x)
v.AuxInt = int64(int8(c))
+ v.AddArg(x)
return true
}
// match: (CMPB (MOVLconst [c]) x)
@@ -1421,8 +1211,8 @@
x := v.Args[1]
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int8(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1435,12 +1225,12 @@
// cond: int8(x)==int8(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) == int8(y)) {
break
}
@@ -1451,12 +1241,12 @@
// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -1467,12 +1257,12 @@
// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -1483,12 +1273,12 @@
// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -1499,12 +1289,12 @@
// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -1515,12 +1305,12 @@
// cond: 0 <= int8(m) && int8(m) < int8(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int8(m) && int8(m) < int8(n)) {
break
}
@@ -1531,15 +1321,15 @@
// cond:
// result: (TESTB x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTB)
v.AddArg(x)
v.AddArg(y)
@@ -1549,15 +1339,15 @@
// cond:
// result: (TESTBconst [int64(int8(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTBconst)
v.AuxInt = int64(int8(c))
v.AddArg(x)
@@ -1567,10 +1357,10 @@
// cond:
// result: (TESTB x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(Op386TESTB)
v.AddArg(x)
v.AddArg(x)
@@ -1592,8 +1382,8 @@
}
c := v_1.AuxInt
v.reset(Op386CMPLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (CMPL (MOVLconst [c]) x)
@@ -1608,8 +1398,8 @@
x := v.Args[1]
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1622,12 +1412,12 @@
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) == int32(y)) {
break
}
@@ -1638,12 +1428,12 @@
// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -1654,12 +1444,12 @@
// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -1670,12 +1460,12 @@
// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -1686,12 +1476,12 @@
// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -1702,12 +1492,12 @@
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386SHRLconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
break
}
@@ -1718,12 +1508,12 @@
// cond: 0 <= int32(m) && int32(m) < int32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int32(m) && int32(m) < int32(n)) {
break
}
@@ -1734,15 +1524,15 @@
// cond:
// result: (TESTL x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTL)
v.AddArg(x)
v.AddArg(y)
@@ -1752,15 +1542,15 @@
// cond:
// result: (TESTLconst [c] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTLconst)
v.AuxInt = c
v.AddArg(x)
@@ -1770,10 +1560,10 @@
// cond:
// result: (TESTL x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(Op386TESTL)
v.AddArg(x)
v.AddArg(x)
@@ -1795,8 +1585,8 @@
}
c := v_1.AuxInt
v.reset(Op386CMPWconst)
- v.AddArg(x)
v.AuxInt = int64(int16(c))
+ v.AddArg(x)
return true
}
// match: (CMPW (MOVLconst [c]) x)
@@ -1811,8 +1601,8 @@
x := v.Args[1]
v.reset(Op386InvertFlags)
v0 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int16(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1825,12 +1615,12 @@
// cond: int16(x)==int16(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) == int16(y)) {
break
}
@@ -1841,12 +1631,12 @@
// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -1857,12 +1647,12 @@
// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -1873,12 +1663,12 @@
// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -1889,12 +1679,12 @@
// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -1905,12 +1695,12 @@
// cond: 0 <= int16(m) && int16(m) < int16(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int16(m) && int16(m) < int16(n)) {
break
}
@@ -1921,15 +1711,15 @@
// cond:
// result: (TESTW x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTW)
v.AddArg(x)
v.AddArg(y)
@@ -1939,15 +1729,15 @@
// cond:
// result: (TESTWconst [int64(int16(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != Op386ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(Op386TESTWconst)
v.AuxInt = int64(int16(c))
v.AddArg(x)
@@ -1957,10 +1747,10 @@
// cond:
// result: (TESTW x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(Op386TESTW)
v.AddArg(x)
v.AddArg(x)
@@ -1968,979 +1758,6 @@
}
return false
}
-func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(Op386CALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com16 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(Op386NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com32 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(Op386NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com8 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(Op386NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const16 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32F [val])
- // cond:
- // result: (MOVSSconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVSSconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64F [val])
- // cond:
- // result: (MOVSDconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVSDconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const8 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstBool [b])
- // cond:
- // result: (MOVLconst [b])
- for {
- b := v.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = b
- return true
- }
-}
-func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstNil)
- // cond:
- // result: (MOVLconst [0])
- for {
- v.reset(Op386MOVLconst)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValue386_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Convert <t> x mem)
- // cond:
- // result: (MOVLconvert <t> x mem)
- for {
- t := v.Type
- x := v.Args[0]
- mem := v.Args[1]
- v.reset(Op386MOVLconvert)
- v.Type = t
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32 x)
- // cond:
- // result: (CVTTSS2SL x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTTSS2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64F x)
- // cond:
- // result: (CVTSS2SD x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTSS2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to32F x)
- // cond:
- // result: (CVTSL2SS x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTSL2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to64F x)
- // cond:
- // result: (CVTSL2SD x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTSL2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32 x)
- // cond:
- // result: (CVTTSD2SL x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTTSD2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32F x)
- // cond:
- // result: (CVTSD2SS x)
- for {
- x := v.Args[0]
- v.reset(Op386CVTSD2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpDeferCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (DeferCall [argwid] mem)
- // cond:
- // result: (CALLdefer [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(Op386CALLdefer)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16 x y)
- // cond:
- // result: (DIVW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16u x y)
- // cond:
- // result: (DIVWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32 x y)
- // cond:
- // result: (DIVL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32F x y)
- // cond:
- // result: (DIVSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32u x y)
- // cond:
- // result: (DIVLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64F x y)
- // cond:
- // result: (DIVSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8 x y)
- // cond:
- // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVW)
- v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8u x y)
- // cond:
- // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386DIVWU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpEq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq16 x y)
- // cond:
- // result: (SETEQ (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32 x y)
- // cond:
- // result: (SETEQ (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32F x y)
- // cond:
- // result: (SETEQF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64F x y)
- // cond:
- // result: (SETEQF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq8 x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqB x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqPtr x y)
- // cond:
- // result: (SETEQ (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETEQ)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16 x y)
- // cond:
- // result: (SETGE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16U x y)
- // cond:
- // result: (SETAE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32 x y)
- // cond:
- // result: (SETGE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32U x y)
- // cond:
- // result: (SETAE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8 x y)
- // cond:
- // result: (SETGE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8U x y)
- // cond:
- // result: (SETAE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETAE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetClosurePtr)
- // cond:
- // result: (LoweredGetClosurePtr)
- for {
- v.reset(Op386LoweredGetClosurePtr)
- return true
- }
-}
-func rewriteValue386_OpGetG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetG mem)
- // cond:
- // result: (LoweredGetG mem)
- for {
- mem := v.Args[0]
- v.reset(Op386LoweredGetG)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpGoCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GoCall [argwid] mem)
- // cond:
- // result: (CALLgo [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(Op386CALLgo)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16 x y)
- // cond:
- // result: (SETG (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETG)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16U x y)
- // cond:
- // result: (SETA (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETA)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32 x y)
- // cond:
- // result: (SETG (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETG)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32F x y)
- // cond:
- // result: (SETGF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32U x y)
- // cond:
- // result: (SETA (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETA)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64F x y)
- // cond:
- // result: (SETGF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8 x y)
- // cond:
- // result: (SETG (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETG)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8U x y)
- // cond:
- // result: (SETA (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETA)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpHmul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16 x y)
- // cond:
- // result: (HMULW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
- // cond:
- // result: (HMULWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
- // cond:
- // result: (HMULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
- // cond:
- // result: (HMULLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
- // cond:
- // result: (HMULB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
- // cond:
- // result: (HMULBU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386HMULBU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (InterCall [argwid] entry mem)
- // cond:
- // result: (CALLinter [argwid] entry mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(Op386CALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
- // cond:
- // result: (SETB (CMPL idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(Op386SETB)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil p)
- // cond:
- // result: (SETNE (TESTL p p))
- for {
- p := v.Args[0]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386TESTL, TypeFlags)
- v0.AddArg(p)
- v0.AddArg(p)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
- // cond:
- // result: (SETBE (CMPL idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValue386_Op386LEAL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -3629,742 +2446,6 @@
}
return false
}
-func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16 x y)
- // cond:
- // result: (SETLE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16U x y)
- // cond:
- // result: (SETBE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32 x y)
- // cond:
- // result: (SETLE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32U x y)
- // cond:
- // result: (SETBE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8 x y)
- // cond:
- // result: (SETLE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETLE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8U x y)
- // cond:
- // result: (SETBE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETBE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16 x y)
- // cond:
- // result: (SETL (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETL)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16U x y)
- // cond:
- // result: (SETB (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETB)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32 x y)
- // cond:
- // result: (SETL (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETL)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32F x y)
- // cond:
- // result: (SETGF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32U x y)
- // cond:
- // result: (SETB (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETB)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64F x y)
- // cond:
- // result: (SETGF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETGF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8 x y)
- // cond:
- // result: (SETL (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETL)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8U x y)
- // cond:
- // result: (SETB (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETB)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Load <t> ptr mem)
- // cond: (is32BitInt(t) || isPtr(t))
- // result: (MOVLload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(Op386MOVLload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is16BitInt(t)
- // result: (MOVWload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(Op386MOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (t.IsBoolean() || is8BitInt(t))
- // result: (MOVBload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean() || is8BitInt(t)) {
- break
- }
- v.reset(Op386MOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVSSload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
- break
- }
- v.reset(Op386MOVSSload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVSDload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
- break
- }
- v.reset(Op386MOVSDload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValue386_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
- // cond:
- // result: (ROLWconst <t> [c&15] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(Op386ROLWconst)
- v.Type = t
- v.AuxInt = c & 15
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 <t> x [c])
- // cond:
- // result: (ROLLconst <t> [c&31] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(Op386ROLLconst)
- v.Type = t
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
- // cond:
- // result: (ROLBconst <t> [c&7] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(Op386ROLBconst)
- v.Type = t
- v.AuxInt = c & 7
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SHLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(Op386SHLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh16x64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SHLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(Op386SHLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh32x64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SHLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(Op386SHLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh8x64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
func rewriteValue386_Op386MOVBLSX(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -8924,487 +7005,6 @@
}
return false
}
-func rewriteValue386_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16 x y)
- // cond:
- // result: (MODW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16u x y)
- // cond:
- // result: (MODWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32 x y)
- // cond:
- // result: (MODL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32u x y)
- // cond:
- // result: (MODLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMod8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8 x y)
- // cond:
- // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODW)
- v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8u x y)
- // cond:
- // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MODWU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpMove(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Move [s] _ _ mem)
- // cond: SizeAndAlign(s).Size() == 0
- // result: mem
- for {
- s := v.AuxInt
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 1
- // result: (MOVBstore dst (MOVBload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 1) {
- break
- }
- v.reset(Op386MOVBstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2
- // result: (MOVWstore dst (MOVWload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2) {
- break
- }
- v.reset(Op386MOVWstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4
- // result: (MOVLstore dst (MOVLload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4) {
- break
- }
- v.reset(Op386MOVLstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 3
- // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 3) {
- break
- }
- v.reset(Op386MOVBstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVWstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 5
- // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 5) {
- break
- }
- v.reset(Op386MOVBstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 6
- // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 6) {
- break
- }
- v.reset(Op386MOVWstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 7
- // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 7) {
- break
- }
- v.reset(Op386MOVLstore)
- v.AuxInt = 3
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 8
- // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 8) {
- break
- }
- v.reset(Op386MOVLstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0
- // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4]) (ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4]) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0) {
- break
- }
- v.reset(OpMove)
- v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
- v0 := b.NewValue0(v.Line, Op386ADDLconst, dst.Type)
- v0.AddArg(dst)
- v0.AuxInt = SizeAndAlign(s).Size() % 4
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386ADDLconst, src.Type)
- v1.AddArg(src)
- v1.AuxInt = SizeAndAlign(s).Size() % 4
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
- v2.AddArg(dst)
- v3 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice
- // result: (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
- break
- }
- v.reset(Op386DUFFCOPY)
- v.AuxInt = 10 * (128 - SizeAndAlign(s).Size()/4)
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0
- // result: (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !((SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0) {
- break
- }
- v.reset(Op386REPMOVSL)
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
- v0.AuxInt = SizeAndAlign(s).Size() / 4
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValue386_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul16 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32F x y)
- // cond:
- // result: (MULSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32uhilo x y)
- // cond:
- // result: (MULLQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULLQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64F x y)
- // cond:
- // result: (MULSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul8 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
func rewriteValue386_Op386NEGL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -9441,225 +7041,6 @@
}
return false
}
-func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg16 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(Op386NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(Op386NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32F x)
- // cond:
- // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(Op386PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386MOVSSconst, config.Frontend().TypeFloat32())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64F x)
- // cond:
- // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(Op386PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386MOVSDconst, config.Frontend().TypeFloat64())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg8 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(Op386NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq16 x y)
- // cond:
- // result: (SETNE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32 x y)
- // cond:
- // result: (SETNE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32F x y)
- // cond:
- // result: (SETNEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64F x y)
- // cond:
- // result: (SETNEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNEF)
- v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq8 x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqB x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqPtr x y)
- // cond:
- // result: (SETNE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SETNE)
- v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NilCheck ptr mem)
- // cond:
- // result: (LoweredNilCheck ptr mem)
- for {
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(Op386LoweredNilCheck)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValue386_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Not x)
- // cond:
- // result: (XORLconst [1] x)
- for {
- x := v.Args[0]
- v.reset(Op386XORLconst)
- v.AuxInt = 1
- v.AddArg(x)
- return true
- }
-}
func rewriteValue386_Op386ORL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -10069,81 +7450,6 @@
}
return false
}
-func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OffPtr [off] ptr)
- // cond:
- // result: (ADDLconst [off] ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(Op386ADDLconst)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
-}
-func rewriteValue386_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or16 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or32 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or8 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValue386_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OrB x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
func rewriteValue386_Op386ROLBconst(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -10246,705 +7552,6 @@
}
return false
}
-func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SHRWconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(Op386SHRWconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh16Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x16 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x32 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SARWconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(Op386SARWconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh16x64 x (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (SARWconst x [15])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(Op386SARWconst)
- v.AddArg(x)
- v.AuxInt = 15
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x8 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SHRLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(Op386SHRLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh32Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x16 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x32 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SARLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(Op386SARLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh32x64 x (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (SARLconst x [31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(Op386SARLconst)
- v.AddArg(x)
- v.AuxInt = 31
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x8 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SHRBconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(Op386SHRBconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh8Ux64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386ANDL)
- v0 := b.NewValue0(v.Line, Op386SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x16 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x32 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SARBconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(Op386SARBconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Rsh8x64 x (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (SARBconst x [7])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(Op386SARBconst)
- v.AddArg(x)
- v.AuxInt = 7
- return true
- }
- return false
-}
-func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x8 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(Op386SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
- v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValue386_Op386SARB(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12182,8 +8789,8 @@
}
c := v_1.AuxInt
v.reset(Op386SUBLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (SUBL (MOVLconst [c]) x)
@@ -12198,8 +8805,8 @@
x := v.Args[1]
v.reset(Op386NEGL)
v0 := b.NewValue0(v.Line, Op386SUBLconst, v.Type)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -12266,6 +8873,3495 @@
return true
}
}
+func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORL x (MOVLconst [c]))
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(Op386XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL (MOVLconst [c]) x)
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(Op386XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL x x)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32carry x y)
+ // cond:
+ // result: (ADDLcarry x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDLcarry)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32withcarry x y c)
+ // cond:
+ // result: (ADCL x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(Op386ADCL)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (LEAL {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(Op386LEAL)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap32 x)
+ // cond:
+ // result: (BSWAPL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386BSWAPL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVSSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVSSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVSDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVSDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVLconst [b])
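+ // Note: this b (the AuxInt) shadows the block variable b declared above; the generated _ = b keeps the outer binding used.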
+ for {
+ b := v.AuxInt
+ v.reset(Op386MOVLconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v.reset(Op386MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValue386_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert <t> x mem)
+ // cond:
+ // result: (MOVLconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386MOVLconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (CVTTSS2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTTSS2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (CVTSS2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSS2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (CVTSL2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSL2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (CVTSL2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSL2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (CVTTSD2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTTSD2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (CVTSD2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386CVTSD2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(Op386CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIVW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (DIVWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIVL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (DIVLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
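+ // 8-bit division is lowered by sign-extending both operands to 16 bits and using the 16-bit divide.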
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386DIVWU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (SETEQF (UCOMISS x y))
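+ // SETEQF, unlike plain SETEQ, also consults the parity flag that UCOMISS sets for unordered (NaN) operands,
+ // so the comparison is false when either input is NaN.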
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (SETGE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (SETAE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (SETGE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (SETAE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (SETGE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (SETAE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETAE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(Op386LoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValue386_OpGetG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetG mem)
+ // cond:
+ // result: (LoweredGetG mem)
+ for {
+ mem := v.Args[0]
+ v.reset(Op386LoweredGetG)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(Op386CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (SETG (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (SETA (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (SETG (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (SETA (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (SETG (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETG)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (SETA (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETA)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (HMULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (HMULWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (HMULB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (HMULBU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386HMULBU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (SETB (CMPL idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil p)
+ // cond:
+ // result: (SETNE (TESTL p p))
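+ // TESTL p p ANDs the pointer with itself purely to set the zero flag; SETNE then yields 1 exactly when p is non-nil.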
+ for {
+ p := v.Args[0]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386TESTL, TypeFlags)
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (SETBE (CMPL idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS y x))
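+ // Note the swapped operands: x <= y is evaluated as y >= x. After UCOMISS, the unsigned-style
+ // "greater or equal" condition is false for unordered (NaN) inputs, matching Go semantics.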
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (SETL (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (SETB (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (SETL (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (SETB (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (SETL (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (SETB (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVLload ptr mem)
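+ // Load is lowered by type: each rule below selects the machine load of the matching width and register class.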
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (ROLWconst <t> [c&15] x)
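+ // The rotate count is reduced modulo the operand width (c&15) at rewrite time.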
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLWconst)
+ v.Type = t
+ v.AuxInt = c & 15
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 <t> x [c])
+ // cond:
+ // result: (ROLLconst <t> [c&31] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLLconst)
+ v.Type = t
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (ROLBconst <t> [c&7] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(Op386ROLBconst)
+ v.Type = t
+ v.AuxInt = c & 7
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
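+ // Go defines shifts by a count >= the operand width to produce 0, but SHLL uses only the low 5 bits of the count.
+ // SBBLcarrymask turns the carry from the compare into a mask: -1 when y < 32, 0 otherwise, so the ANDL zeroes
+ // oversized shifts. Counts 16-31 already leave zeros in the low 16 bits, so comparing against 32 suffices here.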
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
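+ // A constant count of at least the operand width folds the whole shift to the constant 0.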
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (MODW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (MODWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (MODL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (MODLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODW)
+ v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MODWU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4]) (ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4]) (MOVLstore dst (MOVLload src mem) mem))
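+ // For sizes over 8 that are not a multiple of 4: copy one word up front, advance both pointers by size%4,
+ // and recurse on the remaining multiple-of-4 bytes. The two copies overlap by up to 3 bytes, which is
+ // harmless since the overlapping bytes are rewritten with identical data.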
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
+ v0 := b.NewValue0(v.Line, Op386ADDLconst, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 4
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386ADDLconst, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 4
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
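+ // The AuxInt is the entry offset into the Duff's device copy routine; 4*128 bytes is the largest size it
+ // handles, and the formula suggests each 4-byte copy step occupies 10 bytes of its code.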
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(Op386DUFFCOPY)
+ v.AuxInt = 10 * (128 - SizeAndAlign(s).Size()/4)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0
+ // result: (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
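+ // Copies too large for the Duff's device (or when it is disabled) fall back to REP MOVSL, with the
+ // word count materialized as a constant.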
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0) {
+ break
+ }
+ v.reset(Op386REPMOVSL)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
+ v0.AuxInt = SizeAndAlign(s).Size() / 4
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32uhilo x y)
+ // cond:
+ // result: (MULLQU x y)
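+ // MULLQU appears to be the widening 32x32 -> 64 multiply, yielding the high and low halves of the product.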
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULLQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
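+ // f2i(math.Copysign(0, -1)) is -0.0, a constant with only the sign bit set; PXOR with it flips the sign of x.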
+ for {
+ x := v.Args[0]
+ v.reset(Op386PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386MOVSSconst, config.Frontend().TypeFloat32())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ v.reset(Op386PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386MOVSDconst, config.Frontend().TypeFloat64())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(Op386LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValue386_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORLconst [1] x)
+ for {
+ x := v.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDLconst [off] ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValue386_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHRWconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
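+ // Signed shifts by a count >= the width must fill with the sign bit instead of producing 0. NOTL of the
+ // carry mask is 0 for in-range counts (the ORL leaves y unchanged) and all ones otherwise, which the
+ // hardware truncates to a shift by 31, replicating the sign bit.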
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SARWconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SARWconst x [15])
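+ // Shifting right by 15 fills all 16 bits with the sign bit, the required result for any signed count >= 16.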
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = 15
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHRLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHRLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SARLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SARLconst x [31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHRBconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Line, Op386SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SARBconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SARBconst x [7])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = 7
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(Op386SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
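
Each generated function above compiles one rule into the same skeleton: the // match / // cond / // result comment block is followed by a for loop whose body breaks out as soon as matching or the condition check fails, falling through to the next rule or to return false. A minimal self-contained sketch of that skeleton for the (Rsh8x64 x (Const64 [c])), uint64(c) >= 8 case, using toy stand-ins rather than the real ssa.Value and op constants:

package main

import "fmt"

// Toy stand-ins for cmd/compile/internal/ssa types; illustration only.
type Op string

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

func (v *Value) reset(op Op)     { v.Op = op; v.AuxInt = 0; v.Args = v.Args[:0] }
func (v *Value) AddArg(w *Value) { v.Args = append(v.Args, w) }

func rewriteRsh8x64(v *Value) bool {
	// match: (Rsh8x64 x (Const64 [c]))
	// cond: uint64(c) >= 8
	// result: (SARBconst x [7])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != "Const64" {
			break
		}
		c := v_1.AuxInt
		if !(uint64(c) >= 8) {
			break
		}
		v.reset("SARBconst")
		v.AuxInt = 7
		v.AddArg(x)
		return true
	}
	return false
}

func main() {
	x := &Value{Op: "Arg"}
	v := &Value{Op: "Rsh8x64", Args: []*Value{x, {Op: "Const64", AuxInt: 200}}}
	fmt.Println(rewriteRsh8x64(v), v.Op, v.AuxInt) // true SARBconst 7
}
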
func rewriteValue386_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12314,8 +12410,8 @@
for {
x := v.Args[0]
v.reset(Op386SARLconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
}
@@ -12604,102 +12700,6 @@
return true
}
}
-func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORL x (MOVLconst [c]))
- // cond:
- // result: (XORLconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != Op386MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(Op386XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL (MOVLconst [c]) x)
- // cond:
- // result: (XORLconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != Op386MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(Op386XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL x x)
- // cond:
- // result: (MOVLconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(Op386MOVLconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORLconst [c] (XORLconst [d] x))
- // cond:
- // result: (XORLconst [c ^ d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != Op386XORLconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(Op386XORLconst)
- v.AuxInt = c ^ d
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] x)
- // cond: int32(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int32(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] (MOVLconst [d]))
- // cond:
- // result: (MOVLconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != Op386MOVLconst {
- break
- }
- d := v_0.AuxInt
- v.reset(Op386MOVLconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
func rewriteValue386_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12903,8 +12903,8 @@
v.reset(OpZero)
v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
v0 := b.NewValue0(v.Line, Op386ADDLconst, config.fe.TypeUInt32())
- v0.AddArg(destptr)
v0.AuxInt = SizeAndAlign(s).Size() % 4
+ v0.AddArg(destptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
v1.AuxInt = 0
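
In the two reordering hunks just above in this file, only the order of the generated statements changed: the AuxInt assignment now precedes the AddArg calls. The fields are independent, so the constructed value is identical either way; a trivial stand-alone check (toy struct, not the real ssa.Value):

package main

import "fmt"

// Toy struct standing in for ssa.Value; illustration only.
type Value struct {
	AuxInt int64
	Args   []*Value
}

func main() {
	x := &Value{}
	oldV := &Value{}
	oldV.Args = append(oldV.Args, x) // old emission order: args, then AuxInt
	oldV.AuxInt = 31
	newV := &Value{}
	newV.AuxInt = 31 // new emission order: AuxInt, then args
	newV.Args = append(newV.Args, x)
	fmt.Println(oldV.AuxInt == newV.AuxInt, oldV.Args[0] == newV.Args[0]) // true true
}
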
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 154e1e9..19f01a0 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -24,6 +24,250 @@
return rewriteValueAMD64_OpAMD64ANDQ(v, config)
case OpAMD64ANDQconst:
return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
+ case OpAMD64CMOVLEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config)
+ case OpAMD64CMOVQEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVQEQconst(v, config)
+ case OpAMD64CMOVWEQconst:
+ return rewriteValueAMD64_OpAMD64CMOVWEQconst(v, config)
+ case OpAMD64CMPB:
+ return rewriteValueAMD64_OpAMD64CMPB(v, config)
+ case OpAMD64CMPBconst:
+ return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
+ case OpAMD64CMPL:
+ return rewriteValueAMD64_OpAMD64CMPL(v, config)
+ case OpAMD64CMPLconst:
+ return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
+ case OpAMD64CMPQ:
+ return rewriteValueAMD64_OpAMD64CMPQ(v, config)
+ case OpAMD64CMPQconst:
+ return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
+ case OpAMD64CMPW:
+ return rewriteValueAMD64_OpAMD64CMPW(v, config)
+ case OpAMD64CMPWconst:
+ return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
+ case OpAMD64LEAQ:
+ return rewriteValueAMD64_OpAMD64LEAQ(v, config)
+ case OpAMD64LEAQ1:
+ return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
+ case OpAMD64LEAQ2:
+ return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
+ case OpAMD64LEAQ4:
+ return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
+ case OpAMD64LEAQ8:
+ return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
+ case OpAMD64MOVBQSX:
+ return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
+ case OpAMD64MOVBQSXload:
+ return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
+ case OpAMD64MOVBQZX:
+ return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
+ case OpAMD64MOVBload:
+ return rewriteValueAMD64_OpAMD64MOVBload(v, config)
+ case OpAMD64MOVBloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
+ case OpAMD64MOVBstore:
+ return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
+ case OpAMD64MOVBstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
+ case OpAMD64MOVBstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
+ case OpAMD64MOVBstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
+ case OpAMD64MOVLQSX:
+ return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
+ case OpAMD64MOVLQSXload:
+ return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
+ case OpAMD64MOVLQZX:
+ return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
+ case OpAMD64MOVLload:
+ return rewriteValueAMD64_OpAMD64MOVLload(v, config)
+ case OpAMD64MOVLloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
+ case OpAMD64MOVLloadidx4:
+ return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
+ case OpAMD64MOVLstore:
+ return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
+ case OpAMD64MOVLstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
+ case OpAMD64MOVLstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
+ case OpAMD64MOVLstoreconstidx4:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
+ case OpAMD64MOVLstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
+ case OpAMD64MOVLstoreidx4:
+ return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
+ case OpAMD64MOVOload:
+ return rewriteValueAMD64_OpAMD64MOVOload(v, config)
+ case OpAMD64MOVOstore:
+ return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
+ case OpAMD64MOVQload:
+ return rewriteValueAMD64_OpAMD64MOVQload(v, config)
+ case OpAMD64MOVQloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
+ case OpAMD64MOVQloadidx8:
+ return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
+ case OpAMD64MOVQstore:
+ return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
+ case OpAMD64MOVQstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
+ case OpAMD64MOVQstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
+ case OpAMD64MOVQstoreconstidx8:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
+ case OpAMD64MOVQstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
+ case OpAMD64MOVQstoreidx8:
+ return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
+ case OpAMD64MOVSDload:
+ return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
+ case OpAMD64MOVSDloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
+ case OpAMD64MOVSDloadidx8:
+ return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
+ case OpAMD64MOVSDstore:
+ return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
+ case OpAMD64MOVSDstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
+ case OpAMD64MOVSDstoreidx8:
+ return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
+ case OpAMD64MOVSSload:
+ return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
+ case OpAMD64MOVSSloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
+ case OpAMD64MOVSSloadidx4:
+ return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
+ case OpAMD64MOVSSstore:
+ return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
+ case OpAMD64MOVSSstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
+ case OpAMD64MOVSSstoreidx4:
+ return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
+ case OpAMD64MOVWQSX:
+ return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
+ case OpAMD64MOVWQSXload:
+ return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
+ case OpAMD64MOVWQZX:
+ return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
+ case OpAMD64MOVWload:
+ return rewriteValueAMD64_OpAMD64MOVWload(v, config)
+ case OpAMD64MOVWloadidx1:
+ return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
+ case OpAMD64MOVWloadidx2:
+ return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
+ case OpAMD64MOVWstore:
+ return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
+ case OpAMD64MOVWstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
+ case OpAMD64MOVWstoreconstidx1:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
+ case OpAMD64MOVWstoreconstidx2:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
+ case OpAMD64MOVWstoreidx1:
+ return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
+ case OpAMD64MOVWstoreidx2:
+ return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
+ case OpAMD64MULL:
+ return rewriteValueAMD64_OpAMD64MULL(v, config)
+ case OpAMD64MULLconst:
+ return rewriteValueAMD64_OpAMD64MULLconst(v, config)
+ case OpAMD64MULQ:
+ return rewriteValueAMD64_OpAMD64MULQ(v, config)
+ case OpAMD64MULQconst:
+ return rewriteValueAMD64_OpAMD64MULQconst(v, config)
+ case OpAMD64NEGL:
+ return rewriteValueAMD64_OpAMD64NEGL(v, config)
+ case OpAMD64NEGQ:
+ return rewriteValueAMD64_OpAMD64NEGQ(v, config)
+ case OpAMD64NOTL:
+ return rewriteValueAMD64_OpAMD64NOTL(v, config)
+ case OpAMD64NOTQ:
+ return rewriteValueAMD64_OpAMD64NOTQ(v, config)
+ case OpAMD64ORL:
+ return rewriteValueAMD64_OpAMD64ORL(v, config)
+ case OpAMD64ORLconst:
+ return rewriteValueAMD64_OpAMD64ORLconst(v, config)
+ case OpAMD64ORQ:
+ return rewriteValueAMD64_OpAMD64ORQ(v, config)
+ case OpAMD64ORQconst:
+ return rewriteValueAMD64_OpAMD64ORQconst(v, config)
+ case OpAMD64ROLBconst:
+ return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
+ case OpAMD64ROLLconst:
+ return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
+ case OpAMD64ROLQconst:
+ return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
+ case OpAMD64ROLWconst:
+ return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
+ case OpAMD64SARB:
+ return rewriteValueAMD64_OpAMD64SARB(v, config)
+ case OpAMD64SARBconst:
+ return rewriteValueAMD64_OpAMD64SARBconst(v, config)
+ case OpAMD64SARL:
+ return rewriteValueAMD64_OpAMD64SARL(v, config)
+ case OpAMD64SARLconst:
+ return rewriteValueAMD64_OpAMD64SARLconst(v, config)
+ case OpAMD64SARQ:
+ return rewriteValueAMD64_OpAMD64SARQ(v, config)
+ case OpAMD64SARQconst:
+ return rewriteValueAMD64_OpAMD64SARQconst(v, config)
+ case OpAMD64SARW:
+ return rewriteValueAMD64_OpAMD64SARW(v, config)
+ case OpAMD64SARWconst:
+ return rewriteValueAMD64_OpAMD64SARWconst(v, config)
+ case OpAMD64SBBLcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
+ case OpAMD64SBBQcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
+ case OpAMD64SETA:
+ return rewriteValueAMD64_OpAMD64SETA(v, config)
+ case OpAMD64SETAE:
+ return rewriteValueAMD64_OpAMD64SETAE(v, config)
+ case OpAMD64SETB:
+ return rewriteValueAMD64_OpAMD64SETB(v, config)
+ case OpAMD64SETBE:
+ return rewriteValueAMD64_OpAMD64SETBE(v, config)
+ case OpAMD64SETEQ:
+ return rewriteValueAMD64_OpAMD64SETEQ(v, config)
+ case OpAMD64SETG:
+ return rewriteValueAMD64_OpAMD64SETG(v, config)
+ case OpAMD64SETGE:
+ return rewriteValueAMD64_OpAMD64SETGE(v, config)
+ case OpAMD64SETL:
+ return rewriteValueAMD64_OpAMD64SETL(v, config)
+ case OpAMD64SETLE:
+ return rewriteValueAMD64_OpAMD64SETLE(v, config)
+ case OpAMD64SETNE:
+ return rewriteValueAMD64_OpAMD64SETNE(v, config)
+ case OpAMD64SHLL:
+ return rewriteValueAMD64_OpAMD64SHLL(v, config)
+ case OpAMD64SHLQ:
+ return rewriteValueAMD64_OpAMD64SHLQ(v, config)
+ case OpAMD64SHRB:
+ return rewriteValueAMD64_OpAMD64SHRB(v, config)
+ case OpAMD64SHRL:
+ return rewriteValueAMD64_OpAMD64SHRL(v, config)
+ case OpAMD64SHRQ:
+ return rewriteValueAMD64_OpAMD64SHRQ(v, config)
+ case OpAMD64SHRW:
+ return rewriteValueAMD64_OpAMD64SHRW(v, config)
+ case OpAMD64SUBL:
+ return rewriteValueAMD64_OpAMD64SUBL(v, config)
+ case OpAMD64SUBLconst:
+ return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
+ case OpAMD64SUBQ:
+ return rewriteValueAMD64_OpAMD64SUBQ(v, config)
+ case OpAMD64SUBQconst:
+ return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
+ case OpAMD64XORL:
+ return rewriteValueAMD64_OpAMD64XORL(v, config)
+ case OpAMD64XORLconst:
+ return rewriteValueAMD64_OpAMD64XORLconst(v, config)
+ case OpAMD64XORQ:
+ return rewriteValueAMD64_OpAMD64XORQ(v, config)
+ case OpAMD64XORQconst:
+ return rewriteValueAMD64_OpAMD64XORQconst(v, config)
case OpAdd16:
return rewriteValueAMD64_OpAdd16(v, config)
case OpAdd32:
@@ -56,28 +300,6 @@
return rewriteValueAMD64_OpBswap32(v, config)
case OpBswap64:
return rewriteValueAMD64_OpBswap64(v, config)
- case OpAMD64CMOVLEQconst:
- return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config)
- case OpAMD64CMOVQEQconst:
- return rewriteValueAMD64_OpAMD64CMOVQEQconst(v, config)
- case OpAMD64CMOVWEQconst:
- return rewriteValueAMD64_OpAMD64CMOVWEQconst(v, config)
- case OpAMD64CMPB:
- return rewriteValueAMD64_OpAMD64CMPB(v, config)
- case OpAMD64CMPBconst:
- return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
- case OpAMD64CMPL:
- return rewriteValueAMD64_OpAMD64CMPL(v, config)
- case OpAMD64CMPLconst:
- return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
- case OpAMD64CMPQ:
- return rewriteValueAMD64_OpAMD64CMPQ(v, config)
- case OpAMD64CMPQconst:
- return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
- case OpAMD64CMPW:
- return rewriteValueAMD64_OpAMD64CMPW(v, config)
- case OpAMD64CMPWconst:
- return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
case OpClosureCall:
return rewriteValueAMD64_OpClosureCall(v, config)
case OpCom16:
@@ -240,16 +462,6 @@
return rewriteValueAMD64_OpIsNonNil(v, config)
case OpIsSliceInBounds:
return rewriteValueAMD64_OpIsSliceInBounds(v, config)
- case OpAMD64LEAQ:
- return rewriteValueAMD64_OpAMD64LEAQ(v, config)
- case OpAMD64LEAQ1:
- return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
- case OpAMD64LEAQ2:
- return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
- case OpAMD64LEAQ4:
- return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
- case OpAMD64LEAQ8:
- return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
case OpLeq16:
return rewriteValueAMD64_OpLeq16(v, config)
case OpLeq16U:
@@ -332,126 +544,6 @@
return rewriteValueAMD64_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValueAMD64_OpLsh8x8(v, config)
- case OpAMD64MOVBQSX:
- return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
- case OpAMD64MOVBQSXload:
- return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
- case OpAMD64MOVBQZX:
- return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
- case OpAMD64MOVBload:
- return rewriteValueAMD64_OpAMD64MOVBload(v, config)
- case OpAMD64MOVBloadidx1:
- return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
- case OpAMD64MOVBstore:
- return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
- case OpAMD64MOVBstoreconst:
- return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
- case OpAMD64MOVBstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
- case OpAMD64MOVBstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
- case OpAMD64MOVLQSX:
- return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
- case OpAMD64MOVLQSXload:
- return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
- case OpAMD64MOVLQZX:
- return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
- case OpAMD64MOVLload:
- return rewriteValueAMD64_OpAMD64MOVLload(v, config)
- case OpAMD64MOVLloadidx1:
- return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
- case OpAMD64MOVLloadidx4:
- return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
- case OpAMD64MOVLstore:
- return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
- case OpAMD64MOVLstoreconst:
- return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
- case OpAMD64MOVLstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
- case OpAMD64MOVLstoreconstidx4:
- return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
- case OpAMD64MOVLstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
- case OpAMD64MOVLstoreidx4:
- return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
- case OpAMD64MOVOload:
- return rewriteValueAMD64_OpAMD64MOVOload(v, config)
- case OpAMD64MOVOstore:
- return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
- case OpAMD64MOVQload:
- return rewriteValueAMD64_OpAMD64MOVQload(v, config)
- case OpAMD64MOVQloadidx1:
- return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
- case OpAMD64MOVQloadidx8:
- return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
- case OpAMD64MOVQstore:
- return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
- case OpAMD64MOVQstoreconst:
- return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
- case OpAMD64MOVQstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
- case OpAMD64MOVQstoreconstidx8:
- return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
- case OpAMD64MOVQstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
- case OpAMD64MOVQstoreidx8:
- return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
- case OpAMD64MOVSDload:
- return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
- case OpAMD64MOVSDloadidx1:
- return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
- case OpAMD64MOVSDloadidx8:
- return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
- case OpAMD64MOVSDstore:
- return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
- case OpAMD64MOVSDstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
- case OpAMD64MOVSDstoreidx8:
- return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
- case OpAMD64MOVSSload:
- return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
- case OpAMD64MOVSSloadidx1:
- return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
- case OpAMD64MOVSSloadidx4:
- return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
- case OpAMD64MOVSSstore:
- return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
- case OpAMD64MOVSSstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
- case OpAMD64MOVSSstoreidx4:
- return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
- case OpAMD64MOVWQSX:
- return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
- case OpAMD64MOVWQSXload:
- return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
- case OpAMD64MOVWQZX:
- return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
- case OpAMD64MOVWload:
- return rewriteValueAMD64_OpAMD64MOVWload(v, config)
- case OpAMD64MOVWloadidx1:
- return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
- case OpAMD64MOVWloadidx2:
- return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
- case OpAMD64MOVWstore:
- return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
- case OpAMD64MOVWstoreconst:
- return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
- case OpAMD64MOVWstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
- case OpAMD64MOVWstoreconstidx2:
- return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
- case OpAMD64MOVWstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
- case OpAMD64MOVWstoreidx2:
- return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
- case OpAMD64MULL:
- return rewriteValueAMD64_OpAMD64MULL(v, config)
- case OpAMD64MULLconst:
- return rewriteValueAMD64_OpAMD64MULLconst(v, config)
- case OpAMD64MULQ:
- return rewriteValueAMD64_OpAMD64MULQ(v, config)
- case OpAMD64MULQconst:
- return rewriteValueAMD64_OpAMD64MULQconst(v, config)
case OpMod16:
return rewriteValueAMD64_OpMod16(v, config)
case OpMod16u:
@@ -482,14 +574,6 @@
return rewriteValueAMD64_OpMul64F(v, config)
case OpMul8:
return rewriteValueAMD64_OpMul8(v, config)
- case OpAMD64NEGL:
- return rewriteValueAMD64_OpAMD64NEGL(v, config)
- case OpAMD64NEGQ:
- return rewriteValueAMD64_OpAMD64NEGQ(v, config)
- case OpAMD64NOTL:
- return rewriteValueAMD64_OpAMD64NOTL(v, config)
- case OpAMD64NOTQ:
- return rewriteValueAMD64_OpAMD64NOTQ(v, config)
case OpNeg16:
return rewriteValueAMD64_OpNeg16(v, config)
case OpNeg32:
@@ -522,14 +606,6 @@
return rewriteValueAMD64_OpNilCheck(v, config)
case OpNot:
return rewriteValueAMD64_OpNot(v, config)
- case OpAMD64ORL:
- return rewriteValueAMD64_OpAMD64ORL(v, config)
- case OpAMD64ORLconst:
- return rewriteValueAMD64_OpAMD64ORLconst(v, config)
- case OpAMD64ORQ:
- return rewriteValueAMD64_OpAMD64ORQ(v, config)
- case OpAMD64ORQconst:
- return rewriteValueAMD64_OpAMD64ORQconst(v, config)
case OpOffPtr:
return rewriteValueAMD64_OpOffPtr(v, config)
case OpOr16:
@@ -542,14 +618,6 @@
return rewriteValueAMD64_OpOr8(v, config)
case OpOrB:
return rewriteValueAMD64_OpOrB(v, config)
- case OpAMD64ROLBconst:
- return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
- case OpAMD64ROLLconst:
- return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
- case OpAMD64ROLQconst:
- return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
- case OpAMD64ROLWconst:
- return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
case OpRsh16Ux16:
return rewriteValueAMD64_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
@@ -614,66 +682,6 @@
return rewriteValueAMD64_OpRsh8x64(v, config)
case OpRsh8x8:
return rewriteValueAMD64_OpRsh8x8(v, config)
- case OpAMD64SARB:
- return rewriteValueAMD64_OpAMD64SARB(v, config)
- case OpAMD64SARBconst:
- return rewriteValueAMD64_OpAMD64SARBconst(v, config)
- case OpAMD64SARL:
- return rewriteValueAMD64_OpAMD64SARL(v, config)
- case OpAMD64SARLconst:
- return rewriteValueAMD64_OpAMD64SARLconst(v, config)
- case OpAMD64SARQ:
- return rewriteValueAMD64_OpAMD64SARQ(v, config)
- case OpAMD64SARQconst:
- return rewriteValueAMD64_OpAMD64SARQconst(v, config)
- case OpAMD64SARW:
- return rewriteValueAMD64_OpAMD64SARW(v, config)
- case OpAMD64SARWconst:
- return rewriteValueAMD64_OpAMD64SARWconst(v, config)
- case OpAMD64SBBLcarrymask:
- return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
- case OpAMD64SBBQcarrymask:
- return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
- case OpAMD64SETA:
- return rewriteValueAMD64_OpAMD64SETA(v, config)
- case OpAMD64SETAE:
- return rewriteValueAMD64_OpAMD64SETAE(v, config)
- case OpAMD64SETB:
- return rewriteValueAMD64_OpAMD64SETB(v, config)
- case OpAMD64SETBE:
- return rewriteValueAMD64_OpAMD64SETBE(v, config)
- case OpAMD64SETEQ:
- return rewriteValueAMD64_OpAMD64SETEQ(v, config)
- case OpAMD64SETG:
- return rewriteValueAMD64_OpAMD64SETG(v, config)
- case OpAMD64SETGE:
- return rewriteValueAMD64_OpAMD64SETGE(v, config)
- case OpAMD64SETL:
- return rewriteValueAMD64_OpAMD64SETL(v, config)
- case OpAMD64SETLE:
- return rewriteValueAMD64_OpAMD64SETLE(v, config)
- case OpAMD64SETNE:
- return rewriteValueAMD64_OpAMD64SETNE(v, config)
- case OpAMD64SHLL:
- return rewriteValueAMD64_OpAMD64SHLL(v, config)
- case OpAMD64SHLQ:
- return rewriteValueAMD64_OpAMD64SHLQ(v, config)
- case OpAMD64SHRB:
- return rewriteValueAMD64_OpAMD64SHRB(v, config)
- case OpAMD64SHRL:
- return rewriteValueAMD64_OpAMD64SHRL(v, config)
- case OpAMD64SHRQ:
- return rewriteValueAMD64_OpAMD64SHRQ(v, config)
- case OpAMD64SHRW:
- return rewriteValueAMD64_OpAMD64SHRW(v, config)
- case OpAMD64SUBL:
- return rewriteValueAMD64_OpAMD64SUBL(v, config)
- case OpAMD64SUBLconst:
- return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
- case OpAMD64SUBQ:
- return rewriteValueAMD64_OpAMD64SUBQ(v, config)
- case OpAMD64SUBQconst:
- return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
case OpSignExt16to32:
return rewriteValueAMD64_OpSignExt16to32(v, config)
case OpSignExt16to64:
@@ -718,14 +726,6 @@
return rewriteValueAMD64_OpTrunc64to32(v, config)
case OpTrunc64to8:
return rewriteValueAMD64_OpTrunc64to8(v, config)
- case OpAMD64XORL:
- return rewriteValueAMD64_OpAMD64XORL(v, config)
- case OpAMD64XORLconst:
- return rewriteValueAMD64_OpAMD64XORLconst(v, config)
- case OpAMD64XORQ:
- return rewriteValueAMD64_OpAMD64XORQ(v, config)
- case OpAMD64XORQconst:
- return rewriteValueAMD64_OpAMD64XORQconst(v, config)
case OpXor16:
return rewriteValueAMD64_OpXor16(v, config)
case OpXor32:
@@ -1531,242 +1531,6 @@
}
return false
}
-func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32F x y)
- // cond:
- // result: (ADDSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64 x y)
- // cond:
- // result: (ADDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64F x y)
- // cond:
- // result: (ADDSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add8 x y)
- // cond:
- // result: (ADDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AddPtr x y)
- // cond:
- // result: (ADDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Addr {sym} base)
- // cond:
- // result: (LEAQ {sym} base)
- for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(OpAMD64LEAQ)
- v.Aux = sym
- v.AddArg(base)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And16 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And32 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And64 x y)
- // cond:
- // result: (ANDQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And8 x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AndB x y)
- // cond:
- // result: (ANDL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Avg64u x y)
- // cond:
- // result: (AVGQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64AVGQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Bswap32 x)
- // cond:
- // result: (BSWAPL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64BSWAPL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Bswap64 x)
- // cond:
- // result: (BSWAPQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64BSWAPQ)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1774,28 +1538,28 @@
// cond:
// result: (CMOVLNEconst x y [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVLNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVLEQconst _ (FlagEQ) [c])
// cond:
// result: (Const32 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst32)
v.AuxInt = c
return true
@@ -1865,28 +1629,28 @@
// cond:
// result: (CMOVQNEconst x y [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVQNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVQEQconst _ (FlagEQ) [c])
// cond:
// result: (Const64 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst64)
v.AuxInt = c
return true
@@ -1956,28 +1720,28 @@
// cond:
// result: (CMOVWNEconst x y [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64InvertFlags {
break
}
y := v_1.Args[0]
- c := v.AuxInt
v.reset(OpAMD64CMOVWNEconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMOVWEQconst _ (FlagEQ) [c])
// cond:
// result: (Const16 [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpAMD64FlagEQ {
break
}
- c := v.AuxInt
v.reset(OpConst16)
v.AuxInt = c
return true
@@ -2054,8 +1818,8 @@
}
c := v_1.AuxInt
v.reset(OpAMD64CMPBconst)
- v.AddArg(x)
v.AuxInt = int64(int8(c))
+ v.AddArg(x)
return true
}
// match: (CMPB (MOVLconst [c]) x)
@@ -2070,8 +1834,8 @@
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int8(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2084,12 +1848,12 @@
// cond: int8(x)==int8(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) == int8(y)) {
break
}
@@ -2100,12 +1864,12 @@
// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -2116,12 +1880,12 @@
// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -2132,12 +1896,12 @@
// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
break
}
@@ -2148,12 +1912,12 @@
// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
break
}
@@ -2164,12 +1928,12 @@
// cond: 0 <= int8(m) && int8(m) < int8(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int8(m) && int8(m) < int8(n)) {
break
}
@@ -2180,15 +1944,15 @@
// cond:
// result: (TESTB x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTB)
v.AddArg(x)
v.AddArg(y)
@@ -2198,15 +1962,15 @@
// cond:
// result: (TESTBconst [int64(int8(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTBconst)
v.AuxInt = int64(int8(c))
v.AddArg(x)
@@ -2216,10 +1980,10 @@
// cond:
// result: (TESTB x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTB)
v.AddArg(x)
v.AddArg(x)
@@ -2241,8 +2005,8 @@
}
c := v_1.AuxInt
v.reset(OpAMD64CMPLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (CMPL (MOVLconst [c]) x)
@@ -2257,8 +2021,8 @@
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2271,12 +2035,12 @@
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) == int32(y)) {
break
}
@@ -2287,12 +2051,12 @@
// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -2303,12 +2067,12 @@
// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -2319,12 +2083,12 @@
// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -2335,12 +2099,12 @@
// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -2351,12 +2115,12 @@
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SHRLconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
break
}
@@ -2367,12 +2131,12 @@
// cond: 0 <= int32(m) && int32(m) < int32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int32(m) && int32(m) < int32(n)) {
break
}
@@ -2383,15 +2147,15 @@
// cond:
// result: (TESTL x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTL)
v.AddArg(x)
v.AddArg(y)
@@ -2401,15 +2165,15 @@
// cond:
// result: (TESTLconst [c] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTLconst)
v.AuxInt = c
v.AddArg(x)
@@ -2419,10 +2183,10 @@
// cond:
// result: (TESTL x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTL)
v.AddArg(x)
v.AddArg(x)
@@ -2447,8 +2211,8 @@
break
}
v.reset(OpAMD64CMPQconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (CMPQ (MOVQconst [c]) x)
@@ -2466,8 +2230,8 @@
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2480,12 +2244,12 @@
// cond: x==y
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x == y) {
break
}
@@ -2496,12 +2260,12 @@
// cond: x<y && uint64(x)<uint64(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x < y && uint64(x) < uint64(y)) {
break
}
@@ -2512,12 +2276,12 @@
// cond: x<y && uint64(x)>uint64(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x < y && uint64(x) > uint64(y)) {
break
}
@@ -2528,12 +2292,12 @@
// cond: x>y && uint64(x)<uint64(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x > y && uint64(x) < uint64(y)) {
break
}
@@ -2544,12 +2308,12 @@
// cond: x>y && uint64(x)>uint64(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(x > y && uint64(x) > uint64(y)) {
break
}
@@ -2560,11 +2324,11 @@
// cond: 0xFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVBQZX {
break
}
- c := v.AuxInt
if !(0xFF < c) {
break
}
@@ -2575,11 +2339,11 @@
// cond: 0xFFFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVWQZX {
break
}
- c := v.AuxInt
if !(0xFFFF < c) {
break
}
@@ -2590,11 +2354,11 @@
// cond: 0xFFFFFFFF < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLQZX {
break
}
- c := v.AuxInt
if !(0xFFFFFFFF < c) {
break
}
@@ -2605,12 +2369,12 @@
// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SHRQconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
break
}
@@ -2621,12 +2385,12 @@
// cond: 0 <= m && m < n
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= m && m < n) {
break
}
@@ -2637,15 +2401,15 @@
// cond:
// result: (TESTQ x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQ {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTQ)
v.AddArg(x)
v.AddArg(y)
@@ -2655,15 +2419,15 @@
// cond:
// result: (TESTQconst [c] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDQconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTQconst)
v.AuxInt = c
v.AddArg(x)
@@ -2673,10 +2437,10 @@
// cond:
// result: (TESTQ x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTQ)
v.AddArg(x)
v.AddArg(x)
@@ -2698,8 +2462,8 @@
}
c := v_1.AuxInt
v.reset(OpAMD64CMPWconst)
- v.AddArg(x)
v.AuxInt = int64(int16(c))
+ v.AddArg(x)
return true
}
// match: (CMPW (MOVLconst [c]) x)
@@ -2714,8 +2478,8 @@
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v0.AddArg(x)
v0.AuxInt = int64(int16(c))
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2728,12 +2492,12 @@
// cond: int16(x)==int16(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) == int16(y)) {
break
}
@@ -2744,12 +2508,12 @@
// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -2760,12 +2524,12 @@
// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -2776,12 +2540,12 @@
// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
break
}
@@ -2792,12 +2556,12 @@
// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
break
}
@@ -2808,12 +2572,12 @@
// cond: 0 <= int16(m) && int16(m) < int16(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int16(m) && int16(m) < int16(n)) {
break
}
@@ -2824,15 +2588,15 @@
// cond:
// result: (TESTW x y)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDL {
break
}
x := v_0.Args[0]
y := v_0.Args[1]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTW)
v.AddArg(x)
v.AddArg(y)
@@ -2842,15 +2606,15 @@
// cond:
// result: (TESTWconst [int64(int16(c))] x)
for {
+ if v.AuxInt != 0 {
+ break
+ }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- if v.AuxInt != 0 {
- break
- }
v.reset(OpAMD64TESTWconst)
v.AuxInt = int64(int16(c))
v.AddArg(x)
@@ -2860,10 +2624,10 @@
// cond:
// result: (TESTW x x)
for {
- x := v.Args[0]
if v.AuxInt != 0 {
break
}
+ x := v.Args[0]
v.reset(OpAMD64TESTW)
v.AddArg(x)
v.AddArg(x)
@@ -2871,1281 +2635,6 @@
}
return false
}
-func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64CALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com16 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com32 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com64 x)
- // cond:
- // result: (NOTQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com8 x)
- // cond:
- // result: (NOTL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const16 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32F [val])
- // cond:
- // result: (MOVSSconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVSSconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64 [val])
- // cond:
- // result: (MOVQconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64F [val])
- // cond:
- // result: (MOVSDconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVSDconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const8 [val])
- // cond:
- // result: (MOVLconst [val])
- for {
- val := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstBool [b])
- // cond:
- // result: (MOVLconst [b])
- for {
- b := v.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = b
- return true
- }
-}
-func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstNil)
- // cond:
- // result: (MOVQconst [0])
- for {
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Convert <t> x mem)
- // cond:
- // result: (MOVQconvert <t> x mem)
- for {
- t := v.Type
- x := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64MOVQconvert)
- v.Type = t
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz16 <t> x)
- // cond:
- // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVWEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz32 <t> x)
- // cond:
- // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVLEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 32
- return true
- }
-}
-func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Ctz64 <t> x)
- // cond:
- // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
- for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64CMOVQEQconst)
- v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v1.AddArg(x)
- v1.AuxInt = 0
- v.AddArg(v1)
- v.AuxInt = 64
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32 x)
- // cond:
- // result: (CVTTSS2SL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSS2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64 x)
- // cond:
- // result: (CVTTSS2SQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSS2SQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64F x)
- // cond:
- // result: (CVTSS2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSS2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to32F x)
- // cond:
- // result: (CVTSL2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSL2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to64F x)
- // cond:
- // result: (CVTSL2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSL2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32 x)
- // cond:
- // result: (CVTTSD2SL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSD2SL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32F x)
- // cond:
- // result: (CVTSD2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSD2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto64 x)
- // cond:
- // result: (CVTTSD2SQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTTSD2SQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64to32F x)
- // cond:
- // result: (CVTSQ2SS x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSQ2SS)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64to64F x)
- // cond:
- // result: (CVTSQ2SD x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64CVTSQ2SD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (DeferCall [argwid] mem)
- // cond:
- // result: (CALLdefer [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpAMD64CALLdefer)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16 x y)
- // cond:
- // result: (Select0 (DIVW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16u x y)
- // cond:
- // result: (Select0 (DIVWU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32 x y)
- // cond:
- // result: (Select0 (DIVL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32F x y)
- // cond:
- // result: (DIVSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32u x y)
- // cond:
- // result: (Select0 (DIVLU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64 x y)
- // cond:
- // result: (Select0 (DIVQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64F x y)
- // cond:
- // result: (DIVSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64DIVSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64u x y)
- // cond:
- // result: (Select0 (DIVQU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8 x y)
- // cond:
- // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8u x y)
- // cond:
- // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect0)
- v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq16 x y)
- // cond:
- // result: (SETEQ (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32 x y)
- // cond:
- // result: (SETEQ (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32F x y)
- // cond:
- // result: (SETEQF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64 x y)
- // cond:
- // result: (SETEQ (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64F x y)
- // cond:
- // result: (SETEQF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq8 x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqB x y)
- // cond:
- // result: (SETEQ (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqPtr x y)
- // cond:
- // result: (SETEQ (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETEQ)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16 x y)
- // cond:
- // result: (SETGE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16U x y)
- // cond:
- // result: (SETAE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32 x y)
- // cond:
- // result: (SETGE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32U x y)
- // cond:
- // result: (SETAE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64 x y)
- // cond:
- // result: (SETGE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64U x y)
- // cond:
- // result: (SETAE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8 x y)
- // cond:
- // result: (SETGE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8U x y)
- // cond:
- // result: (SETAE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetClosurePtr)
- // cond:
- // result: (LoweredGetClosurePtr)
- for {
- v.reset(OpAMD64LoweredGetClosurePtr)
- return true
- }
-}
-func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetG mem)
- // cond:
- // result: (LoweredGetG mem)
- for {
- mem := v.Args[0]
- v.reset(OpAMD64LoweredGetG)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GoCall [argwid] mem)
- // cond:
- // result: (CALLgo [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpAMD64CALLgo)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16 x y)
- // cond:
- // result: (SETG (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16U x y)
- // cond:
- // result: (SETA (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32 x y)
- // cond:
- // result: (SETG (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32F x y)
- // cond:
- // result: (SETGF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32U x y)
- // cond:
- // result: (SETA (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64 x y)
- // cond:
- // result: (SETG (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64F x y)
- // cond:
- // result: (SETGF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64U x y)
- // cond:
- // result: (SETA (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8 x y)
- // cond:
- // result: (SETG (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETG)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8U x y)
- // cond:
- // result: (SETA (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETA)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16 x y)
- // cond:
- // result: (HMULW x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULW)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
- // cond:
- // result: (HMULWU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULWU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
- // cond:
- // result: (HMULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
- // cond:
- // result: (HMULLU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULLU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul64 x y)
- // cond:
- // result: (HMULQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul64u x y)
- // cond:
- // result: (HMULQU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULQU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
- // cond:
- // result: (HMULB x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULB)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
- // cond:
- // result: (HMULBU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64HMULBU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (InterCall [argwid] entry mem)
- // cond:
- // result: (CALLinter [argwid] entry mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64CALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
- // cond:
- // result: (SETB (CMPQ idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil p)
- // cond:
- // result: (SETNE (TESTQ p p))
- for {
- p := v.Args[0]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
- v0.AddArg(p)
- v0.AddArg(p)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
- // cond:
- // result: (SETBE (CMPQ idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4834,893 +3323,6 @@
}
return false
}
-func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16 x y)
- // cond:
- // result: (SETLE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16U x y)
- // cond:
- // result: (SETBE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32 x y)
- // cond:
- // result: (SETLE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32F x y)
- // cond:
- // result: (SETGEF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32U x y)
- // cond:
- // result: (SETBE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64 x y)
- // cond:
- // result: (SETLE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64F x y)
- // cond:
- // result: (SETGEF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64U x y)
- // cond:
- // result: (SETBE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8 x y)
- // cond:
- // result: (SETLE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETLE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8U x y)
- // cond:
- // result: (SETBE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETBE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16 x y)
- // cond:
- // result: (SETL (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16U x y)
- // cond:
- // result: (SETB (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32 x y)
- // cond:
- // result: (SETL (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32F x y)
- // cond:
- // result: (SETGF (UCOMISS y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32U x y)
- // cond:
- // result: (SETB (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64 x y)
- // cond:
- // result: (SETL (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64F x y)
- // cond:
- // result: (SETGF (UCOMISD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETGF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64U x y)
- // cond:
- // result: (SETB (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8 x y)
- // cond:
- // result: (SETL (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETL)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8U x y)
- // cond:
- // result: (SETB (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Load <t> ptr mem)
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (MOVQload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpAMD64MOVQload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitInt(t)
- // result: (MOVLload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVLload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is16BitInt(t)
- // result: (MOVWload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (t.IsBoolean() || is8BitInt(t))
- // result: (MOVBload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean() || is8BitInt(t)) {
- break
- }
- v.reset(OpAMD64MOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVSSload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
- break
- }
- v.reset(OpAMD64MOVSSload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVSDload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
- break
- }
- v.reset(OpAMD64MOVSDload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
- // cond:
- // result: (ROLWconst <t> [c&15] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLWconst)
- v.Type = t
- v.AuxInt = c & 15
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 <t> x [c])
- // cond:
- // result: (ROLLconst <t> [c&31] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLLconst)
- v.Type = t
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot64 <t> x [c])
- // cond:
- // result: (ROLQconst <t> [c&63] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLQconst)
- v.Type = t
- v.AuxInt = c & 63
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
- // cond:
- // result: (ROLBconst <t> [c&7] x)
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpAMD64ROLBconst)
- v.Type = t
- v.AuxInt = c & 7
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x16 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x32 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x64 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh64x8 <t> x y)
- // cond:
- // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 <t> x y)
- // cond:
- // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -11903,604 +9505,6 @@
}
return false
}
-func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16 x y)
- // cond:
- // result: (Select1 (DIVW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod16u x y)
- // cond:
- // result: (Select1 (DIVWU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32 x y)
- // cond:
- // result: (Select1 (DIVL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod32u x y)
- // cond:
- // result: (Select1 (DIVLU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod64 x y)
- // cond:
- // result: (Select1 (DIVQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod64u x y)
- // cond:
- // result: (Select1 (DIVQU x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8 x y)
- // cond:
- // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mod8u x y)
- // cond:
- // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Move [s] _ _ mem)
- // cond: SizeAndAlign(s).Size() == 0
- // result: mem
- for {
- s := v.AuxInt
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = mem.Type
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 1
- // result: (MOVBstore dst (MOVBload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 1) {
- break
- }
- v.reset(OpAMD64MOVBstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 2
- // result: (MOVWstore dst (MOVWload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 2) {
- break
- }
- v.reset(OpAMD64MOVWstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 4
- // result: (MOVLstore dst (MOVLload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 4) {
- break
- }
- v.reset(OpAMD64MOVLstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 8
- // result: (MOVQstore dst (MOVQload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 8) {
- break
- }
- v.reset(OpAMD64MOVQstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 16
- // result: (MOVOstore dst (MOVOload src mem) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 16) {
- break
- }
- v.reset(OpAMD64MOVOstore)
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 3
- // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 3) {
- break
- }
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = 2
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 2
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 5
- // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 5) {
- break
- }
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 6
- // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 6) {
- break
- }
- v.reset(OpAMD64MOVWstore)
- v.AuxInt = 4
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
- v0.AuxInt = 4
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() == 7
- // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() == 7) {
- break
- }
- v.reset(OpAMD64MOVLstore)
- v.AuxInt = 3
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v0.AuxInt = 3
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
- // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
- break
- }
- v.reset(OpAMD64MOVQstore)
- v.AuxInt = SizeAndAlign(s).Size() - 8
- v.AddArg(dst)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v0.AuxInt = SizeAndAlign(s).Size() - 8
- v0.AddArg(src)
- v0.AddArg(mem)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
- v1.AddArg(dst)
- v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v2.AddArg(src)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v1.AddArg(mem)
- v.AddArg(v1)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
- // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
- break
- }
- v.reset(OpMove)
- v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
- v0.AddArg(dst)
- v0.AuxInt = SizeAndAlign(s).Size() % 16
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
- v1.AddArg(src)
- v1.AuxInt = SizeAndAlign(s).Size() % 16
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
- v2.AddArg(dst)
- v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
- // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
- break
- }
- v.reset(OpMove)
- v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
- v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
- v0.AddArg(dst)
- v0.AuxInt = SizeAndAlign(s).Size() % 16
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
- v1.AddArg(src)
- v1.AuxInt = SizeAndAlign(s).Size() % 16
- v.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
- v2.AddArg(dst)
- v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
- v3.AddArg(src)
- v3.AddArg(mem)
- v2.AddArg(v3)
- v2.AddArg(mem)
- v.AddArg(v2)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
- // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
- break
- }
- v.reset(OpAMD64DUFFCOPY)
- v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
- v.AddArg(dst)
- v.AddArg(src)
- v.AddArg(mem)
- return true
- }
- // match: (Move [s] dst src mem)
- // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
- // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
- for {
- s := v.AuxInt
- dst := v.Args[0]
- src := v.Args[1]
- mem := v.Args[2]
- if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
- break
- }
- v.reset(OpAMD64REPMOVSQ)
- v.AddArg(dst)
- v.AddArg(src)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
- v0.AuxInt = SizeAndAlign(s).Size() / 8
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul16 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul32F x y)
- // cond:
- // result: (MULSS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64 x y)
- // cond:
- // result: (MULQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul64F x y)
- // cond:
- // result: (MULSD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Mul8 x y)
- // cond:
- // result: (MULL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64MULL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -12573,255 +9577,6 @@
}
return false
}
-func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg16 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg32F x)
- // cond:
- // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(OpAMD64PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64 x)
- // cond:
- // result: (NEGQ x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGQ)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg64F x)
- // cond:
- // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
- for {
- x := v.Args[0]
- v.reset(OpAMD64PXOR)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
- v0.AuxInt = f2i(math.Copysign(0, -1))
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neg8 x)
- // cond:
- // result: (NEGL x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64NEGL)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq16 x y)
- // cond:
- // result: (SETNE (CMPW x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32 x y)
- // cond:
- // result: (SETNE (CMPL x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq32F x y)
- // cond:
- // result: (SETNEF (UCOMISS x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64 x y)
- // cond:
- // result: (SETNE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq64F x y)
- // cond:
- // result: (SETNEF (UCOMISD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNEF)
- v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Neq8 x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqB x y)
- // cond:
- // result: (SETNE (CMPB x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NeqPtr x y)
- // cond:
- // result: (SETNE (CMPQ x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SETNE)
- v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NilCheck ptr mem)
- // cond:
- // result: (LoweredNilCheck ptr mem)
- for {
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64LoweredNilCheck)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Not x)
- // cond:
- // result: (XORLconst [1] x)
- for {
- x := v.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = 1
- v.AddArg(x)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -13771,112 +10526,6 @@
}
return false
}
-func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OffPtr [off] ptr)
- // cond: is32Bit(off)
- // result: (ADDQconst [off] ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- if !(is32Bit(off)) {
- break
- }
- v.reset(OpAMD64ADDQconst)
- v.AuxInt = off
- v.AddArg(ptr)
- return true
- }
- // match: (OffPtr [off] ptr)
- // cond:
- // result: (ADDQ (MOVQconst [off]) ptr)
- for {
- off := v.AuxInt
- ptr := v.Args[0]
- v.reset(OpAMD64ADDQ)
- v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
- v0.AuxInt = off
- v.AddArg(v0)
- v.AddArg(ptr)
- return true
- }
-}
-func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or16 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or32 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or64 x y)
- // cond:
- // result: (ORQ x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Or8 x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OrB x y)
- // cond:
- // result: (ORL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ORL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -14013,822 +10662,6 @@
}
return false
}
-func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 16
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x16 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x32 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x64 <t> x y)
- // cond:
- // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh16x8 <t> x y)
- // cond:
- // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARW)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 16
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 32
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x16 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x32 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x64 <t> x y)
- // cond:
- // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh32x8 <t> x y)
- // cond:
- // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARL)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 32
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux16 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux32 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux64 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64Ux8 <t> x y)
- // cond:
- // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
- v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 64
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x16 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x32 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x64 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh64x8 <t> x y)
- // cond:
- // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARQ)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 64
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux16 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux32 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux64 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8Ux8 <t> x y)
- // cond:
- // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
- v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
- v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v2.AddArg(y)
- v2.AuxInt = 8
- v1.AddArg(v2)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x16 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x32 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x64 <t> x y)
- // cond:
- // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Rsh8x8 <t> x y)
- // cond:
- // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
- for {
- t := v.Type
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64SARB)
- v.Type = t
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
- v0.AddArg(y)
- v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
- v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
- v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
- v3.AddArg(y)
- v3.AuxInt = 8
- v2.AddArg(v3)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v.AddArg(v0)
- return true
- }
-}
func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16287,8 +12120,8 @@
}
c := v_1.AuxInt
v.reset(OpAMD64SUBLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (SUBL (MOVLconst [c]) x)
@@ -16303,8 +12136,8 @@
x := v.Args[1]
v.reset(OpAMD64NEGL)
v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16368,8 +12201,8 @@
break
}
v.reset(OpAMD64SUBQconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (SUBQ (MOVQconst [c]) x)
@@ -16387,8 +12220,8 @@
}
v.reset(OpAMD64NEGQ)
v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
- v0.AddArg(x)
v0.AuxInt = c
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -16440,12 +12273,12 @@
// cond:
// result: (MOVQconst [d-c])
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
d := v_0.AuxInt
- c := v.AuxInt
v.reset(OpAMD64MOVQconst)
v.AuxInt = d - c
return true
@@ -16454,13 +12287,13 @@
// cond: is32Bit(-c-d)
// result: (ADDQconst [-c-d] x)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SUBQconst {
break
}
- x := v_0.Args[0]
d := v_0.AuxInt
- c := v.AuxInt
+ x := v_0.Args[0]
if !(is32Bit(-c - d)) {
break
}
@@ -16471,6 +12304,4370 @@
}
return false
}
+func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORL x (MOVLconst [c]))
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL (MOVLconst [c]) x)
+ // cond:
+ // result: (XORLconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL x x)
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQ x x)
+ // cond:
+ // result: (MOVQconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORQconst [c] (XORQconst [d] x))
+ // cond:
+ // result: (XORQconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ADDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (LEAQ {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpAMD64LEAQ)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And64 x y)
+ // cond:
+ // result: (ANDQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (ANDL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Avg64u x y)
+ // cond:
+ // result: (AVGQU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64AVGQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap32 x)
+ // cond:
+ // result: (BSWAPL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64BSWAPL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Bswap64 x)
+ // cond:
+ // result: (BSWAPQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64BSWAPQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com64 x)
+ // cond:
+ // result: (NOTQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NOTL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVSSconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVSSconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64 [val])
+ // cond:
+ // result: (MOVQconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVSDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVSDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVLconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVLconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVQconst [0])
+ for {
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert <t> x mem)
+ // cond:
+ // result: (MOVQconvert <t> x mem)
+ for {
+ t := v.Type
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64MOVQconvert)
+ v.Type = t
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz16 <t> x)
+ // cond:
+ // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVWEQconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz32 <t> x)
+ // cond:
+ // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVLEQconst)
+ v.AuxInt = 32
+ v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Ctz64 <t> x)
+ // cond:
+ // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
+ for {
+ t := v.Type
+ x := v.Args[0]
+ v.reset(OpAMD64CMOVQEQconst)
+ v.AuxInt = 64
+ v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v1.AuxInt = 0
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (CVTTSS2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSS2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64 x)
+ // cond:
+ // result: (CVTTSS2SQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSS2SQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (CVTSS2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSS2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (CVTSL2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSL2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (CVTSL2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSL2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (CVTTSD2SL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSD2SL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (CVTSD2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSD2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto64 x)
+ // cond:
+ // result: (CVTTSD2SQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTTSD2SQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to32F x)
+ // cond:
+ // result: (CVTSQ2SS x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSQ2SS)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64to64F x)
+ // cond:
+ // result: (CVTSQ2SD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64CVTSQ2SD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpAMD64CALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (Select0 (DIVW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (Select0 (DIVWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (Select0 (DIVL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64DIVSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (Select0 (DIVLU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64 x y)
+ // cond:
+ // result: (Select0 (DIVQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64DIVSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64u x y)
+ // cond:
+ // result: (Select0 (DIVQU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64 x y)
+ // cond:
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (SETGE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (SETAE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (SETGE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (SETAE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64 x y)
+ // cond:
+ // result: (SETGE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64U x y)
+ // cond:
+ // result: (SETAE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (SETGE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (SETAE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpAMD64LoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetG mem)
+ // cond:
+ // result: (LoweredGetG mem)
+ for {
+ mem := v.Args[0]
+ v.reset(OpAMD64LoweredGetG)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpAMD64CALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (SETG (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (SETA (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (SETG (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (SETA (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64 x y)
+ // cond:
+ // result: (SETG (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64U x y)
+ // cond:
+ // result: (SETA (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (SETG (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (SETA (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (HMULW x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (HMULWU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULWU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULLU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULLU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64 x y)
+ // cond:
+ // result: (HMULQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul64u x y)
+ // cond:
+ // result: (HMULQU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULQU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (HMULB x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (HMULBU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64HMULBU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64CALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (SETB (CMPQ idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil p)
+ // cond:
+ // result: (SETNE (TESTQ p p))
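+ // Note: TESTQ p p ANDs the pointer with itself, setting ZF exactly
+ // when p is zero, so SETNE yields 1 for any non-nil pointer.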
+ for {
+ p := v.Args[0]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
+ v0.AddArg(p)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (SETBE (CMPQ idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (SETGEF (UCOMISS y x))
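+ // Note: x <= y is computed as y >= x with the operands swapped.
+ // Only the greater/greater-equal flag patterns are false on
+ // unordered (NaN) inputs, so swapping lets the SETGEF pseudo-op be
+ // reused rather than requiring a less-than float variant.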
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64 x y)
+ // cond:
+ // result: (SETLE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64U x y)
+ // cond:
+ // result: (SETBE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (SETL (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (SETB (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (SETL (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (SETB (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64 x y)
+ // cond:
+ // result: (SETL (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64U x y)
+ // cond:
+ // result: (SETB (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (SETL (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (SETB (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
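+ // The Load rules dispatch purely on the value's type: 64-bit
+ // integers and pointers first, then progressively narrower integers
+ // and booleans, then the two float widths. The conditions are
+ // mutually exclusive, so the order of the cascade does not matter.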
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVQload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t)
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (ROLWconst <t> [c&15] x)
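+ // Note: the rotation count is reduced modulo the operand width
+ // (c&15 for 16 bits), since rotating by the full width is a no-op.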
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLWconst)
+ v.Type = t
+ v.AuxInt = c & 15
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 <t> x [c])
+ // cond:
+ // result: (ROLLconst <t> [c&31] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLLconst)
+ v.Type = t
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot64 <t> x [c])
+ // cond:
+ // result: (ROLQconst <t> [c&63] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLQconst)
+ v.Type = t
+ v.AuxInt = c & 63
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (ROLBconst <t> [c&7] x)
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ROLBconst)
+ v.Type = t
+ v.AuxInt = c & 7
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
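+ // Note: x86 masks shift counts to the operand width, but Go requires
+ // a shift by >= the operand width to produce 0. SBBLcarrymask turns
+ // the carry flag from the CMPWconst into 0 or ^0, and the ANDL zeroes
+ // the result when the count is out of range. 8- and 16-bit left
+ // shifts use the 32-bit SHLL, so the comparison is against 32: counts
+ // in 16..31 already clear the low 16 bits on their own, and the mask
+ // covers everything from 32 up.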
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x16 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x32 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x64 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh64x8 <t> x y)
+ // cond:
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 <t> x y)
+ // cond:
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16 x y)
+ // cond:
+ // result: (Select1 (DIVW x y))
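+ // Note: DIVW produces a (quotient, remainder) tuple; Select1 picks
+ // the remainder, while the corresponding Div rules take Select0.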
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod16u x y)
+ // cond:
+ // result: (Select1 (DIVWU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32 x y)
+ // cond:
+ // result: (Select1 (DIVL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod32u x y)
+ // cond:
+ // result: (Select1 (DIVLU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64 x y)
+ // cond:
+ // result: (Select1 (DIVQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod64u x y)
+ // cond:
+ // result: (Select1 (DIVQU x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8 x y)
+ // cond:
+ // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
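+ // Note: the rules define no 8-bit divide tuple, so Mod8 sign-extends
+ // both operands to 16 bits and reuses DIVW; Mod8u zero-extends
+ // instead and uses DIVWU.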
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mod8u x y)
+ // cond:
+ // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
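+ // The Move rules form a size-directed cascade: power-of-two sizes up
+ // to 16 become a single load/store pair; sizes 3, 5, 6, and 7 use two
+ // smaller pairs; sizes 9..15 use two overlapping 8-byte moves; larger
+ // sizes peel off the remainder modulo 16 and recurse; copies of
+ // 32..1024 bytes whose size is a multiple of 16 jump into Duff's
+ // device; and remaining multiples of 8 fall back to REP MOVSQ.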
+ // match: (Move [s] _ _ mem)
+ // cond: SizeAndAlign(s).Size() == 0
+ // result: mem
+ for {
+ s := v.AuxInt
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = mem.Type
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 1
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 2
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 2) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 4
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 8
+ // result: (MOVQstore dst (MOVQload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 8) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 16
+ // result: (MOVOstore dst (MOVOload src mem) mem)
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 16) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 3
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = 2
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 2
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 5
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 5) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 6
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 6) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = 4
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0.AuxInt = 4
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() == 7
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() == 7) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = 3
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0.AuxInt = 3
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
+ // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
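+ // Note: sizes 9..15 are copied as two overlapping 8-byte moves, one
+ // at offset size-8 and one at offset 0; each store writes the correct
+ // final bytes, so the overlap is benign.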
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = SizeAndAlign(s).Size() - 8
+ v.AddArg(dst)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0.AuxInt = SizeAndAlign(s).Size() - 8
+ v0.AddArg(src)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
+ v1.AddArg(dst)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v2.AddArg(src)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v1.AddArg(mem)
+ v.AddArg(v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
+ v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 16
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 16
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
+ // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
+ v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
+ v0.AuxInt = SizeAndAlign(s).Size() % 16
+ v0.AddArg(dst)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
+ v1.AuxInt = SizeAndAlign(s).Size() % 16
+ v1.AddArg(src)
+ v.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
+ v3.AddArg(src)
+ v3.AddArg(mem)
+ v2.AddArg(v3)
+ v2.AddArg(mem)
+ v.AddArg(v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
+ // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
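+ // Note: the AuxInt is the entry offset into the duffcopy routine.
+ // The device has 64 blocks, each copying 16 bytes, so copying
+ // size/16 blocks means entering 64-size/16 blocks from the top;
+ // the factor of 14 is presumably the code size of one block.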
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpAMD64DUFFCOPY)
+ v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
+ // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
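+ // Note: REP MOVSQ copies quadwords, so the count operand is size/8,
+ // materialized into a register with MOVQconst.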
+ for {
+ s := v.AuxInt
+ dst := v.Args[0]
+ src := v.Args[1]
+ mem := v.Args[2]
+ if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
+ break
+ }
+ v.reset(OpAMD64REPMOVSQ)
+ v.AddArg(dst)
+ v.AddArg(src)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0.AuxInt = SizeAndAlign(s).Size() / 8
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul16 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul32F x y)
+ // cond:
+ // result: (MULSS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULSS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64 x y)
+ // cond:
+ // result: (MULQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul64F x y)
+ // cond:
+ // result: (MULSD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULSD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Mul8 x y)
+ // cond:
+ // result: (MULL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64MULL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg16 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg32F x)
+ // cond:
+ // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
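+ // Note: float negation just flips the sign bit: PXOR with the bit
+ // pattern of -0.0, i.e. f2i(math.Copysign(0, -1)). Subtracting from
+ // zero would be wrong for x == 0, since -(+0) must be -0.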
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64 x)
+ // cond:
+ // result: (NEGQ x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGQ)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg64F x)
+ // cond:
+ // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64PXOR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
+ v0.AuxInt = f2i(math.Copysign(0, -1))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neg8 x)
+ // cond:
+ // result: (NEGL x)
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq16 x y)
+ // cond:
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32 x y)
+ // cond:
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq32F x y)
+ // cond:
+ // result: (SETNEF (UCOMISS x y))
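+ // Note: SETNEF is the NaN-aware variant of SETNE: it also accepts
+ // the unordered flag pattern, because x != y must be true whenever
+ // either operand is NaN.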
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64 x y)
+ // cond:
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq64F x y)
+ // cond:
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Neq8 x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqB x y)
+ // cond:
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NeqPtr x y)
+ // cond:
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck ptr mem)
+ // cond:
+ // result: (LoweredNilCheck ptr mem)
+ for {
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64LoweredNilCheck)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Not x)
+ // cond:
+ // result: (XORLconst [1] x)
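+ // Note: booleans are materialized as 0 or 1, so logical negation is
+ // an XOR with 1.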
+ for {
+ x := v.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = 1
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDQconst [off] ptr)
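+ // Note: only offsets that fit in a signed 32-bit immediate can use
+ // ADDQconst; the fallback rule below materializes larger offsets
+ // with MOVQconst, since x86 has no 64-bit immediate add.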
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = off
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond:
+ // result: (ADDQ (MOVQconst [off]) ptr)
+ for {
+ off := v.AuxInt
+ ptr := v.Args[0]
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0.AuxInt = off
+ v.AddArg(v0)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or16 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or32 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or64 x y)
+ // cond:
+ // result: (ORQ x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Or8 x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OrB x y)
+ // cond:
+ // result: (ORL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ORL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
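+ // Note: unsigned right shifts use the width-exact SHRW, so here the
+ // zeroing mask compares against the true operand width, 16.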
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 16
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x16 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
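+ // Note: signed right shifts cannot simply be masked to zero: Go
+ // requires counts >= the width to produce all sign bits (0 or -1).
+ // The SBBLcarrymask/NOTL/ORL sequence instead saturates the count to
+ // all-ones when y >= 16, and SARW with that huge count sign-fills.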
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x32 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x64 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh16x8 <t> x y)
+ // cond:
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 16
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 32
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x16 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x32 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x64 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh32x8 <t> x y)
+ // cond:
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 32
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux16 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux32 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux64 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64Ux8 <t> x y)
+ // cond:
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 64
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x16 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x32 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x64 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh64x8 <t> x y)
+ // cond:
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 64
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux16 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux32 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux64 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8Ux8 <t> x y)
+ // cond:
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v2.AuxInt = 8
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x16 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x32 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x64 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Rsh8x8 <t> x y)
+ // cond:
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+ v3.AuxInt = 8
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16875,203 +17072,6 @@
return true
}
}
-func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORL x (MOVLconst [c]))
- // cond:
- // result: (XORLconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL (MOVLconst [c]) x)
- // cond:
- // result: (XORLconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL x x)
- // cond:
- // result: (MOVLconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORLconst [c] (XORLconst [d] x))
- // cond:
- // result: (XORLconst [c ^ d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64XORLconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c ^ d
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] x)
- // cond: int32(c)==0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int32(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] (MOVLconst [d]))
- // cond:
- // result: (MOVLconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORQ x (MOVQconst [c]))
- // cond: is32Bit(c)
- // result: (XORQconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_1.AuxInt
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORQ (MOVQconst [c]) x)
- // cond: is32Bit(c)
- // result: (XORQconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORQ x x)
- // cond:
- // result: (MOVQconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORQconst [c] (XORQconst [d] x))
- // cond:
- // result: (XORQconst [c ^ d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64XORQconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c ^ d
- v.AddArg(x)
- return true
- }
- // match: (XORQconst [0] x)
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORQconst [c] (MOVQconst [d]))
- // cond:
- // result: (MOVQconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = c ^ d
- return true
- }
- return false
-}
func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -17306,8 +17306,8 @@
v.reset(OpZero)
v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
- v0.AddArg(destptr)
v0.AuxInt = SizeAndAlign(s).Size() % 8
+ v0.AddArg(destptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
v1.AuxInt = 0
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index eb000d7..a4659e4 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -70,32 +70,6 @@
return rewriteValueARM_OpARMANDshiftRL(v, config)
case OpARMANDshiftRLreg:
return rewriteValueARM_OpARMANDshiftRLreg(v, config)
- case OpAdd16:
- return rewriteValueARM_OpAdd16(v, config)
- case OpAdd32:
- return rewriteValueARM_OpAdd32(v, config)
- case OpAdd32F:
- return rewriteValueARM_OpAdd32F(v, config)
- case OpAdd32carry:
- return rewriteValueARM_OpAdd32carry(v, config)
- case OpAdd32withcarry:
- return rewriteValueARM_OpAdd32withcarry(v, config)
- case OpAdd64F:
- return rewriteValueARM_OpAdd64F(v, config)
- case OpAdd8:
- return rewriteValueARM_OpAdd8(v, config)
- case OpAddPtr:
- return rewriteValueARM_OpAddPtr(v, config)
- case OpAddr:
- return rewriteValueARM_OpAddr(v, config)
- case OpAnd16:
- return rewriteValueARM_OpAnd16(v, config)
- case OpAnd32:
- return rewriteValueARM_OpAnd32(v, config)
- case OpAnd8:
- return rewriteValueARM_OpAnd8(v, config)
- case OpAndB:
- return rewriteValueARM_OpAndB(v, config)
case OpARMBIC:
return rewriteValueARM_OpARMBIC(v, config)
case OpARMBICconst:
@@ -136,124 +110,12 @@
return rewriteValueARM_OpARMCMPshiftRL(v, config)
case OpARMCMPshiftRLreg:
return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
- case OpClosureCall:
- return rewriteValueARM_OpClosureCall(v, config)
- case OpCom16:
- return rewriteValueARM_OpCom16(v, config)
- case OpCom32:
- return rewriteValueARM_OpCom32(v, config)
- case OpCom8:
- return rewriteValueARM_OpCom8(v, config)
- case OpConst16:
- return rewriteValueARM_OpConst16(v, config)
- case OpConst32:
- return rewriteValueARM_OpConst32(v, config)
- case OpConst32F:
- return rewriteValueARM_OpConst32F(v, config)
- case OpConst64F:
- return rewriteValueARM_OpConst64F(v, config)
- case OpConst8:
- return rewriteValueARM_OpConst8(v, config)
- case OpConstBool:
- return rewriteValueARM_OpConstBool(v, config)
- case OpConstNil:
- return rewriteValueARM_OpConstNil(v, config)
- case OpConvert:
- return rewriteValueARM_OpConvert(v, config)
- case OpCvt32Fto32:
- return rewriteValueARM_OpCvt32Fto32(v, config)
- case OpCvt32Fto32U:
- return rewriteValueARM_OpCvt32Fto32U(v, config)
- case OpCvt32Fto64F:
- return rewriteValueARM_OpCvt32Fto64F(v, config)
- case OpCvt32Uto32F:
- return rewriteValueARM_OpCvt32Uto32F(v, config)
- case OpCvt32Uto64F:
- return rewriteValueARM_OpCvt32Uto64F(v, config)
- case OpCvt32to32F:
- return rewriteValueARM_OpCvt32to32F(v, config)
- case OpCvt32to64F:
- return rewriteValueARM_OpCvt32to64F(v, config)
- case OpCvt64Fto32:
- return rewriteValueARM_OpCvt64Fto32(v, config)
- case OpCvt64Fto32F:
- return rewriteValueARM_OpCvt64Fto32F(v, config)
- case OpCvt64Fto32U:
- return rewriteValueARM_OpCvt64Fto32U(v, config)
case OpARMDIV:
return rewriteValueARM_OpARMDIV(v, config)
case OpARMDIVU:
return rewriteValueARM_OpARMDIVU(v, config)
- case OpDeferCall:
- return rewriteValueARM_OpDeferCall(v, config)
- case OpDiv16:
- return rewriteValueARM_OpDiv16(v, config)
- case OpDiv16u:
- return rewriteValueARM_OpDiv16u(v, config)
- case OpDiv32:
- return rewriteValueARM_OpDiv32(v, config)
- case OpDiv32F:
- return rewriteValueARM_OpDiv32F(v, config)
- case OpDiv32u:
- return rewriteValueARM_OpDiv32u(v, config)
- case OpDiv64F:
- return rewriteValueARM_OpDiv64F(v, config)
- case OpDiv8:
- return rewriteValueARM_OpDiv8(v, config)
- case OpDiv8u:
- return rewriteValueARM_OpDiv8u(v, config)
- case OpEq16:
- return rewriteValueARM_OpEq16(v, config)
- case OpEq32:
- return rewriteValueARM_OpEq32(v, config)
- case OpEq32F:
- return rewriteValueARM_OpEq32F(v, config)
- case OpEq64F:
- return rewriteValueARM_OpEq64F(v, config)
- case OpEq8:
- return rewriteValueARM_OpEq8(v, config)
- case OpEqB:
- return rewriteValueARM_OpEqB(v, config)
- case OpEqPtr:
- return rewriteValueARM_OpEqPtr(v, config)
case OpARMEqual:
return rewriteValueARM_OpARMEqual(v, config)
- case OpGeq16:
- return rewriteValueARM_OpGeq16(v, config)
- case OpGeq16U:
- return rewriteValueARM_OpGeq16U(v, config)
- case OpGeq32:
- return rewriteValueARM_OpGeq32(v, config)
- case OpGeq32F:
- return rewriteValueARM_OpGeq32F(v, config)
- case OpGeq32U:
- return rewriteValueARM_OpGeq32U(v, config)
- case OpGeq64F:
- return rewriteValueARM_OpGeq64F(v, config)
- case OpGeq8:
- return rewriteValueARM_OpGeq8(v, config)
- case OpGeq8U:
- return rewriteValueARM_OpGeq8U(v, config)
- case OpGetClosurePtr:
- return rewriteValueARM_OpGetClosurePtr(v, config)
- case OpGoCall:
- return rewriteValueARM_OpGoCall(v, config)
- case OpGreater16:
- return rewriteValueARM_OpGreater16(v, config)
- case OpGreater16U:
- return rewriteValueARM_OpGreater16U(v, config)
- case OpGreater32:
- return rewriteValueARM_OpGreater32(v, config)
- case OpGreater32F:
- return rewriteValueARM_OpGreater32F(v, config)
- case OpGreater32U:
- return rewriteValueARM_OpGreater32U(v, config)
- case OpGreater64F:
- return rewriteValueARM_OpGreater64F(v, config)
- case OpGreater8:
- return rewriteValueARM_OpGreater8(v, config)
- case OpGreater8U:
- return rewriteValueARM_OpGreater8U(v, config)
case OpARMGreaterEqual:
return rewriteValueARM_OpARMGreaterEqual(v, config)
case OpARMGreaterEqualU:
@@ -262,58 +124,6 @@
return rewriteValueARM_OpARMGreaterThan(v, config)
case OpARMGreaterThanU:
return rewriteValueARM_OpARMGreaterThanU(v, config)
- case OpHmul16:
- return rewriteValueARM_OpHmul16(v, config)
- case OpHmul16u:
- return rewriteValueARM_OpHmul16u(v, config)
- case OpHmul32:
- return rewriteValueARM_OpHmul32(v, config)
- case OpHmul32u:
- return rewriteValueARM_OpHmul32u(v, config)
- case OpHmul8:
- return rewriteValueARM_OpHmul8(v, config)
- case OpHmul8u:
- return rewriteValueARM_OpHmul8u(v, config)
- case OpInterCall:
- return rewriteValueARM_OpInterCall(v, config)
- case OpIsInBounds:
- return rewriteValueARM_OpIsInBounds(v, config)
- case OpIsNonNil:
- return rewriteValueARM_OpIsNonNil(v, config)
- case OpIsSliceInBounds:
- return rewriteValueARM_OpIsSliceInBounds(v, config)
- case OpLeq16:
- return rewriteValueARM_OpLeq16(v, config)
- case OpLeq16U:
- return rewriteValueARM_OpLeq16U(v, config)
- case OpLeq32:
- return rewriteValueARM_OpLeq32(v, config)
- case OpLeq32F:
- return rewriteValueARM_OpLeq32F(v, config)
- case OpLeq32U:
- return rewriteValueARM_OpLeq32U(v, config)
- case OpLeq64F:
- return rewriteValueARM_OpLeq64F(v, config)
- case OpLeq8:
- return rewriteValueARM_OpLeq8(v, config)
- case OpLeq8U:
- return rewriteValueARM_OpLeq8U(v, config)
- case OpLess16:
- return rewriteValueARM_OpLess16(v, config)
- case OpLess16U:
- return rewriteValueARM_OpLess16U(v, config)
- case OpLess32:
- return rewriteValueARM_OpLess32(v, config)
- case OpLess32F:
- return rewriteValueARM_OpLess32F(v, config)
- case OpLess32U:
- return rewriteValueARM_OpLess32U(v, config)
- case OpLess64F:
- return rewriteValueARM_OpLess64F(v, config)
- case OpLess8:
- return rewriteValueARM_OpLess8(v, config)
- case OpLess8U:
- return rewriteValueARM_OpLess8U(v, config)
case OpARMLessEqual:
return rewriteValueARM_OpARMLessEqual(v, config)
case OpARMLessEqualU:
@@ -322,38 +132,6 @@
return rewriteValueARM_OpARMLessThan(v, config)
case OpARMLessThanU:
return rewriteValueARM_OpARMLessThanU(v, config)
- case OpLoad:
- return rewriteValueARM_OpLoad(v, config)
- case OpLrot16:
- return rewriteValueARM_OpLrot16(v, config)
- case OpLrot32:
- return rewriteValueARM_OpLrot32(v, config)
- case OpLrot8:
- return rewriteValueARM_OpLrot8(v, config)
- case OpLsh16x16:
- return rewriteValueARM_OpLsh16x16(v, config)
- case OpLsh16x32:
- return rewriteValueARM_OpLsh16x32(v, config)
- case OpLsh16x64:
- return rewriteValueARM_OpLsh16x64(v, config)
- case OpLsh16x8:
- return rewriteValueARM_OpLsh16x8(v, config)
- case OpLsh32x16:
- return rewriteValueARM_OpLsh32x16(v, config)
- case OpLsh32x32:
- return rewriteValueARM_OpLsh32x32(v, config)
- case OpLsh32x64:
- return rewriteValueARM_OpLsh32x64(v, config)
- case OpLsh32x8:
- return rewriteValueARM_OpLsh32x8(v, config)
- case OpLsh8x16:
- return rewriteValueARM_OpLsh8x16(v, config)
- case OpLsh8x32:
- return rewriteValueARM_OpLsh8x32(v, config)
- case OpLsh8x64:
- return rewriteValueARM_OpLsh8x64(v, config)
- case OpLsh8x8:
- return rewriteValueARM_OpLsh8x8(v, config)
case OpARMMOVBUload:
return rewriteValueARM_OpARMMOVBUload(v, config)
case OpARMMOVBUreg:
@@ -422,60 +200,6 @@
return rewriteValueARM_OpARMMVNshiftRL(v, config)
case OpARMMVNshiftRLreg:
return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
- case OpMod16:
- return rewriteValueARM_OpMod16(v, config)
- case OpMod16u:
- return rewriteValueARM_OpMod16u(v, config)
- case OpMod32:
- return rewriteValueARM_OpMod32(v, config)
- case OpMod32u:
- return rewriteValueARM_OpMod32u(v, config)
- case OpMod8:
- return rewriteValueARM_OpMod8(v, config)
- case OpMod8u:
- return rewriteValueARM_OpMod8u(v, config)
- case OpMove:
- return rewriteValueARM_OpMove(v, config)
- case OpMul16:
- return rewriteValueARM_OpMul16(v, config)
- case OpMul32:
- return rewriteValueARM_OpMul32(v, config)
- case OpMul32F:
- return rewriteValueARM_OpMul32F(v, config)
- case OpMul32uhilo:
- return rewriteValueARM_OpMul32uhilo(v, config)
- case OpMul64F:
- return rewriteValueARM_OpMul64F(v, config)
- case OpMul8:
- return rewriteValueARM_OpMul8(v, config)
- case OpNeg16:
- return rewriteValueARM_OpNeg16(v, config)
- case OpNeg32:
- return rewriteValueARM_OpNeg32(v, config)
- case OpNeg32F:
- return rewriteValueARM_OpNeg32F(v, config)
- case OpNeg64F:
- return rewriteValueARM_OpNeg64F(v, config)
- case OpNeg8:
- return rewriteValueARM_OpNeg8(v, config)
- case OpNeq16:
- return rewriteValueARM_OpNeq16(v, config)
- case OpNeq32:
- return rewriteValueARM_OpNeq32(v, config)
- case OpNeq32F:
- return rewriteValueARM_OpNeq32F(v, config)
- case OpNeq64F:
- return rewriteValueARM_OpNeq64F(v, config)
- case OpNeq8:
- return rewriteValueARM_OpNeq8(v, config)
- case OpNeqB:
- return rewriteValueARM_OpNeqB(v, config)
- case OpNeqPtr:
- return rewriteValueARM_OpNeqPtr(v, config)
- case OpNilCheck:
- return rewriteValueARM_OpNilCheck(v, config)
- case OpNot:
- return rewriteValueARM_OpNot(v, config)
case OpARMNotEqual:
return rewriteValueARM_OpARMNotEqual(v, config)
case OpARMOR:
@@ -494,16 +218,6 @@
return rewriteValueARM_OpARMORshiftRL(v, config)
case OpARMORshiftRLreg:
return rewriteValueARM_OpARMORshiftRLreg(v, config)
- case OpOffPtr:
- return rewriteValueARM_OpOffPtr(v, config)
- case OpOr16:
- return rewriteValueARM_OpOr16(v, config)
- case OpOr32:
- return rewriteValueARM_OpOr32(v, config)
- case OpOr8:
- return rewriteValueARM_OpOr8(v, config)
- case OpOrB:
- return rewriteValueARM_OpOrB(v, config)
case OpARMRSB:
return rewriteValueARM_OpARMRSB(v, config)
case OpARMRSBSshiftLL:
@@ -546,54 +260,6 @@
return rewriteValueARM_OpARMRSCshiftRL(v, config)
case OpARMRSCshiftRLreg:
return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
- case OpRsh16Ux16:
- return rewriteValueARM_OpRsh16Ux16(v, config)
- case OpRsh16Ux32:
- return rewriteValueARM_OpRsh16Ux32(v, config)
- case OpRsh16Ux64:
- return rewriteValueARM_OpRsh16Ux64(v, config)
- case OpRsh16Ux8:
- return rewriteValueARM_OpRsh16Ux8(v, config)
- case OpRsh16x16:
- return rewriteValueARM_OpRsh16x16(v, config)
- case OpRsh16x32:
- return rewriteValueARM_OpRsh16x32(v, config)
- case OpRsh16x64:
- return rewriteValueARM_OpRsh16x64(v, config)
- case OpRsh16x8:
- return rewriteValueARM_OpRsh16x8(v, config)
- case OpRsh32Ux16:
- return rewriteValueARM_OpRsh32Ux16(v, config)
- case OpRsh32Ux32:
- return rewriteValueARM_OpRsh32Ux32(v, config)
- case OpRsh32Ux64:
- return rewriteValueARM_OpRsh32Ux64(v, config)
- case OpRsh32Ux8:
- return rewriteValueARM_OpRsh32Ux8(v, config)
- case OpRsh32x16:
- return rewriteValueARM_OpRsh32x16(v, config)
- case OpRsh32x32:
- return rewriteValueARM_OpRsh32x32(v, config)
- case OpRsh32x64:
- return rewriteValueARM_OpRsh32x64(v, config)
- case OpRsh32x8:
- return rewriteValueARM_OpRsh32x8(v, config)
- case OpRsh8Ux16:
- return rewriteValueARM_OpRsh8Ux16(v, config)
- case OpRsh8Ux32:
- return rewriteValueARM_OpRsh8Ux32(v, config)
- case OpRsh8Ux64:
- return rewriteValueARM_OpRsh8Ux64(v, config)
- case OpRsh8Ux8:
- return rewriteValueARM_OpRsh8Ux8(v, config)
- case OpRsh8x16:
- return rewriteValueARM_OpRsh8x16(v, config)
- case OpRsh8x32:
- return rewriteValueARM_OpRsh8x32(v, config)
- case OpRsh8x64:
- return rewriteValueARM_OpRsh8x64(v, config)
- case OpRsh8x8:
- return rewriteValueARM_OpRsh8x8(v, config)
case OpARMSBC:
return rewriteValueARM_OpARMSBC(v, config)
case OpARMSBCconst:
@@ -654,6 +320,356 @@
return rewriteValueARM_OpARMSUBshiftRL(v, config)
case OpARMSUBshiftRLreg:
return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
+ case OpARMXOR:
+ return rewriteValueARM_OpARMXOR(v, config)
+ case OpARMXORconst:
+ return rewriteValueARM_OpARMXORconst(v, config)
+ case OpARMXORshiftLL:
+ return rewriteValueARM_OpARMXORshiftLL(v, config)
+ case OpARMXORshiftLLreg:
+ return rewriteValueARM_OpARMXORshiftLLreg(v, config)
+ case OpARMXORshiftRA:
+ return rewriteValueARM_OpARMXORshiftRA(v, config)
+ case OpARMXORshiftRAreg:
+ return rewriteValueARM_OpARMXORshiftRAreg(v, config)
+ case OpARMXORshiftRL:
+ return rewriteValueARM_OpARMXORshiftRL(v, config)
+ case OpARMXORshiftRLreg:
+ return rewriteValueARM_OpARMXORshiftRLreg(v, config)
+ case OpAdd16:
+ return rewriteValueARM_OpAdd16(v, config)
+ case OpAdd32:
+ return rewriteValueARM_OpAdd32(v, config)
+ case OpAdd32F:
+ return rewriteValueARM_OpAdd32F(v, config)
+ case OpAdd32carry:
+ return rewriteValueARM_OpAdd32carry(v, config)
+ case OpAdd32withcarry:
+ return rewriteValueARM_OpAdd32withcarry(v, config)
+ case OpAdd64F:
+ return rewriteValueARM_OpAdd64F(v, config)
+ case OpAdd8:
+ return rewriteValueARM_OpAdd8(v, config)
+ case OpAddPtr:
+ return rewriteValueARM_OpAddPtr(v, config)
+ case OpAddr:
+ return rewriteValueARM_OpAddr(v, config)
+ case OpAnd16:
+ return rewriteValueARM_OpAnd16(v, config)
+ case OpAnd32:
+ return rewriteValueARM_OpAnd32(v, config)
+ case OpAnd8:
+ return rewriteValueARM_OpAnd8(v, config)
+ case OpAndB:
+ return rewriteValueARM_OpAndB(v, config)
+ case OpClosureCall:
+ return rewriteValueARM_OpClosureCall(v, config)
+ case OpCom16:
+ return rewriteValueARM_OpCom16(v, config)
+ case OpCom32:
+ return rewriteValueARM_OpCom32(v, config)
+ case OpCom8:
+ return rewriteValueARM_OpCom8(v, config)
+ case OpConst16:
+ return rewriteValueARM_OpConst16(v, config)
+ case OpConst32:
+ return rewriteValueARM_OpConst32(v, config)
+ case OpConst32F:
+ return rewriteValueARM_OpConst32F(v, config)
+ case OpConst64F:
+ return rewriteValueARM_OpConst64F(v, config)
+ case OpConst8:
+ return rewriteValueARM_OpConst8(v, config)
+ case OpConstBool:
+ return rewriteValueARM_OpConstBool(v, config)
+ case OpConstNil:
+ return rewriteValueARM_OpConstNil(v, config)
+ case OpConvert:
+ return rewriteValueARM_OpConvert(v, config)
+ case OpCvt32Fto32:
+ return rewriteValueARM_OpCvt32Fto32(v, config)
+ case OpCvt32Fto32U:
+ return rewriteValueARM_OpCvt32Fto32U(v, config)
+ case OpCvt32Fto64F:
+ return rewriteValueARM_OpCvt32Fto64F(v, config)
+ case OpCvt32Uto32F:
+ return rewriteValueARM_OpCvt32Uto32F(v, config)
+ case OpCvt32Uto64F:
+ return rewriteValueARM_OpCvt32Uto64F(v, config)
+ case OpCvt32to32F:
+ return rewriteValueARM_OpCvt32to32F(v, config)
+ case OpCvt32to64F:
+ return rewriteValueARM_OpCvt32to64F(v, config)
+ case OpCvt64Fto32:
+ return rewriteValueARM_OpCvt64Fto32(v, config)
+ case OpCvt64Fto32F:
+ return rewriteValueARM_OpCvt64Fto32F(v, config)
+ case OpCvt64Fto32U:
+ return rewriteValueARM_OpCvt64Fto32U(v, config)
+ case OpDeferCall:
+ return rewriteValueARM_OpDeferCall(v, config)
+ case OpDiv16:
+ return rewriteValueARM_OpDiv16(v, config)
+ case OpDiv16u:
+ return rewriteValueARM_OpDiv16u(v, config)
+ case OpDiv32:
+ return rewriteValueARM_OpDiv32(v, config)
+ case OpDiv32F:
+ return rewriteValueARM_OpDiv32F(v, config)
+ case OpDiv32u:
+ return rewriteValueARM_OpDiv32u(v, config)
+ case OpDiv64F:
+ return rewriteValueARM_OpDiv64F(v, config)
+ case OpDiv8:
+ return rewriteValueARM_OpDiv8(v, config)
+ case OpDiv8u:
+ return rewriteValueARM_OpDiv8u(v, config)
+ case OpEq16:
+ return rewriteValueARM_OpEq16(v, config)
+ case OpEq32:
+ return rewriteValueARM_OpEq32(v, config)
+ case OpEq32F:
+ return rewriteValueARM_OpEq32F(v, config)
+ case OpEq64F:
+ return rewriteValueARM_OpEq64F(v, config)
+ case OpEq8:
+ return rewriteValueARM_OpEq8(v, config)
+ case OpEqB:
+ return rewriteValueARM_OpEqB(v, config)
+ case OpEqPtr:
+ return rewriteValueARM_OpEqPtr(v, config)
+ case OpGeq16:
+ return rewriteValueARM_OpGeq16(v, config)
+ case OpGeq16U:
+ return rewriteValueARM_OpGeq16U(v, config)
+ case OpGeq32:
+ return rewriteValueARM_OpGeq32(v, config)
+ case OpGeq32F:
+ return rewriteValueARM_OpGeq32F(v, config)
+ case OpGeq32U:
+ return rewriteValueARM_OpGeq32U(v, config)
+ case OpGeq64F:
+ return rewriteValueARM_OpGeq64F(v, config)
+ case OpGeq8:
+ return rewriteValueARM_OpGeq8(v, config)
+ case OpGeq8U:
+ return rewriteValueARM_OpGeq8U(v, config)
+ case OpGetClosurePtr:
+ return rewriteValueARM_OpGetClosurePtr(v, config)
+ case OpGoCall:
+ return rewriteValueARM_OpGoCall(v, config)
+ case OpGreater16:
+ return rewriteValueARM_OpGreater16(v, config)
+ case OpGreater16U:
+ return rewriteValueARM_OpGreater16U(v, config)
+ case OpGreater32:
+ return rewriteValueARM_OpGreater32(v, config)
+ case OpGreater32F:
+ return rewriteValueARM_OpGreater32F(v, config)
+ case OpGreater32U:
+ return rewriteValueARM_OpGreater32U(v, config)
+ case OpGreater64F:
+ return rewriteValueARM_OpGreater64F(v, config)
+ case OpGreater8:
+ return rewriteValueARM_OpGreater8(v, config)
+ case OpGreater8U:
+ return rewriteValueARM_OpGreater8U(v, config)
+ case OpHmul16:
+ return rewriteValueARM_OpHmul16(v, config)
+ case OpHmul16u:
+ return rewriteValueARM_OpHmul16u(v, config)
+ case OpHmul32:
+ return rewriteValueARM_OpHmul32(v, config)
+ case OpHmul32u:
+ return rewriteValueARM_OpHmul32u(v, config)
+ case OpHmul8:
+ return rewriteValueARM_OpHmul8(v, config)
+ case OpHmul8u:
+ return rewriteValueARM_OpHmul8u(v, config)
+ case OpInterCall:
+ return rewriteValueARM_OpInterCall(v, config)
+ case OpIsInBounds:
+ return rewriteValueARM_OpIsInBounds(v, config)
+ case OpIsNonNil:
+ return rewriteValueARM_OpIsNonNil(v, config)
+ case OpIsSliceInBounds:
+ return rewriteValueARM_OpIsSliceInBounds(v, config)
+ case OpLeq16:
+ return rewriteValueARM_OpLeq16(v, config)
+ case OpLeq16U:
+ return rewriteValueARM_OpLeq16U(v, config)
+ case OpLeq32:
+ return rewriteValueARM_OpLeq32(v, config)
+ case OpLeq32F:
+ return rewriteValueARM_OpLeq32F(v, config)
+ case OpLeq32U:
+ return rewriteValueARM_OpLeq32U(v, config)
+ case OpLeq64F:
+ return rewriteValueARM_OpLeq64F(v, config)
+ case OpLeq8:
+ return rewriteValueARM_OpLeq8(v, config)
+ case OpLeq8U:
+ return rewriteValueARM_OpLeq8U(v, config)
+ case OpLess16:
+ return rewriteValueARM_OpLess16(v, config)
+ case OpLess16U:
+ return rewriteValueARM_OpLess16U(v, config)
+ case OpLess32:
+ return rewriteValueARM_OpLess32(v, config)
+ case OpLess32F:
+ return rewriteValueARM_OpLess32F(v, config)
+ case OpLess32U:
+ return rewriteValueARM_OpLess32U(v, config)
+ case OpLess64F:
+ return rewriteValueARM_OpLess64F(v, config)
+ case OpLess8:
+ return rewriteValueARM_OpLess8(v, config)
+ case OpLess8U:
+ return rewriteValueARM_OpLess8U(v, config)
+ case OpLoad:
+ return rewriteValueARM_OpLoad(v, config)
+ case OpLrot16:
+ return rewriteValueARM_OpLrot16(v, config)
+ case OpLrot32:
+ return rewriteValueARM_OpLrot32(v, config)
+ case OpLrot8:
+ return rewriteValueARM_OpLrot8(v, config)
+ case OpLsh16x16:
+ return rewriteValueARM_OpLsh16x16(v, config)
+ case OpLsh16x32:
+ return rewriteValueARM_OpLsh16x32(v, config)
+ case OpLsh16x64:
+ return rewriteValueARM_OpLsh16x64(v, config)
+ case OpLsh16x8:
+ return rewriteValueARM_OpLsh16x8(v, config)
+ case OpLsh32x16:
+ return rewriteValueARM_OpLsh32x16(v, config)
+ case OpLsh32x32:
+ return rewriteValueARM_OpLsh32x32(v, config)
+ case OpLsh32x64:
+ return rewriteValueARM_OpLsh32x64(v, config)
+ case OpLsh32x8:
+ return rewriteValueARM_OpLsh32x8(v, config)
+ case OpLsh8x16:
+ return rewriteValueARM_OpLsh8x16(v, config)
+ case OpLsh8x32:
+ return rewriteValueARM_OpLsh8x32(v, config)
+ case OpLsh8x64:
+ return rewriteValueARM_OpLsh8x64(v, config)
+ case OpLsh8x8:
+ return rewriteValueARM_OpLsh8x8(v, config)
+ case OpMod16:
+ return rewriteValueARM_OpMod16(v, config)
+ case OpMod16u:
+ return rewriteValueARM_OpMod16u(v, config)
+ case OpMod32:
+ return rewriteValueARM_OpMod32(v, config)
+ case OpMod32u:
+ return rewriteValueARM_OpMod32u(v, config)
+ case OpMod8:
+ return rewriteValueARM_OpMod8(v, config)
+ case OpMod8u:
+ return rewriteValueARM_OpMod8u(v, config)
+ case OpMove:
+ return rewriteValueARM_OpMove(v, config)
+ case OpMul16:
+ return rewriteValueARM_OpMul16(v, config)
+ case OpMul32:
+ return rewriteValueARM_OpMul32(v, config)
+ case OpMul32F:
+ return rewriteValueARM_OpMul32F(v, config)
+ case OpMul32uhilo:
+ return rewriteValueARM_OpMul32uhilo(v, config)
+ case OpMul64F:
+ return rewriteValueARM_OpMul64F(v, config)
+ case OpMul8:
+ return rewriteValueARM_OpMul8(v, config)
+ case OpNeg16:
+ return rewriteValueARM_OpNeg16(v, config)
+ case OpNeg32:
+ return rewriteValueARM_OpNeg32(v, config)
+ case OpNeg32F:
+ return rewriteValueARM_OpNeg32F(v, config)
+ case OpNeg64F:
+ return rewriteValueARM_OpNeg64F(v, config)
+ case OpNeg8:
+ return rewriteValueARM_OpNeg8(v, config)
+ case OpNeq16:
+ return rewriteValueARM_OpNeq16(v, config)
+ case OpNeq32:
+ return rewriteValueARM_OpNeq32(v, config)
+ case OpNeq32F:
+ return rewriteValueARM_OpNeq32F(v, config)
+ case OpNeq64F:
+ return rewriteValueARM_OpNeq64F(v, config)
+ case OpNeq8:
+ return rewriteValueARM_OpNeq8(v, config)
+ case OpNeqB:
+ return rewriteValueARM_OpNeqB(v, config)
+ case OpNeqPtr:
+ return rewriteValueARM_OpNeqPtr(v, config)
+ case OpNilCheck:
+ return rewriteValueARM_OpNilCheck(v, config)
+ case OpNot:
+ return rewriteValueARM_OpNot(v, config)
+ case OpOffPtr:
+ return rewriteValueARM_OpOffPtr(v, config)
+ case OpOr16:
+ return rewriteValueARM_OpOr16(v, config)
+ case OpOr32:
+ return rewriteValueARM_OpOr32(v, config)
+ case OpOr8:
+ return rewriteValueARM_OpOr8(v, config)
+ case OpOrB:
+ return rewriteValueARM_OpOrB(v, config)
+ case OpRsh16Ux16:
+ return rewriteValueARM_OpRsh16Ux16(v, config)
+ case OpRsh16Ux32:
+ return rewriteValueARM_OpRsh16Ux32(v, config)
+ case OpRsh16Ux64:
+ return rewriteValueARM_OpRsh16Ux64(v, config)
+ case OpRsh16Ux8:
+ return rewriteValueARM_OpRsh16Ux8(v, config)
+ case OpRsh16x16:
+ return rewriteValueARM_OpRsh16x16(v, config)
+ case OpRsh16x32:
+ return rewriteValueARM_OpRsh16x32(v, config)
+ case OpRsh16x64:
+ return rewriteValueARM_OpRsh16x64(v, config)
+ case OpRsh16x8:
+ return rewriteValueARM_OpRsh16x8(v, config)
+ case OpRsh32Ux16:
+ return rewriteValueARM_OpRsh32Ux16(v, config)
+ case OpRsh32Ux32:
+ return rewriteValueARM_OpRsh32Ux32(v, config)
+ case OpRsh32Ux64:
+ return rewriteValueARM_OpRsh32Ux64(v, config)
+ case OpRsh32Ux8:
+ return rewriteValueARM_OpRsh32Ux8(v, config)
+ case OpRsh32x16:
+ return rewriteValueARM_OpRsh32x16(v, config)
+ case OpRsh32x32:
+ return rewriteValueARM_OpRsh32x32(v, config)
+ case OpRsh32x64:
+ return rewriteValueARM_OpRsh32x64(v, config)
+ case OpRsh32x8:
+ return rewriteValueARM_OpRsh32x8(v, config)
+ case OpRsh8Ux16:
+ return rewriteValueARM_OpRsh8Ux16(v, config)
+ case OpRsh8Ux32:
+ return rewriteValueARM_OpRsh8Ux32(v, config)
+ case OpRsh8Ux64:
+ return rewriteValueARM_OpRsh8Ux64(v, config)
+ case OpRsh8Ux8:
+ return rewriteValueARM_OpRsh8Ux8(v, config)
+ case OpRsh8x16:
+ return rewriteValueARM_OpRsh8x16(v, config)
+ case OpRsh8x32:
+ return rewriteValueARM_OpRsh8x32(v, config)
+ case OpRsh8x64:
+ return rewriteValueARM_OpRsh8x64(v, config)
+ case OpRsh8x8:
+ return rewriteValueARM_OpRsh8x8(v, config)
case OpSignExt16to32:
return rewriteValueARM_OpSignExt16to32(v, config)
case OpSignExt8to16:
@@ -690,22 +706,6 @@
return rewriteValueARM_OpTrunc32to16(v, config)
case OpTrunc32to8:
return rewriteValueARM_OpTrunc32to8(v, config)
- case OpARMXOR:
- return rewriteValueARM_OpARMXOR(v, config)
- case OpARMXORconst:
- return rewriteValueARM_OpARMXORconst(v, config)
- case OpARMXORshiftLL:
- return rewriteValueARM_OpARMXORshiftLL(v, config)
- case OpARMXORshiftLLreg:
- return rewriteValueARM_OpARMXORshiftLLreg(v, config)
- case OpARMXORshiftRA:
- return rewriteValueARM_OpARMXORshiftRA(v, config)
- case OpARMXORshiftRAreg:
- return rewriteValueARM_OpARMXORshiftRAreg(v, config)
- case OpARMXORshiftRL:
- return rewriteValueARM_OpARMXORshiftRL(v, config)
- case OpARMXORshiftRLreg:
- return rewriteValueARM_OpARMXORshiftRLreg(v, config)
case OpXor16:
return rewriteValueARM_OpXor16(v, config)
case OpXor32:
@@ -775,9 +775,9 @@
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -794,9 +794,9 @@
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -813,9 +813,9 @@
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -832,9 +832,9 @@
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -851,9 +851,9 @@
y := v_1.Args[0]
flags := v.Args[2]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -870,9 +870,9 @@
x := v.Args[1]
flags := v.Args[2]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1040,19 +1040,19 @@
// cond:
// result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1061,17 +1061,17 @@
// cond:
// result: (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1114,9 +1114,9 @@
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1129,19 +1129,19 @@
// cond:
// result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1150,17 +1150,17 @@
// cond:
// result: (ADCconst x [int64(int32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1203,9 +1203,9 @@
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1218,19 +1218,19 @@
// cond:
// result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(flags)
return true
@@ -1239,17 +1239,17 @@
// cond:
// result: (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
flags := v.Args[2]
v.reset(OpARMADCconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
v.AddArg(flags)
return true
}
@@ -1292,9 +1292,9 @@
c := v_2.AuxInt
flags := v.Args[3]
v.reset(OpARMADCshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
v.AddArg(flags)
return true
}
@@ -1345,9 +1345,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SLLconst [c] y) x)
@@ -1362,9 +1362,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SRLconst [c] y))
@@ -1379,9 +1379,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SRLconst [c] y) x)
@@ -1396,9 +1396,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SRAconst [c] y))
@@ -1413,9 +1413,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD (SRAconst [c] y) x)
@@ -1430,9 +1430,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADD x (SLL y z))
@@ -1654,9 +1654,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SLLconst [c] y) x)
@@ -1671,9 +1671,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SRLconst [c] y))
@@ -1688,9 +1688,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SRLconst [c] y) x)
@@ -1705,9 +1705,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SRAconst [c] y))
@@ -1722,9 +1722,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS (SRAconst [c] y) x)
@@ -1739,9 +1739,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (ADDS x (SLL y z))
@@ -1855,18 +1855,18 @@
// cond:
// result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1874,16 +1874,16 @@
// cond:
// result: (ADDSconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -1922,9 +1922,9 @@
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -1936,18 +1936,18 @@
// cond:
// result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -1955,16 +1955,16 @@
// cond:
// result: (ADDSconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2003,9 +2003,9 @@
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2017,18 +2017,18 @@
// cond:
// result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDSconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2036,16 +2036,16 @@
// cond:
// result: (ADDSconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDSconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2084,9 +2084,9 @@
}
c := v_2.AuxInt
v.reset(OpARMADDSshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2196,18 +2196,18 @@
// cond:
// result: (ADDconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2215,16 +2215,16 @@
// cond:
// result: (ADDconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2263,9 +2263,9 @@
}
c := v_2.AuxInt
v.reset(OpARMADDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2277,18 +2277,18 @@
// cond:
// result: (ADDconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2296,16 +2296,16 @@
// cond:
// result: (ADDconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2344,9 +2344,9 @@
}
c := v_2.AuxInt
v.reset(OpARMADDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2358,18 +2358,18 @@
// cond:
// result: (ADDconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMADDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2377,16 +2377,16 @@
// cond:
// result: (ADDconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMADDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -2425,9 +2425,9 @@
}
c := v_2.AuxInt
v.reset(OpARMADDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2477,9 +2477,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SLLconst [c] y) x)
@@ -2494,9 +2494,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SRLconst [c] y))
@@ -2511,9 +2511,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SRLconst [c] y) x)
@@ -2528,9 +2528,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SRAconst [c] y))
@@ -2545,9 +2545,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND (SRAconst [c] y) x)
@@ -2562,9 +2562,9 @@
y := v_0.Args[0]
x := v.Args[1]
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (SLL y z))
@@ -2706,12 +2706,12 @@
if v_1.Op != OpARMMVNshiftLL {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (MVNshiftRL y [c]))
@@ -2723,12 +2723,12 @@
if v_1.Op != OpARMMVNshiftRL {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (AND x (MVNshiftRA y [c]))
@@ -2740,12 +2740,12 @@
if v_1.Op != OpARMMVNshiftRA {
break
}
- y := v_1.Args[0]
c := v_1.AuxInt
+ y := v_1.Args[0]
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2817,18 +2817,18 @@
// cond:
// result: (ANDconst [c] (SLLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2836,32 +2836,32 @@
// cond:
// result: (ANDconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftLL x y:(SLLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSLLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -2906,9 +2906,9 @@
}
c := v_2.AuxInt
v.reset(OpARMANDshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -2920,18 +2920,18 @@
// cond:
// result: (ANDconst [c] (SRAconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -2939,32 +2939,32 @@
// cond:
// result: (ANDconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftRA x y:(SRAconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRAconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3009,9 +3009,9 @@
}
c := v_2.AuxInt
v.reset(OpARMANDshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3023,18 +3023,18 @@
// cond:
// result: (ANDconst [c] (SRLconst <x.Type> x [d]))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMANDconst)
v.AuxInt = c
v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
v0.AuxInt = d
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -3042,32 +3042,32 @@
// cond:
// result: (ANDconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMANDconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (ANDshiftRL x y:(SRLconst x [c]) [d])
// cond: c==d
// result: y
for {
+ d := v.AuxInt
x := v.Args[0]
y := v.Args[1]
if y.Op != OpARMSRLconst {
break
}
+ c := y.AuxInt
if x != y.Args[0] {
break
}
- c := y.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3112,210 +3112,13 @@
}
c := v_2.AuxInt
v.reset(OpARMANDshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
}
-func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add16 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32F x y)
- // cond:
- // result: (ADDF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDF)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32carry x y)
- // cond:
- // result: (ADDS x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add32withcarry x y c)
- // cond:
- // result: (ADC x y c)
- for {
- x := v.Args[0]
- y := v.Args[1]
- c := v.Args[2]
- v.reset(OpARMADC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(c)
- return true
- }
-}
-func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add64F x y)
- // cond:
- // result: (ADDD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADDD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Add8 x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AddPtr x y)
- // cond:
- // result: (ADD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMADD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Addr {sym} base)
- // cond:
- // result: (MOVWaddr {sym} base)
- for {
- sym := v.Aux
- base := v.Args[0]
- v.reset(OpARMMOVWaddr)
- v.Aux = sym
- v.AddArg(base)
- return true
- }
-}
-func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And16 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And32 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (And8 x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (AndB x y)
- // cond:
- // result: (AND x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMAND)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -3346,9 +3149,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SRLconst [c] y))
@@ -3363,9 +3166,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SRAconst [c] y))
@@ -3380,9 +3183,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (BIC x (SLL y z))
@@ -3501,32 +3304,32 @@
// cond:
// result: (BICconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftLL x (SLLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSLLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3551,9 +3354,9 @@
}
c := v_2.AuxInt
v.reset(OpARMBICshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3565,32 +3368,32 @@
// cond:
// result: (BICconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRAconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3615,9 +3418,9 @@
}
c := v_2.AuxInt
v.reset(OpARMBICshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3629,32 +3432,32 @@
// cond:
// result: (BICconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMBICconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
// match: (BICshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVWconst [0])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMSRLconst {
break
}
+ c := v_1.AuxInt
if x != v_1.Args[0] {
break
}
- c := v_1.AuxInt
- d := v.AuxInt
if !(c == d) {
break
}
@@ -3679,9 +3482,9 @@
}
c := v_2.AuxInt
v.reset(OpARMBICshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -3693,11 +3496,11 @@
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagEQ {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3720,11 +3523,11 @@
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagLT_UGT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3747,11 +3550,11 @@
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagGT_UGT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3760,17 +3563,17 @@
// cond:
// result: (CMOVWLSconst x flags [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMInvertFlags {
break
}
flags := v_1.Args[0]
- c := v.AuxInt
v.reset(OpARMCMOVWLSconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(flags)
- v.AuxInt = c
return true
}
return false
@@ -3782,11 +3585,11 @@
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagEQ {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3795,11 +3598,11 @@
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagLT_ULT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3822,11 +3625,11 @@
// cond:
// result: (MOVWconst [c])
for {
+ c := v.AuxInt
v_1 := v.Args[1]
if v_1.Op != OpARMFlagGT_ULT {
break
}
- c := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = c
return true
@@ -3849,17 +3652,17 @@
// cond:
// result: (CMOVWHSconst x flags [c])
for {
+ c := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMInvertFlags {
break
}
flags := v_1.Args[0]
- c := v.AuxInt
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(flags)
- v.AuxInt = c
return true
}
return false
@@ -3911,9 +3714,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SLLconst [c] y) x)
@@ -3929,9 +3732,9 @@
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftLL, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
@@ -3947,9 +3750,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SRLconst [c] y) x)
@@ -3965,9 +3768,9 @@
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftRL, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
@@ -3983,9 +3786,9 @@
c := v_1.AuxInt
y := v_1.Args[0]
v.reset(OpARMCMPshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
// match: (CMP (SRAconst [c] y) x)
@@ -4001,9 +3804,9 @@
x := v.Args[1]
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPshiftRA, TypeFlags)
+ v0.AuxInt = c
v0.AddArg(x)
v0.AddArg(y)
- v0.AuxInt = c
v.AddArg(v0)
return true
}
@@ -4166,12 +3969,12 @@
// cond: int32(x)==int32(y)
// result: (FlagEQ)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) == int32(y)) {
break
}
@@ -4182,12 +3985,12 @@
// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
// result: (FlagLT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -4198,12 +4001,12 @@
// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
// result: (FlagLT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -4214,12 +4017,12 @@
// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
// result: (FlagGT_ULT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
break
}
@@ -4230,12 +4033,12 @@
// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
// result: (FlagGT_UGT)
for {
+ y := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
x := v_0.AuxInt
- y := v.AuxInt
if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
@@ -4246,11 +4049,11 @@
// cond: 0xff < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVBUreg {
break
}
- c := v.AuxInt
if !(0xff < c) {
break
}
@@ -4261,11 +4064,11 @@
// cond: 0xffff < c
// result: (FlagLT_ULT)
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVHUreg {
break
}
- c := v.AuxInt
if !(0xffff < c) {
break
}
@@ -4276,12 +4079,12 @@
// cond: 0 <= int32(m) && int32(m) < int32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMANDconst {
break
}
m := v_0.AuxInt
- n := v.AuxInt
if !(0 <= int32(m) && int32(m) < int32(n)) {
break
}
@@ -4292,12 +4095,12 @@
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
// result: (FlagLT_ULT)
for {
+ n := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMSRLconst {
break
}
c := v_0.AuxInt
- n := v.AuxInt
if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
break
}
@@ -4313,19 +4116,19 @@
// cond:
// result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4334,16 +4137,16 @@
// cond:
// result: (CMPconst x [int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
return true
}
return false
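
[Editor's note: the hunks above fold a CMPconst against a known constant directly into a flags pseudo-op. The op encodes both the signed and the unsigned ordering, since later branches may consume either; FlagLT_UGT, for instance, covers values that compare less-than signed but greater-than unsigned. A quick check of that case:

	package main

	import "fmt"

	func main() {
		// -1 vs 1: less than as int32, greater than as uint32. This is
		// the case the FlagLT_UGT arm of the folding above selects.
		x, y := int32(-1), int32(1)
		fmt.Println(x < y)                 // true  (signed)
		fmt.Println(uint32(x) > uint32(y)) // true  (unsigned: 0xffffffff > 1)
	}
]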
@@ -4384,9 +4187,9 @@
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftLL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -4398,19 +4201,19 @@
// cond:
// result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4419,16 +4222,16 @@
// cond:
// result: (CMPconst x [int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -4469,9 +4272,9 @@
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftRA)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
@@ -4483,19 +4286,19 @@
// cond:
// result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- d := v.AuxInt
v.reset(OpARMInvertFlags)
v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
v0.AuxInt = c
v1 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v1.AddArg(x)
v1.AuxInt = d
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
@@ -4504,16 +4307,16 @@
// cond:
// result: (CMPconst x [int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
x := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
v.reset(OpARMCMPconst)
- v.AddArg(x)
v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
return true
}
return false
@@ -4554,306 +4357,13 @@
}
c := v_2.AuxInt
v.reset(OpARMCMPshiftRL)
+ v.AuxInt = c
v.AddArg(x)
v.AddArg(y)
- v.AuxInt = c
return true
}
return false
}
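
[Editor's note: the long run of deletions that starts here removes none of these rewrites for good. Functions for generic ops (ClosureCall, Com*, Const*, Cvt*, and so on) reappear later in the file, because the per-op functions are now keyed by the full generated op name ("OpARMADD", "OpAdd16") rather than by the bare rule name. A small demonstration of why that regroups the file; using plain sort.Strings here is an assumption about the generator's ordering:

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		// Keys used to be bare rule names ("Add16", "BIC"); they are now
		// the full op names, so the architecture is part of the sort key.
		ops := []string{"OpAdd16", "OpARMBIC", "OpAddPtr", "OpARMADD"}
		sort.Strings(ops)
		fmt.Println(ops)
		// [OpARMADD OpARMBIC OpAdd16 OpAddPtr]
		// ASCII 'R' < 'd', so every "OpARM..." name sorts before the
		// generic "OpAdd..." names, moving the generic-op functions.
	}
]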
-func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- closure := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARMCALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com16 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com32 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Com8 x)
- // cond:
- // result: (MVN x)
- for {
- x := v.Args[0]
- v.reset(OpARMMVN)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const16 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const32F [val])
- // cond:
- // result: (MOVFconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVFconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const64F [val])
- // cond:
- // result: (MOVDconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVDconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Const8 [val])
- // cond:
- // result: (MOVWconst [val])
- for {
- val := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = val
- return true
- }
-}
-func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstBool [b])
- // cond:
- // result: (MOVWconst [b])
- for {
- b := v.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = b
- return true
- }
-}
-func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ConstNil)
- // cond:
- // result: (MOVWconst [0])
- for {
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Convert x mem)
- // cond:
- // result: (MOVWconvert x mem)
- for {
- x := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMMOVWconvert)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32 x)
- // cond:
- // result: (MOVFW x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFW)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto32U x)
- // cond:
- // result: (MOVFWU x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFWU)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Fto64F x)
- // cond:
- // result: (MOVFD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVFD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Uto32F x)
- // cond:
- // result: (MOVWUF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWUF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32Uto64F x)
- // cond:
- // result: (MOVWUD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWUD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to32F x)
- // cond:
- // result: (MOVWF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt32to64F x)
- // cond:
- // result: (MOVWD x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVWD)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32 x)
- // cond:
- // result: (MOVDW x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDW)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32F x)
- // cond:
- // result: (MOVDF x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDF)
- v.AddArg(x)
- return true
- }
-}
-func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Cvt64Fto32U x)
- // cond:
- // result: (MOVDWU x)
- for {
- x := v.Args[0]
- v.reset(OpARMMOVDWU)
- v.AddArg(x)
- return true
- }
-}
func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4935,285 +4445,6 @@
}
return false
}
-func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (DeferCall [argwid] mem)
- // cond:
- // result: (CALLdefer [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpARMCALLdefer)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16 x y)
- // cond:
- // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div16u x y)
- // cond:
- // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32 x y)
- // cond:
- // result: (DIV x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32F x y)
- // cond:
- // result: (DIVF x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVF)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div32u x y)
- // cond:
- // result: (DIVU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div64F x y)
- // cond:
- // result: (DIVD x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8 x y)
- // cond:
- // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIV)
- v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Div8u x y)
- // cond:
- // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMDIVU)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(x)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq16 x y)
- // cond:
- // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32 x y)
- // cond:
- // result: (Equal (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq32F x y)
- // cond:
- // result: (Equal (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq64F x y)
- // cond:
- // result: (Equal (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Eq8 x y)
- // cond:
- // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqB x y)
- // cond:
- // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMXORconst)
- v.AuxInt = 1
- v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (EqPtr x y)
- // cond:
- // result: (Equal (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
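
[Editor's note: one deleted rule worth a remark is EqB, which needs no flags at all. Booleans are materialized as 0 or 1, so x == y is just (x^y)^1, exactly the XORconst [1] (XOR x y) result above. Checking the identity:

	package main

	import "fmt"

	// eqb mirrors the lowering: XORconst [1] (XOR x y).
	func eqb(x, y uint32) uint32 { return 1 ^ (x ^ y) }

	func main() {
		fmt.Println(eqb(1, 1), eqb(1, 0), eqb(0, 0)) // 1 0 1
	}
]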
func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5292,336 +4523,6 @@
}
return false
}
-func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16 x y)
- // cond:
- // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq16U x y)
- // cond:
- // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32 x y)
- // cond:
- // result: (GreaterEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32F x y)
- // cond:
- // result: (GreaterEqual (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq32U x y)
- // cond:
- // result: (GreaterEqualU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq64F x y)
- // cond:
- // result: (GreaterEqual (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8 x y)
- // cond:
- // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Geq8U x y)
- // cond:
- // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GetClosurePtr)
- // cond:
- // result: (LoweredGetClosurePtr)
- for {
- v.reset(OpARMLoweredGetClosurePtr)
- return true
- }
-}
-func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (GoCall [argwid] mem)
- // cond:
- // result: (CALLgo [argwid] mem)
- for {
- argwid := v.AuxInt
- mem := v.Args[0]
- v.reset(OpARMCALLgo)
- v.AuxInt = argwid
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16 x y)
- // cond:
- // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater16U x y)
- // cond:
- // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32 x y)
- // cond:
- // result: (GreaterThan (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32F x y)
- // cond:
- // result: (GreaterThan (CMPF x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater32U x y)
- // cond:
- // result: (GreaterThanU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater64F x y)
- // cond:
- // result: (GreaterThan (CMPD x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8 x y)
- // cond:
- // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Greater8U x y)
- // cond:
- // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
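
[Editor's note: throughout these comparison lowerings, sub-word operands are widened before the CMP. Signed compares use SignExt16to32/SignExt8to32 and unsigned ones ZeroExt16to32/ZeroExt8to32, because the flags come from a full 32-bit compare; the extension choice is what preserves the narrow semantics:

	package main

	import "fmt"

	func main() {
		var a, b int16 = -1, 1
		// Sign-extended, the registers compare the way int16 does...
		fmt.Println(int32(a) < int32(b)) // true
		// ...zero-extended, they compare the way uint16 does.
		fmt.Println(uint32(uint16(a)) < uint32(uint16(b))) // false: 0xffff > 1
	}
]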
func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -5934,495 +4835,6 @@
}
return false
}
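
[Editor's note: the Hmul rules that follow compute the high half of a double-width product: extend to 32 bits (signed or unsigned to match the op), multiply, then shift the product right by the operand width. A small model of the signed 16-bit case:

	package main

	import "fmt"

	// High 16 bits of the product of two int16s, as the Hmul16 rule
	// lowers it: sign-extend, 32-bit multiply, arithmetic shift by 16.
	func hmul16(x, y int16) int16 {
		return int16((int32(x) * int32(y)) >> 16)
	}

	func main() {
		fmt.Println(hmul16(-30000, 30000)) // -13733: high half of -900000000
	}
]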
-func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16 x y)
- // cond:
- // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul16u x y)
- // cond:
- // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 16
- return true
- }
-}
-func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32 x y)
- // cond:
- // result: (HMUL x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMHMUL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul32u x y)
- // cond:
- // result: (HMULU x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMHMULU)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8 x y)
- // cond:
- // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 8
- return true
- }
-}
-func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Hmul8u x y)
- // cond:
- // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- v.AuxInt = 8
- return true
- }
-}
-func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (InterCall [argwid] entry mem)
- // cond:
- // result: (CALLinter [argwid] entry mem)
- for {
- argwid := v.AuxInt
- entry := v.Args[0]
- mem := v.Args[1]
- v.reset(OpARMCALLinter)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(mem)
- return true
- }
-}
-func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsInBounds idx len)
- // cond:
- // result: (LessThanU (CMP idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsNonNil ptr)
- // cond:
- // result: (NotEqual (CMPconst [0] ptr))
- for {
- ptr := v.Args[0]
- v.reset(OpARMNotEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(ptr)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (IsSliceInBounds idx len)
- // cond:
- // result: (LessEqualU (CMP idx len))
- for {
- idx := v.Args[0]
- len := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(idx)
- v0.AddArg(len)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16 x y)
- // cond:
- // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq16U x y)
- // cond:
- // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32 x y)
- // cond:
- // result: (LessEqual (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32F x y)
- // cond:
- // result: (GreaterEqual (CMPF y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq32U x y)
- // cond:
- // result: (LessEqualU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq64F x y)
- // cond:
- // result: (GreaterEqual (CMPD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterEqual)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8 x y)
- // cond:
- // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqual)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Leq8U x y)
- // cond:
- // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessEqualU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16 x y)
- // cond:
- // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less16U x y)
- // cond:
- // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32 x y)
- // cond:
- // result: (LessThan (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32F x y)
- // cond:
- // result: (GreaterThan (CMPF y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less32U x y)
- // cond:
- // result: (LessThanU (CMP x y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less64F x y)
- // cond:
- // result: (GreaterThan (CMPD y x))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMGreaterThan)
- v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
- v0.AddArg(y)
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8 x y)
- // cond:
- // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThan)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Less8U x y)
- // cond:
- // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMLessThanU)
- v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v1.AddArg(x)
- v0.AddArg(v1)
- v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v2.AddArg(y)
- v0.AddArg(v2)
- v.AddArg(v0)
- return true
- }
-}
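
[Editor's note: observe the operand swap in the floating-point rules above: Less32F x y becomes GreaterThan (CMPF y x) rather than a LessThan on (CMPF x y). The likely reason, an inference on my part rather than anything stated in this CL, is NaN handling: Go requires ordered comparisons involving NaN to be false, and after an unordered ARM compare the GT/GE conditions are false while LT/LE are not. The semantics the lowering must preserve:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		nan := math.NaN()
		// Every ordered comparison involving NaN must be false in Go.
		fmt.Println(1.0 < nan, nan < 1.0, nan <= nan) // false false false
	}
]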
func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -6735,502 +5147,6 @@
}
return false
}
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Load <t> ptr mem)
- // cond: t.IsBoolean()
- // result: (MOVBUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(t.IsBoolean()) {
- break
- }
- v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is8BitInt(t) && isSigned(t))
- // result: (MOVBload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is8BitInt(t) && isSigned(t)) {
- break
- }
- v.reset(OpARMMOVBload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is8BitInt(t) && !isSigned(t))
- // result: (MOVBUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is8BitInt(t) && !isSigned(t)) {
- break
- }
- v.reset(OpARMMOVBUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is16BitInt(t) && isSigned(t))
- // result: (MOVHload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t) && isSigned(t)) {
- break
- }
- v.reset(OpARMMOVHload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is16BitInt(t) && !isSigned(t))
- // result: (MOVHUload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is16BitInt(t) && !isSigned(t)) {
- break
- }
- v.reset(OpARMMOVHUload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: (is32BitInt(t) || isPtr(t))
- // result: (MOVWload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpARMMOVWload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is32BitFloat(t)
- // result: (MOVFload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is32BitFloat(t)) {
- break
- }
- v.reset(OpARMMOVFload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (Load <t> ptr mem)
- // cond: is64BitFloat(t)
- // result: (MOVDload ptr mem)
- for {
- t := v.Type
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(is64BitFloat(t)) {
- break
- }
- v.reset(OpARMMOVDload)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot16 <t> x [c])
- // cond:
- // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMOR)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
- v0.AddArg(x)
- v0.AuxInt = c & 15
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
- v1.AddArg(x)
- v1.AuxInt = 16 - c&15
- v.AddArg(v1)
- return true
- }
-}
-func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot32 x [c])
- // cond:
- // result: (SRRconst x [32-c&31])
- for {
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMSRRconst)
- v.AddArg(x)
- v.AuxInt = 32 - c&31
- return true
- }
-}
-func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lrot8 <t> x [c])
- // cond:
- // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
- for {
- t := v.Type
- x := v.Args[0]
- c := v.AuxInt
- v.reset(OpARMOR)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
- v0.AddArg(x)
- v0.AuxInt = c & 7
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
- v1.AddArg(x)
- v1.AuxInt = 8 - c&7
- v.AddArg(v1)
- return true
- }
-}
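
[Editor's note: the Lrot32 rule just deleted turns a rotate-left by c into a rotate-right by (32-c)&31 (SRRconst), since ARM only has a rotate-right; the &31 keeps c=0 in range. Checking the identity with math/bits:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		// Rotate left by c == rotate right by (32-c)&31.
		x := uint32(0x80000001)
		for _, c := range []int{0, 1, 3, 31} {
			fmt.Println(bits.RotateLeft32(x, c) ==
				bits.RotateLeft32(x, -((32-c)&31))) // true
		}
	}
]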
-func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x64 x (Const64 [c]))
- // cond: uint64(c) < 16
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 16) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh16x64 _ (Const64 [c]))
- // cond: uint64(c) >= 16
- // result: (Const16 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 16) {
- break
- }
- v.reset(OpConst16)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh16x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x64 x (Const64 [c]))
- // cond: uint64(c) < 32
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 32) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh32x64 _ (Const64 [c]))
- // cond: uint64(c) >= 32
- // result: (Const32 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 32) {
- break
- }
- v.reset(OpConst32)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh32x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x16 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v1.AddArg(y)
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v2.AuxInt = 256
- v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
- v3.AddArg(y)
- v2.AddArg(v3)
- v.AddArg(v2)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x32 x y)
- // cond:
- // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMCMOVWHSconst)
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v1.AuxInt = 256
- v1.AddArg(y)
- v.AddArg(v1)
- v.AuxInt = 0
- return true
- }
-}
-func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x64 x (Const64 [c]))
- // cond: uint64(c) < 8
- // result: (SLLconst x [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) < 8) {
- break
- }
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c
- return true
- }
- // match: (Lsh8x64 _ (Const64 [c]))
- // cond: uint64(c) >= 8
- // result: (Const8 [0])
- for {
- v_1 := v.Args[1]
- if v_1.Op != OpConst64 {
- break
- }
- c := v_1.AuxInt
- if !(uint64(c) >= 8) {
- break
- }
- v.reset(OpConst8)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (Lsh8x8 x y)
- // cond:
- // result: (SLL x (ZeroExt8to32 y))
- for {
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpARMSLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
-}
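
[Editor's note: the variable-count Lsh lowerings above all share one guard: the shifted value is selected only while the count is below 256, otherwise CMOVWHSconst picks the constant 0. To my understanding, ARM's shift-by-register instructions use only the bottom byte of the count (yielding 0 for counts 32..255), so counts of 256 and up would wrap around that byte and must be forced to 0 explicitly. A model of the lowered Lsh32x32 under that assumption:

	package main

	import "fmt"

	func lsh32x32(x, y uint32) uint32 {
		var sll uint32
		if b := y & 0xff; b < 32 { // hardware LSL-by-register behavior
			sll = x << b
		}
		if y >= 256 { // CMPconst [256] + CMOVWHSconst ... [0]
			return 0
		}
		return sll
	}

	func main() {
		// Without the guard, a count of 256 would wrap to 0 and return x.
		fmt.Println(lsh32x32(1, 4), lsh32x32(1, 40), lsh32x32(1, 256)) // 16 0 0
	}
]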
func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -8413,17 +6329,17 @@
if v_0.Op != OpARMADDshiftLL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8439,17 +6355,17 @@
if v_0.Op != OpARMADDshiftRL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8465,17 +6381,17 @@
if v_0.Op != OpARMADDshiftRA {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
mem := v.Args[1]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8550,13 +6466,13 @@
if v_1.Op != OpARMSLLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8568,14 +6484,14 @@
if v_0.Op != OpARMSLLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8588,13 +6504,13 @@
if v_1.Op != OpARMSRLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8606,14 +6522,14 @@
if v_0.Op != OpARMSRLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8626,13 +6542,13 @@
if v_1.Op != OpARMSRAconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8644,14 +6560,14 @@
if v_0.Op != OpARMSRAconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
mem := v.Args[2]
v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(mem)
return true
}
@@ -8664,18 +6580,18 @@
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftLL {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8689,13 +6605,13 @@
// cond:
// result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(uint32(c) << uint64(d))
@@ -8712,18 +6628,18 @@
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftRA {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8737,13 +6653,13 @@
// cond:
// result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(int32(c) >> uint64(d))
@@ -8760,18 +6676,18 @@
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
for {
+ c := v.AuxInt
ptr := v.Args[0]
idx := v.Args[1]
- c := v.AuxInt
v_2 := v.Args[2]
if v_2.Op != OpARMMOVWstoreshiftRL {
break
}
+ d := v_2.AuxInt
ptr2 := v_2.Args[0]
if idx != v_2.Args[1] {
break
}
- d := v_2.AuxInt
x := v_2.Args[2]
if !(c == d && isSamePtr(ptr, ptr2)) {
break
@@ -8785,13 +6701,13 @@
// cond:
// result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
mem := v.Args[2]
v.reset(OpARMMOVWload)
v.AuxInt = int64(uint32(c) >> uint64(d))
@@ -8920,18 +6836,18 @@
if v_0.Op != OpARMADDshiftLL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -8948,18 +6864,18 @@
if v_0.Op != OpARMADDshiftRL {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -8976,18 +6892,18 @@
if v_0.Op != OpARMADDshiftRA {
break
}
+ c := v_0.AuxInt
ptr := v_0.Args[0]
idx := v_0.Args[1]
- c := v_0.AuxInt
val := v.Args[1]
mem := v.Args[2]
if !(sym == nil && !config.nacl) {
break
}
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9044,14 +6960,14 @@
if v_1.Op != OpARMSLLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9064,15 +6980,15 @@
if v_0.Op != OpARMSLLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9086,14 +7002,14 @@
if v_1.Op != OpARMSRLconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9106,15 +7022,15 @@
if v_0.Op != OpARMSRLconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9128,14 +7044,14 @@
if v_1.Op != OpARMSRAconst {
break
}
- idx := v_1.Args[0]
c := v_1.AuxInt
+ idx := v_1.Args[0]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9148,15 +7064,15 @@
if v_0.Op != OpARMSRAconst {
break
}
- idx := v_0.Args[0]
c := v_0.AuxInt
+ idx := v_0.Args[0]
ptr := v.Args[1]
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = c
v.AddArg(ptr)
v.AddArg(idx)
- v.AuxInt = c
v.AddArg(val)
v.AddArg(mem)
return true
@@ -9170,13 +7086,13 @@
// cond:
// result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9195,13 +7111,13 @@
// cond:
// result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9220,13 +7136,13 @@
// cond:
// result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
for {
+ d := v.AuxInt
ptr := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpARMMOVWconst {
break
}
c := v_1.AuxInt
- d := v.AuxInt
val := v.Args[2]
mem := v.Args[3]
v.reset(OpARMMOVWstore)
@@ -9323,9 +7239,9 @@
break
}
v.reset(OpARMADDshiftLL)
- v.AddArg(x)
- v.AddArg(x)
v.AuxInt = log2(c - 1)
+ v.AddArg(x)
+ v.AddArg(x)
return true
}
// match: (MUL x (MOVWconst [c]))
@@ -9342,9 +7258,9 @@
break
}
v.reset(OpARMRSBshiftLL)
- v.AddArg(x)
- v.AddArg(x)
v.AuxInt = log2(c + 1)
+ v.AddArg(x)
+ v.AddArg(x)
return true
}
// match: (MUL x (MOVWconst [c]))
@@ -9363,9 +7279,9 @@
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -9385,9 +7301,9 @@
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -9407,9 +7323,9 @@
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 7)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -9429,9 +7345,9 @@
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -9517,9 +7433,9 @@
break
}
v.reset(OpARMADDshiftLL)
- v.AddArg(x)
- v.AddArg(x)
v.AuxInt = log2(c - 1)
+ v.AddArg(x)
+ v.AddArg(x)
return true
}
// match: (MUL (MOVWconst [c]) x)
@@ -9536,9 +7452,9 @@
break
}
v.reset(OpARMRSBshiftLL)
- v.AddArg(x)
- v.AddArg(x)
v.AuxInt = log2(c + 1)
+ v.AddArg(x)
+ v.AddArg(x)
return true
}
// match: (MUL (MOVWconst [c]) x)
@@ -9557,9 +7473,9 @@
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 3)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -9579,9 +7495,9 @@
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 5)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -9601,9 +7517,9 @@
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 7)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -9623,9 +7539,9 @@
v.reset(OpARMSLLconst)
v.AuxInt = log2(c / 9)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
@@ -9744,9 +7660,9 @@
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9767,9 +7683,9 @@
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9792,9 +7708,9 @@
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(x)
v1.AuxInt = 1
+ v1.AddArg(x)
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9818,9 +7734,9 @@
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(x)
v1.AuxInt = 2
+ v1.AddArg(x)
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9844,9 +7760,9 @@
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(x)
v1.AuxInt = 3
+ v1.AddArg(x)
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9870,9 +7786,9 @@
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(x)
v1.AuxInt = 3
+ v1.AddArg(x)
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -9970,9 +7886,9 @@
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -9993,9 +7909,9 @@
}
v.reset(OpARMADD)
v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(x)
v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(a)
return true
@@ -10018,9 +7934,9 @@
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 3)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(x)
v1.AuxInt = 1
+ v1.AddArg(x)
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10044,9 +7960,9 @@
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 5)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(x)
v1.AuxInt = 2
+ v1.AddArg(x)
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10070,9 +7986,9 @@
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 7)
v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(x)
v1.AuxInt = 3
+ v1.AddArg(x)
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10096,9 +8012,9 @@
v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
v0.AuxInt = log2(c / 9)
v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
- v1.AddArg(x)
- v1.AddArg(x)
v1.AuxInt = 3
+ v1.AddArg(x)
+ v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(a)
@@ -10153,8 +8069,8 @@
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftLL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SRLconst [c] x))
@@ -10168,8 +8084,8 @@
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftRL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SRAconst [c] x))
@@ -10183,8 +8099,8 @@
c := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpARMMVNshiftRA)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (MVN (SLL x y))
@@ -10241,12 +8157,12 @@
// cond:
// result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(uint32(c) << uint64(d))
return true
@@ -10267,8 +8183,8 @@
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftLL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -10280,12 +8196,12 @@
// cond:
// result: (MOVWconst [^int64(int32(c)>>uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(int32(c) >> uint64(d))
return true
@@ -10306,8 +8222,8 @@
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftRA)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -10319,12 +8235,12 @@
// cond:
// result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
for {
+ d := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpARMMOVWconst {
break
}
c := v_0.AuxInt
- d := v.AuxInt
v.reset(OpARMMOVWconst)
v.AuxInt = ^int64(uint32(c) >> uint64(d))
return true
@@ -10345,12 +8261,6463 @@
}
c := v_1.AuxInt
v.reset(OpARMMVNshiftRL)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
}
+func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NotEqual (FlagEQ))
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (NotEqual (FlagLT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagLT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagGT_ULT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (FlagGT_UGT))
+ // cond:
+ // result: (MOVWconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // cond:
+ // result: (NotEqual x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMNotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OR (MOVWconst [c]) x)
+ // cond:
+ // result: (ORconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x (MOVWconst [c]))
+ // cond:
+ // result: (ORconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (OR x (SLLconst [c] y))
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SLLconst [c] y) x)
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR x (SRLconst [c] y))
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SRLconst [c] y) x)
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR x (SRAconst [c] y))
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SRAconst [c] y) x)
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR x (SLL y z))
+ // cond:
+ // result: (ORshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SLL y z) x)
+ // cond:
+ // result: (ORshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR x (SRL y z))
+ // cond:
+ // result: (ORshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SRL y z) x)
+ // cond:
+ // result: (ORshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR x (SRA y z))
+ // cond:
+ // result: (ORshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR (SRA y z) x)
+ // cond:
+ // result: (ORshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (OR x x)
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [-1])
+ for {
+ c := v.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = -1
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c|d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c | d
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // cond:
+ // result: (ORconst [c|d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMORconst)
+ v.AuxInt = c | d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL x y:(SLLconst x [c]) [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpARMSLLconst {
+ break
+ }
+ c := y.AuxInt
+ if x != y.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA x y:(SRAconst x [c]) [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpARMSRAconst {
+ break
+ }
+ c := y.AuxInt
+ if x != y.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (ORconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMORconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL x y:(SRLconst x [c]) [d])
+ // cond: c==d
+ // result: y
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpARMSRLconst {
+ break
+ }
+ c := y.AuxInt
+ if x != y.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ORshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (ORconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSB (MOVWconst [c]) x)
+ // cond:
+ // result: (SUBconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (MOVWconst [c]))
+ // cond:
+ // result: (RSBconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (SLLconst [c] y))
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB (SLLconst [c] y) x)
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB x (SRLconst [c] y))
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB (SRLconst [c] y) x)
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB x (SRAconst [c] y))
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB (SRAconst [c] y) x)
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (RSB x (SLL y z))
+ // cond:
+ // result: (RSBshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SLL y z) x)
+ // cond:
+ // result: (SUBshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x (SRL y z))
+ // cond:
+ // result: (RSBshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SRL y z) x)
+ // cond:
+ // result: (SUBshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x (SRA y z))
+ // cond:
+ // result: (RSBshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB (SRA y z) x)
+ // cond:
+ // result: (SUBshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (RSB x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBSconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(c-d))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(c - d))
+ return true
+ }
+ // match: (RSBconst [c] (RSBconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftLL x (SLLconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftRA x (SRAconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (RSBconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftRL x (SRLconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (SUBconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCconst [c] (ADDconst [d] x) flags)
+ // cond:
+ // result: (RSCconst [int64(int32(c-d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCconst [c] (SUBconst [d] x) flags)
+ // cond:
+ // result: (RSCconst [int64(int32(c+d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (SBCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBC (MOVWconst [c]) x flags)
+ // cond:
+ // result: (RSCconst [c] x flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCconst [c] x flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SLLconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SLLconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRLconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRLconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRAconst [c] y) flags)
+ // cond:
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRAconst [c] y) x flags)
+ // cond:
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SLL y z) flags)
+ // cond:
+ // result: (SBCshiftLLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SLL y z) x flags)
+ // cond:
+ // result: (RSCshiftLLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRL y z) flags)
+ // cond:
+ // result: (SBCshiftRLreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRL y z) x flags)
+ // cond:
+ // result: (RSCshiftRLreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC x (SRA y z) flags)
+ // cond:
+ // result: (SBCshiftRAreg x y z flags)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMSBCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBC (SRA y z) x flags)
+ // cond:
+ // result: (RSCshiftRAreg x y z flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCconst [c] (ADDconst [d] x) flags)
+ // cond:
+ // result: (SBCconst [int64(int32(c-d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(int32(c - d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCconst [c] (SUBconst [d] x) flags)
+ // cond:
+ // result: (SBCconst [int64(int32(c+d))] x flags)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ flags := v.Args[1]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(int32(c + d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
+ // cond:
+ // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ flags := v.Args[2]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+ // cond:
+ // result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ flags := v.Args[2]
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
+ // cond:
+ // result: (RSCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ flags := v.Args[3]
+ v.reset(OpARMRSCconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(flags)
+ return true
+ }
+ // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond:
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ flags := v.Args[3]
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SLL x (MOVWconst [c]))
+ // cond:
+ // result: (SLLconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(uint32(d)<<uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint32(d) << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRA x (MOVWconst [c]))
+ // cond:
+ // result: (SRAconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSRAconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRAcond x _ (FlagEQ))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagEQ {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAcond x y (FlagLT_ULT))
+ // cond:
+ // result: (SRA x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagLT_ULT {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAcond x _ (FlagLT_UGT))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagLT_UGT {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAcond x y (FlagGT_ULT))
+ // cond:
+ // result: (SRA x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagGT_ULT {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAcond x _ (FlagGT_UGT))
+ // cond:
+ // result: (SRAconst x [31])
+ for {
+ x := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMFlagGT_UGT {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(d)>>uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(d) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRL x (MOVWconst [c]))
+ // cond:
+ // result: (SRLconst x [c&31])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSRLconst)
+ v.AuxInt = c & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(uint32(d)>>uint64(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(uint32(d) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUB (MOVWconst [c]) x)
+ // cond:
+ // result: (RSBconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVWconst [c]))
+ // cond:
+ // result: (SUBconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (SLLconst [c] y))
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB (SLLconst [c] y) x)
+ // cond:
+ // result: (RSBshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB x (SRLconst [c] y))
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB (SRLconst [c] y) x)
+ // cond:
+ // result: (RSBshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB x (SRAconst [c] y))
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB (SRAconst [c] y) x)
+ // cond:
+ // result: (RSBshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUB x (SLL y z))
+ // cond:
+ // result: (SUBshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SLL y z) x)
+ // cond:
+ // result: (RSBshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB x (SRL y z))
+ // cond:
+ // result: (SUBshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SRL y z) x)
+ // cond:
+ // result: (RSBshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB x (SRA y z))
+ // cond:
+ // result: (SUBshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB (SRA y z) x)
+ // cond:
+ // result: (RSBshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUB x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBS (MOVWconst [c]) x)
+ // cond:
+ // result: (RSBSconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBS x (MOVWconst [c]))
+ // cond:
+ // result: (SUBSconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBS x (SLLconst [c] y))
+ // cond:
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS (SLLconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS x (SRLconst [c] y))
+ // cond:
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS (SRLconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS x (SRAconst [c] y))
+ // cond:
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS (SRAconst [c] y) x)
+ // cond:
+ // result: (RSBSshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SUBS x (SLL y z))
+ // cond:
+ // result: (SUBSshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SLL y z) x)
+ // cond:
+ // result: (RSBSshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS x (SRL y z))
+ // cond:
+ // result: (SUBSshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SRL y z) x)
+ // cond:
+ // result: (RSBSshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS x (SRA y z))
+ // cond:
+ // result: (SUBSshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMSUBSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (SUBS (SRA y z) x)
+ // cond:
+ // result: (RSBSshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMRSBSshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBSconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [int64(int32(d-c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int64(int32(d - c))
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(-c-d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(-c - d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // cond:
+ // result: (ADDconst [int64(int32(-c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int64(int32(-c + d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (RSBconst [d] x))
+ // cond:
+ // result: (RSBconst [int64(int32(-c+d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int64(int32(-c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftLL x (SLLconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRA x (SRAconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (SUBconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRL x (SRLconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (RSBconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XOR (MOVWconst [c]) x)
+ // cond:
+ // result: (XORconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XOR x (MOVWconst [c]))
+ // cond:
+ // result: (XORconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (XOR x (SLLconst [c] y))
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SLLconst [c] y) x)
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR x (SRLconst [c] y))
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SRLconst [c] y) x)
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR x (SRAconst [c] y))
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SRAconst [c] y) x)
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR x (SLL y z))
+ // cond:
+ // result: (XORshiftLLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SLL y z) x)
+ // cond:
+ // result: (XORshiftLLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR x (SRL y z))
+ // cond:
+ // result: (XORshiftRLreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SRL y z) x)
+ // cond:
+ // result: (XORshiftRLreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR x (SRA y z))
+ // cond:
+ // result: (XORshiftRAreg x y z)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ y := v_1.Args[0]
+ z := v_1.Args[1]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR (SRA y z) x)
+ // cond:
+ // result: (XORshiftRAreg x y z)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[0]
+ z := v_0.Args[1]
+ x := v.Args[1]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(z)
+ return true
+ }
+ // match: (XOR x x)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // cond:
+ // result: (MOVWconst [c^d])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = c ^ d
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // cond:
+ // result: (XORconst [c^d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMXORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftLL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(uint32(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = int64(uint32(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL x (SLLconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftLLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SLL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRA (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(int32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = int64(int32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRA x (SRAconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRAreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SRA <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRAreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRL (MOVWconst [c]) x [d])
+ // cond:
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVWconst [c]) [d])
+ // cond:
+ // result: (XORconst x [int64(uint32(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARMXORconst)
+ v.AuxInt = int64(uint32(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL x (SRLconst x [c]) [d])
+ // cond: c==d
+ // result: (MOVWconst [0])
+ for {
+ d := v.AuxInt
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(c == d) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (XORshiftRLreg (MOVWconst [c]) x y)
+ // cond:
+ // result: (XORconst [c] (SRL <x.Type> x y))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARMXORconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRLreg x y (MOVWconst [c]))
+ // cond:
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = c
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32carry x y)
+ // cond:
+ // result: (ADDS x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDS)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add32withcarry x y c)
+ // cond:
+ // result: (ADC x y c)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ c := v.Args[2]
+ v.reset(OpARMADC)
+ v.AddArg(x)
+ v.AddArg(y)
+ v.AddArg(c)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADDD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AddPtr x y)
+ // cond:
+ // result: (ADD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMADD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := v.Aux
+ base := v.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And16 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And32 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (And8 x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AndB x y)
+ // cond:
+ // result: (AND x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMAND)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARMCALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com16 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com32 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Com8 x)
+ // cond:
+ // result: (MVN x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMVN)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const16 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const32F [val])
+ // cond:
+ // result: (MOVFconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVFconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const64F [val])
+ // cond:
+ // result: (MOVDconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVDconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Const8 [val])
+ // cond:
+ // result: (MOVWconst [val])
+ for {
+ val := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = val
+ return true
+ }
+}
+func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstBool [b])
+ // cond:
+ // result: (MOVWconst [b])
+ for {
+ b := v.AuxInt
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = b
+ return true
+ }
+}
+func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ConstNil)
+ // cond:
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = 0
+ return true
+ }
+}
+func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Convert x mem)
+ // cond:
+ // result: (MOVWconvert x mem)
+ for {
+ x := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMMOVWconvert)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32 x)
+ // cond:
+ // result: (MOVFW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto32U x)
+ // cond:
+ // result: (MOVFWU x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFWU)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Fto64F x)
+ // cond:
+ // result: (MOVFD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVFD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto32F x)
+ // cond:
+ // result: (MOVWUF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWUF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32Uto64F x)
+ // cond:
+ // result: (MOVWUD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWUD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to32F x)
+ // cond:
+ // result: (MOVWF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt32to64F x)
+ // cond:
+ // result: (MOVWD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVWD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32 x)
+ // cond:
+ // result: (MOVDW x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDW)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32F x)
+ // cond:
+ // result: (MOVDF x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDF)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Cvt64Fto32U x)
+ // cond:
+ // result: (MOVDWU x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARMMOVDWU)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (DeferCall [argwid] mem)
+ // cond:
+ // result: (CALLdefer [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARMCALLdefer)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16 x y)
+ // cond:
+ // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div16u x y)
+ // cond:
+ // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32 x y)
+ // cond:
+ // result: (DIV x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32F x y)
+ // cond:
+ // result: (DIVF x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVF)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div32u x y)
+ // cond:
+ // result: (DIVU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div64F x y)
+ // cond:
+ // result: (DIVD x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8 x y)
+ // cond:
+ // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIV)
+ v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Div8u x y)
+ // cond:
+ // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMDIVU)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq16 x y)
+ // cond:
+ // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32 x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq32F x y)
+ // cond:
+ // result: (Equal (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq64F x y)
+ // cond:
+ // result: (Equal (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Eq8 x y)
+ // cond:
+ // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqB x y)
+ // cond:
+ // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMXORconst)
+ v.AuxInt = 1
+ v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (EqPtr x y)
+ // cond:
+ // result: (Equal (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq16U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32 x y)
+ // cond:
+ // result: (GreaterEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq32U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8 x y)
+ // cond:
+ // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Geq8U x y)
+ // cond:
+ // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GetClosurePtr)
+ // cond:
+ // result: (LoweredGetClosurePtr)
+ for {
+ v.reset(OpARMLoweredGetClosurePtr)
+ return true
+ }
+}
+func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (GoCall [argwid] mem)
+ // cond:
+ // result: (CALLgo [argwid] mem)
+ for {
+ argwid := v.AuxInt
+ mem := v.Args[0]
+ v.reset(OpARMCALLgo)
+ v.AuxInt = argwid
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16 x y)
+ // cond:
+ // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater16U x y)
+ // cond:
+ // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32 x y)
+ // cond:
+ // result: (GreaterThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32F x y)
+ // cond:
+ // result: (GreaterThan (CMPF x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater32U x y)
+ // cond:
+ // result: (GreaterThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater64F x y)
+ // cond:
+ // result: (GreaterThan (CMPD x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8 x y)
+ // cond:
+ // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Greater8U x y)
+ // cond:
+ // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul16u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v.AuxInt = 16
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32 x y)
+ // cond:
+ // result: (HMUL x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMUL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul32u x y)
+ // cond:
+ // result: (HMULU x y)
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMHMULU)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8 x y)
+ // cond:
+ // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Hmul8u x y)
+ // cond:
+ // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSRLconst)
+ v.AuxInt = 8
+ v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
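
The Hmul* rules compute the high half of a product. For sub-word types
there is no dedicated instruction, so the operands are widened,
multiplied in 32 bits, and the result shifted down by the operand width,
arithmetically for signed inputs and logically for unsigned. A plain-Go
model of the 16-bit cases:

    package main

    import "fmt"

    // hmul16 models (SRAconst (MUL (SignExt16to32 x) (SignExt16to32 y)) [16]).
    func hmul16(x, y int16) int16 {
            return int16((int32(x) * int32(y)) >> 16)
    }

    // hmul16u models the unsigned variant: zero extension plus SRLconst.
    func hmul16u(x, y uint16) uint16 {
            return uint16((uint32(x) * uint32(y)) >> 16)
    }

    func main() {
            fmt.Println(hmul16(-30000, 30000)) // -13733, high half of -900000000
            fmt.Println(hmul16u(60000, 60000)) // 54931, high half of 3600000000
    }
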
+func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (InterCall [argwid] entry mem)
+ // cond:
+ // result: (CALLinter [argwid] entry mem)
+ for {
+ argwid := v.AuxInt
+ entry := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARMCALLinter)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsInBounds idx len)
+ // cond:
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsNonNil ptr)
+ // cond:
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v.Args[0]
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (IsSliceInBounds idx len)
+ // cond:
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v.Args[0]
+ len := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(idx)
+ v0.AddArg(len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq16U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32 x y)
+ // cond:
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32F x y)
+ // cond:
+ // result: (GreaterEqual (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq32U x y)
+ // cond:
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq64F x y)
+ // cond:
+ // result: (GreaterEqual (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8 x y)
+ // cond:
+ // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Leq8U x y)
+ // cond:
+ // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16 x y)
+ // cond:
+ // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less16U x y)
+ // cond:
+ // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32 x y)
+ // cond:
+ // result: (LessThan (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32F x y)
+ // cond:
+ // result: (GreaterThan (CMPF y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less32U x y)
+ // cond:
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less64F x y)
+ // cond:
+ // result: (GreaterThan (CMPD y x))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+ v0.AddArg(y)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8 x y)
+ // cond:
+ // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Less8U x y)
+ // cond:
+ // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
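
The Load rules dispatch purely on the value's type: booleans and
unsigned sub-word integers take zero-extending loads, signed ones take
sign-extending loads, word-sized integers and pointers take MOVWload,
and floats take the FP loads. The same decision tree as a plain-Go
sketch; the typ fields stand in for the is8BitInt/isSigned/isPtr/...
predicates and are not compiler APIs:

    package main

    import "fmt"

    // typ is an illustrative stand-in for the type being loaded.
    type typ struct {
            size    int // in bytes
            signed  bool
            boolean bool
            float   bool
            ptr     bool
    }

    // loadOp mirrors the rule order above: first match wins.
    func loadOp(t typ) string {
            switch {
            case t.boolean:
                    return "MOVBUload"
            case t.size == 1 && t.signed:
                    return "MOVBload"
            case t.size == 1:
                    return "MOVBUload"
            case t.size == 2 && t.signed:
                    return "MOVHload"
            case t.size == 2:
                    return "MOVHUload"
            case t.size == 4 && !t.float || t.ptr:
                    return "MOVWload"
            case t.size == 4:
                    return "MOVFload"
            case t.size == 8:
                    return "MOVDload"
            }
            return "unhandled"
    }

    func main() {
            fmt.Println(loadOp(typ{size: 1, signed: true})) // MOVBload
            fmt.Println(loadOp(typ{size: 4, float: true}))  // MOVFload
    }
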
+func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot16 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AuxInt = c & 15
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AuxInt = 16 - c&15
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot32 x [c])
+ // cond:
+ // result: (SRRconst x [32-c&31])
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMSRRconst)
+ v.AuxInt = 32 - c&31
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lrot8 <t> x [c])
+ // cond:
+ // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+ for {
+ t := v.Type
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpARMOR)
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+ v0.AuxInt = c & 7
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+ v1.AuxInt = 8 - c&7
+ v1.AddArg(x)
+ v.AddArg(v1)
+ return true
+ }
+}
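
Lrot16 and Lrot8 build a sub-word rotate from two shifts and an OR,
using the identity rotl(x, c) = x<<(c mod w) | x>>(w - c mod w). A
runnable check of the 16-bit form; note that & binds tighter than - in
both Go and the rule syntax, so 16-c&15 reads as 16-(c&15):

    package main

    import "fmt"

    // lrot16 models (OR (SLLconst x [c&15]) (SRLconst x [16-c&15])).
    func lrot16(x uint16, c uint) uint16 {
            return x<<(c&15) | x>>(16-c&15)
    }

    func main() {
            fmt.Printf("%#04x\n", lrot16(0x1234, 4)) // 0x2341
    }
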
+func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
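
When the shift count is a literal Const64, the Lsh*x64 rules fold at
compile time: in-range counts become a single SLLconst, and counts at or
beyond the operand width fold the whole expression to a zero constant,
matching Go's shift semantics. The pair of Lsh16x64 rules above, as
plain Go:

    package main

    import "fmt"

    // lsh16const models the two Lsh16x64 rewrites.
    func lsh16const(x uint16, c uint64) uint16 {
            if c < 16 {
                    return x << c // (SLLconst x [c])
            }
            return 0 // (Const16 [0]): shifted out entirely
    }

    func main() {
            fmt.Printf("%#04x %d\n", lsh16const(0x00ff, 4), lsh16const(0x00ff, 40)) // 0x0ff0 0
    }
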
+func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh16x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh32x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x16 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v2.AuxInt = 256
+ v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg(v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x32 x y)
+ // cond:
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
+ v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+ v1.AuxInt = 256
+ v1.AddArg(y)
+ v.AddArg(v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [c])
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := v_1.AuxInt
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = 0
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (Lsh8x8 x y)
+ // cond:
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARMSLL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
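
The variable-count shifts above wrap the hardware shift in a
CMOVWHSconst guarded by (CMPconst [256] y). As I understand the ARM
semantics, register shifts honor only the bottom byte of the count, and
LSL by 32..255 already yields 0, so counts below 256 match Go's
semantics for free; the conditional select supplies the 0 for counts of
256 and up, where the bottom byte would otherwise alias a small shift.
A sketch of the combined behavior:

    package main

    import "fmt"

    // lsh32 models (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0]).
    func lsh32(x, y uint32) uint32 {
            if y >= 256 {
                    return 0 // the conditional select picks the constant
            }
            if y >= 32 {
                    return 0 // what ARM's SLL-by-register produces for 32..255
            }
            return x << y
    }

    func main() {
            fmt.Println(lsh32(1, 31))  // 2147483648
            fmt.Println(lsh32(1, 256)) // 0; raw SLL would see count 256&255 == 0
    }
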
func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -10711,8 +15078,8 @@
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
- v0.AddArg(src)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
return true
@@ -11031,705 +15398,6 @@
return true
}
}
-func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (NotEqual (FlagEQ))
- // cond:
- // result: (MOVWconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMFlagEQ {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- // match: (NotEqual (FlagLT_ULT))
- // cond:
- // result: (MOVWconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMFlagLT_ULT {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 1
- return true
- }
- // match: (NotEqual (FlagLT_UGT))
- // cond:
- // result: (MOVWconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMFlagLT_UGT {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 1
- return true
- }
- // match: (NotEqual (FlagGT_ULT))
- // cond:
- // result: (MOVWconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMFlagGT_ULT {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 1
- return true
- }
- // match: (NotEqual (FlagGT_UGT))
- // cond:
- // result: (MOVWconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMFlagGT_UGT {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 1
- return true
- }
- // match: (NotEqual (InvertFlags x))
- // cond:
- // result: (NotEqual x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMInvertFlags {
- break
- }
- x := v_0.Args[0]
- v.reset(OpARMNotEqual)
- v.AddArg(x)
- return true
- }
- return false
-}
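
The NotEqual folds above fire when the flags argument is already a known
outcome (FlagEQ, FlagLT_ULT, and so on): the projection collapses to a
constant 0 or 1, and InvertFlags passes straight through because
inequality is symmetric. In plain Go, with illustrative names:

    package main

    import "fmt"

    // notEqual models the constant-flag folds: only FlagEQ maps to 0,
    // every strict ordering outcome maps to 1.
    func notEqual(outcome string) int {
            if outcome == "FlagEQ" {
                    return 0 // (MOVWconst [0])
            }
            return 1 // (MOVWconst [1])
    }

    func main() {
            fmt.Println(notEqual("FlagEQ"), notEqual("FlagGT_UGT")) // 0 1
    }
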
-func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (OR (MOVWconst [c]) x)
- // cond:
- // result: (ORconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpARMORconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (OR x (MOVWconst [c]))
- // cond:
- // result: (ORconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpARMORconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (OR x (SLLconst [c] y))
- // cond:
- // result: (ORshiftLL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMORshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (OR (SLLconst [c] y) x)
- // cond:
- // result: (ORshiftLL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMORshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (OR x (SRLconst [c] y))
- // cond:
- // result: (ORshiftRL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMORshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (OR (SRLconst [c] y) x)
- // cond:
- // result: (ORshiftRL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMORshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (OR x (SRAconst [c] y))
- // cond:
- // result: (ORshiftRA x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMORshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (OR (SRAconst [c] y) x)
- // cond:
- // result: (ORshiftRA x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRAconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMORshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (OR x (SLL y z))
- // cond:
- // result: (ORshiftLLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMORshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (OR (SLL y z) x)
- // cond:
- // result: (ORshiftLLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMORshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (OR x (SRL y z))
- // cond:
- // result: (ORshiftRLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMORshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (OR (SRL y z) x)
- // cond:
- // result: (ORshiftRLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMORshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (OR x (SRA y z))
- // cond:
- // result: (ORshiftRAreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRA {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMORshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (OR (SRA y z) x)
- // cond:
- // result: (ORshiftRAreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRA {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMORshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (OR x x)
- // cond:
- // result: x
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- return false
-}
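
The OR rules above fold a shift on either operand into a single
shifter-operand form: constant counts become ORshiftLL/ORshiftRL/
ORshiftRA, register counts become the *reg variants, and both operand
orders are handled since OR commutes; ARM can encode the fused form in
one instruction. What the fused op computes, in plain Go:

    package main

    import "fmt"

    // orShiftLL models (ORshiftLL x y [c]) = x | y<<c.
    func orShiftLL(x, y uint32, c uint) uint32 {
            return x | y<<c
    }

    func main() {
            // (OR x (SLLconst [8] y)) and (OR (SLLconst [8] y) x)
            // rewrite to the same fused op.
            fmt.Printf("%#08x\n", orShiftLL(0x000000ff, 0x00ff, 8)) // 0x0000ffff
    }
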
-func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORconst [0] x)
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (ORconst [c] _)
- // cond: int32(c)==-1
- // result: (MOVWconst [-1])
- for {
- c := v.AuxInt
- if !(int32(c) == -1) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = -1
- return true
- }
- // match: (ORconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [c|d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = c | d
- return true
- }
- // match: (ORconst [c] (ORconst [d] x))
- // cond:
- // result: (ORconst [c|d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMORconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpARMORconst)
- v.AuxInt = c | d
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORshiftLL (MOVWconst [c]) x [d])
- // cond:
- // result: (ORconst [c] (SLLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (ORshiftLL x (MOVWconst [c]) [d])
- // cond:
- // result: (ORconst x [int64(uint32(c)<<uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMORconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) << uint64(d))
- return true
- }
- // match: (ORshiftLL x y:(SLLconst x [c]) [d])
- // cond: c==d
- // result: y
- for {
- x := v.Args[0]
- y := v.Args[1]
- if y.Op != OpARMSLLconst {
- break
- }
- if x != y.Args[0] {
- break
- }
- c := y.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORshiftLLreg (MOVWconst [c]) x y)
- // cond:
- // result: (ORconst [c] (SLL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (ORshiftLLreg x y (MOVWconst [c]))
- // cond:
- // result: (ORshiftLL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMORshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORshiftRA (MOVWconst [c]) x [d])
- // cond:
- // result: (ORconst [c] (SRAconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (ORshiftRA x (MOVWconst [c]) [d])
- // cond:
- // result: (ORconst x [int64(int32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMORconst)
- v.AddArg(x)
- v.AuxInt = int64(int32(c) >> uint64(d))
- return true
- }
- // match: (ORshiftRA x y:(SRAconst x [c]) [d])
- // cond: c==d
- // result: y
- for {
- x := v.Args[0]
- y := v.Args[1]
- if y.Op != OpARMSRAconst {
- break
- }
- if x != y.Args[0] {
- break
- }
- c := y.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORshiftRAreg (MOVWconst [c]) x y)
- // cond:
- // result: (ORconst [c] (SRA <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (ORshiftRAreg x y (MOVWconst [c]))
- // cond:
- // result: (ORshiftRA x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMORshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORshiftRL (MOVWconst [c]) x [d])
- // cond:
- // result: (ORconst [c] (SRLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (ORshiftRL x (MOVWconst [c]) [d])
- // cond:
- // result: (ORconst x [int64(uint32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMORconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) >> uint64(d))
- return true
- }
- // match: (ORshiftRL x y:(SRLconst x [c]) [d])
- // cond: c==d
- // result: y
- for {
- x := v.Args[0]
- y := v.Args[1]
- if y.Op != OpARMSRLconst {
- break
- }
- if x != y.Args[0] {
- break
- }
- c := y.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ORshiftRLreg (MOVWconst [c]) x y)
- // cond:
- // result: (ORconst [c] (SRL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (ORshiftRLreg x y (MOVWconst [c]))
- // cond:
- // result: (ORshiftRL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMORshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -11819,1181 +15487,6 @@
return true
}
}
-func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSB (MOVWconst [c]) x)
- // cond:
- // result: (SUBconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpARMSUBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (RSB x (MOVWconst [c]))
- // cond:
- // result: (RSBconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpARMRSBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (RSB x (SLLconst [c] y))
- // cond:
- // result: (RSBshiftLL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMRSBshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (RSB (SLLconst [c] y) x)
- // cond:
- // result: (SUBshiftLL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMSUBshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (RSB x (SRLconst [c] y))
- // cond:
- // result: (RSBshiftRL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMRSBshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (RSB (SRLconst [c] y) x)
- // cond:
- // result: (SUBshiftRL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMSUBshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (RSB x (SRAconst [c] y))
- // cond:
- // result: (RSBshiftRA x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMRSBshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (RSB (SRAconst [c] y) x)
- // cond:
- // result: (SUBshiftRA x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRAconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMSUBshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (RSB x (SLL y z))
- // cond:
- // result: (RSBshiftLLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMRSBshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (RSB (SLL y z) x)
- // cond:
- // result: (SUBshiftLLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMSUBshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (RSB x (SRL y z))
- // cond:
- // result: (RSBshiftRLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMRSBshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (RSB (SRL y z) x)
- // cond:
- // result: (SUBshiftRLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMSUBshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (RSB x (SRA y z))
- // cond:
- // result: (RSBshiftRAreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRA {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMRSBshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (RSB (SRA y z) x)
- // cond:
- // result: (SUBshiftRAreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRA {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMSUBshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (RSB x x)
- // cond:
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBSshiftLL (MOVWconst [c]) x [d])
- // cond:
- // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMSUBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (RSBSshiftLL x (MOVWconst [c]) [d])
- // cond:
- // result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMRSBSconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) << uint64(d))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
- // cond:
- // result: (SUBSconst [c] (SLL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMSUBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (RSBSshiftLLreg x y (MOVWconst [c]))
- // cond:
- // result: (RSBSshiftLL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMRSBSshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBSshiftRA (MOVWconst [c]) x [d])
- // cond:
- // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMSUBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (RSBSshiftRA x (MOVWconst [c]) [d])
- // cond:
- // result: (RSBSconst x [int64(int32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMRSBSconst)
- v.AddArg(x)
- v.AuxInt = int64(int32(c) >> uint64(d))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
- // cond:
- // result: (SUBSconst [c] (SRA <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMSUBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (RSBSshiftRAreg x y (MOVWconst [c]))
- // cond:
- // result: (RSBSshiftRA x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMRSBSshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBSshiftRL (MOVWconst [c]) x [d])
- // cond:
- // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMSUBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (RSBSshiftRL x (MOVWconst [c]) [d])
- // cond:
- // result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMRSBSconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) >> uint64(d))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
- // cond:
- // result: (SUBSconst [c] (SRL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMSUBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (RSBSshiftRLreg x y (MOVWconst [c]))
- // cond:
- // result: (RSBSshiftRL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMRSBSshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [int64(int32(c-d))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = int64(int32(c - d))
- return true
- }
- // match: (RSBconst [c] (RSBconst [d] x))
- // cond:
- // result: (ADDconst [int64(int32(c-d))] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMRSBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpARMADDconst)
- v.AuxInt = int64(int32(c - d))
- v.AddArg(x)
- return true
- }
- // match: (RSBconst [c] (ADDconst [d] x))
- // cond:
- // result: (RSBconst [int64(int32(c-d))] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = int64(int32(c - d))
- v.AddArg(x)
- return true
- }
- // match: (RSBconst [c] (SUBconst [d] x))
- // cond:
- // result: (RSBconst [int64(int32(c+d))] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMSUBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = int64(int32(c + d))
- v.AddArg(x)
- return true
- }
- return false
-}
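
RSBconst is a reversed subtract, computing c - x, so chained constants
fold with sign flips: c - (d - x) is (c - d) + x, hence the rewrite to
ADDconst, and the int64(int32(...)) wrappers keep the folded AuxInt in
32-bit range. A quick arithmetic check:

    package main

    import "fmt"

    // rsbConst models (RSBconst [c] x) = c - x on 32-bit values.
    func rsbConst(c, x int32) int32 { return c - x }

    func main() {
            // (RSBconst [c] (RSBconst [d] x)) -> (ADDconst [c-d] x)
            c, d, x := int32(10), int32(3), int32(5)
            fmt.Println(rsbConst(c, rsbConst(d, x)), (c-d)+x) // 12 12
    }
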
-func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBshiftLL (MOVWconst [c]) x [d])
- // cond:
- // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMSUBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (RSBshiftLL x (MOVWconst [c]) [d])
- // cond:
- // result: (RSBconst x [int64(uint32(c)<<uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMRSBconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) << uint64(d))
- return true
- }
- // match: (RSBshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBshiftLLreg (MOVWconst [c]) x y)
- // cond:
- // result: (SUBconst [c] (SLL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMSUBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (RSBshiftLLreg x y (MOVWconst [c]))
- // cond:
- // result: (RSBshiftLL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMRSBshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBshiftRA (MOVWconst [c]) x [d])
- // cond:
- // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMSUBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (RSBshiftRA x (MOVWconst [c]) [d])
- // cond:
- // result: (RSBconst x [int64(int32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMRSBconst)
- v.AddArg(x)
- v.AuxInt = int64(int32(c) >> uint64(d))
- return true
- }
- // match: (RSBshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBshiftRAreg (MOVWconst [c]) x y)
- // cond:
- // result: (SUBconst [c] (SRA <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMSUBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (RSBshiftRAreg x y (MOVWconst [c]))
- // cond:
- // result: (RSBshiftRA x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMRSBshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBshiftRL (MOVWconst [c]) x [d])
- // cond:
- // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMSUBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (RSBshiftRL x (MOVWconst [c]) [d])
- // cond:
- // result: (RSBconst x [int64(uint32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMRSBconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) >> uint64(d))
- return true
- }
- // match: (RSBshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSBshiftRLreg (MOVWconst [c]) x y)
- // cond:
- // result: (SUBconst [c] (SRL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMSUBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (RSBshiftRLreg x y (MOVWconst [c]))
- // cond:
- // result: (RSBshiftRL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMRSBshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSCconst [c] (ADDconst [d] x) flags)
- // cond:
- // result: (RSCconst [int64(int32(c-d))] x flags)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- flags := v.Args[1]
- v.reset(OpARMRSCconst)
- v.AuxInt = int64(int32(c - d))
- v.AddArg(x)
- v.AddArg(flags)
- return true
- }
- // match: (RSCconst [c] (SUBconst [d] x) flags)
- // cond:
- // result: (RSCconst [int64(int32(c+d))] x flags)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMSUBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- flags := v.Args[1]
- v.reset(OpARMRSCconst)
- v.AuxInt = int64(int32(c + d))
- v.AddArg(x)
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
- // cond:
- // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMSBCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
- // cond:
- // result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMRSCconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) << uint64(d))
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
- // cond:
- // result: (SBCconst [c] (SLL <x.Type> x y) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- flags := v.Args[3]
- v.reset(OpARMSBCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
- // cond:
- // result: (RSCshiftLL x y [c] flags)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- flags := v.Args[3]
- v.reset(OpARMRSCshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
- // cond:
- // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMSBCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
- // cond:
- // result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMRSCconst)
- v.AddArg(x)
- v.AuxInt = int64(int32(c) >> uint64(d))
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
- // cond:
- // result: (SBCconst [c] (SRA <x.Type> x y) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- flags := v.Args[3]
- v.reset(OpARMSBCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
- // cond:
- // result: (RSCshiftRA x y [c] flags)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- flags := v.Args[3]
- v.reset(OpARMRSCshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
- // cond:
- // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMSBCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
- // cond:
- // result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMRSCconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) >> uint64(d))
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
- // cond:
- // result: (SBCconst [c] (SRL <x.Type> x y) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- flags := v.Args[3]
- v.reset(OpARMSBCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
- // cond:
- // result: (RSCshiftRL x y [c] flags)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- flags := v.Args[3]
- v.reset(OpARMRSCshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- return false
-}
func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -13004,6 +15497,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
@@ -13018,7 +15512,6 @@
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
- v.AuxInt = 0
return true
}
}
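The hunk above is representative of the reordering churn throughout this file: after v.reset, the generated body now assigns v.AuxInt before any v.AddArg calls, both on the rewritten value and on each freshly built child value. A self-contained sketch of that shape, using stand-in types and op names (Foo, Barconst, Bazconst) that are illustrative only, not real ssa ops:

	package main

	import "fmt"

	// Stand-ins for the ssa package types; illustrative only.
	type Value struct {
		Op     string
		AuxInt int64
		Args   []*Value
	}

	func (v *Value) reset(op string) { v.Op, v.AuxInt, v.Args = op, 0, nil }
	func (v *Value) AddArg(w *Value) { v.Args = append(v.Args, w) }

	// rewrite mirrors the emitted order: match the args, reset the op,
	// then assign aux fields, then add args left to right.
	// Hypothetical rule: (Foo x (Barconst [c] y)) -> (Bazconst [c] x y)
	func rewrite(v *Value) bool {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != "Barconst" {
			return false
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		v.reset("Bazconst")
		v.AuxInt = c // aux assignments first...
		v.AddArg(x)  // ...then args, in order
		v.AddArg(y)
		return true
	}

	func main() {
		y := &Value{Op: "Y"}
		v := &Value{
			Op:   "Foo",
			Args: []*Value{{Op: "X"}, {Op: "Barconst", AuxInt: 7, Args: []*Value{y}}},
		}
		fmt.Println(rewrite(v), v.Op, v.AuxInt) // true Bazconst 7
	}
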
@@ -13032,6 +15525,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
@@ -13042,7 +15536,6 @@
v2.AuxInt = 256
v2.AddArg(y)
v.AddArg(v2)
- v.AuxInt = 0
return true
}
}
@@ -13063,11 +15556,11 @@
break
}
v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
v.AuxInt = c + 16
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
// match: (Rsh16Ux64 _ (Const64 [c]))
@@ -13170,11 +15663,11 @@
break
}
v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
v.AuxInt = c + 16
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
// match: (Rsh16x64 x (Const64 [c]))
@@ -13191,11 +15684,11 @@
break
}
v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 16
- v.AddArg(v0)
v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 16
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
return false
@@ -13229,6 +15722,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
v0.AddArg(x)
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
@@ -13241,7 +15735,6 @@
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
- v.AuxInt = 0
return true
}
}
@@ -13255,6 +15748,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
v0.AddArg(x)
v0.AddArg(y)
@@ -13263,7 +15757,6 @@
v1.AuxInt = 256
v1.AddArg(y)
v.AddArg(v1)
- v.AuxInt = 0
return true
}
}
@@ -13284,8 +15777,8 @@
break
}
v.reset(OpARMSRLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Rsh32Ux64 _ (Const64 [c]))
@@ -13382,8 +15875,8 @@
break
}
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Rsh32x64 x (Const64 [c]))
@@ -13400,8 +15893,8 @@
break
}
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
return false
@@ -13433,6 +15926,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
@@ -13447,7 +15941,6 @@
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
- v.AuxInt = 0
return true
}
}
@@ -13461,6 +15954,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = 0
v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
@@ -13471,7 +15965,6 @@
v2.AuxInt = 256
v2.AddArg(y)
v.AddArg(v2)
- v.AuxInt = 0
return true
}
}
@@ -13492,11 +15985,11 @@
break
}
v.reset(OpARMSRLconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
v.AuxInt = c + 24
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
// match: (Rsh8Ux64 _ (Const64 [c]))
@@ -13599,11 +16092,11 @@
break
}
v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
v.AuxInt = c + 24
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
// match: (Rsh8x64 x (Const64 [c]))
@@ -13620,11 +16113,11 @@
break
}
v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
- v0.AddArg(x)
- v0.AuxInt = 24
- v.AddArg(v0)
v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+ v0.AuxInt = 24
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
return false
@@ -13648,1894 +16141,6 @@
return true
}
}
-func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SBC (MOVWconst [c]) x flags)
- // cond:
- // result: (RSCconst [c] x flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- flags := v.Args[2]
- v.reset(OpARMRSCconst)
- v.AuxInt = c
- v.AddArg(x)
- v.AddArg(flags)
- return true
- }
- // match: (SBC x (MOVWconst [c]) flags)
- // cond:
- // result: (SBCconst [c] x flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- flags := v.Args[2]
- v.reset(OpARMSBCconst)
- v.AuxInt = c
- v.AddArg(x)
- v.AddArg(flags)
- return true
- }
- // match: (SBC x (SLLconst [c] y) flags)
- // cond:
- // result: (SBCshiftLL x y [c] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- flags := v.Args[2]
- v.reset(OpARMSBCshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- // match: (SBC (SLLconst [c] y) x flags)
- // cond:
- // result: (RSCshiftLL x y [c] flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- flags := v.Args[2]
- v.reset(OpARMRSCshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- // match: (SBC x (SRLconst [c] y) flags)
- // cond:
- // result: (SBCshiftRL x y [c] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- flags := v.Args[2]
- v.reset(OpARMSBCshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- // match: (SBC (SRLconst [c] y) x flags)
- // cond:
- // result: (RSCshiftRL x y [c] flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- flags := v.Args[2]
- v.reset(OpARMRSCshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- // match: (SBC x (SRAconst [c] y) flags)
- // cond:
- // result: (SBCshiftRA x y [c] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- flags := v.Args[2]
- v.reset(OpARMSBCshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- // match: (SBC (SRAconst [c] y) x flags)
- // cond:
- // result: (RSCshiftRA x y [c] flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRAconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- flags := v.Args[2]
- v.reset(OpARMRSCshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- // match: (SBC x (SLL y z) flags)
- // cond:
- // result: (SBCshiftLLreg x y z flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- flags := v.Args[2]
- v.reset(OpARMSBCshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
- return true
- }
- // match: (SBC (SLL y z) x flags)
- // cond:
- // result: (RSCshiftLLreg x y z flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- flags := v.Args[2]
- v.reset(OpARMRSCshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
- return true
- }
- // match: (SBC x (SRL y z) flags)
- // cond:
- // result: (SBCshiftRLreg x y z flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- flags := v.Args[2]
- v.reset(OpARMSBCshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
- return true
- }
- // match: (SBC (SRL y z) x flags)
- // cond:
- // result: (RSCshiftRLreg x y z flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- flags := v.Args[2]
- v.reset(OpARMRSCshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
- return true
- }
- // match: (SBC x (SRA y z) flags)
- // cond:
- // result: (SBCshiftRAreg x y z flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRA {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- flags := v.Args[2]
- v.reset(OpARMSBCshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
- return true
- }
- // match: (SBC (SRA y z) x flags)
- // cond:
- // result: (RSCshiftRAreg x y z flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRA {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- flags := v.Args[2]
- v.reset(OpARMRSCshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SBCconst [c] (ADDconst [d] x) flags)
- // cond:
- // result: (SBCconst [int64(int32(c-d))] x flags)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- flags := v.Args[1]
- v.reset(OpARMSBCconst)
- v.AuxInt = int64(int32(c - d))
- v.AddArg(x)
- v.AddArg(flags)
- return true
- }
- // match: (SBCconst [c] (SUBconst [d] x) flags)
- // cond:
- // result: (SBCconst [int64(int32(c+d))] x flags)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMSUBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- flags := v.Args[1]
- v.reset(OpARMSBCconst)
- v.AuxInt = int64(int32(c + d))
- v.AddArg(x)
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
- // cond:
- // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMRSCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
- // cond:
- // result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMSBCconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) << uint64(d))
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
- // cond:
- // result: (RSCconst [c] (SLL <x.Type> x y) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- flags := v.Args[3]
- v.reset(OpARMRSCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
- // cond:
- // result: (SBCshiftLL x y [c] flags)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- flags := v.Args[3]
- v.reset(OpARMSBCshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
- // cond:
- // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMRSCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
- // cond:
- // result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMSBCconst)
- v.AddArg(x)
- v.AuxInt = int64(int32(c) >> uint64(d))
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
- // cond:
- // result: (RSCconst [c] (SRA <x.Type> x y) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- flags := v.Args[3]
- v.reset(OpARMRSCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
- // cond:
- // result: (SBCshiftRA x y [c] flags)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- flags := v.Args[3]
- v.reset(OpARMSBCshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
- // cond:
- // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMRSCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
- // cond:
- // result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- flags := v.Args[2]
- v.reset(OpARMSBCconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) >> uint64(d))
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
- // cond:
- // result: (RSCconst [c] (SRL <x.Type> x y) flags)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- flags := v.Args[3]
- v.reset(OpARMRSCconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- v.AddArg(flags)
- return true
- }
- // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
- // cond:
- // result: (SBCshiftRL x y [c] flags)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- flags := v.Args[3]
- v.reset(OpARMSBCshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- v.AddArg(flags)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SLL x (MOVWconst [c]))
- // cond:
- // result: (SLLconst x [c&31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpARMSLLconst)
- v.AddArg(x)
- v.AuxInt = c & 31
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SLLconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [int64(uint32(d)<<uint64(c))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = int64(uint32(d) << uint64(c))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SRA x (MOVWconst [c]))
- // cond:
- // result: (SRAconst x [c&31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = c & 31
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SRAcond x _ (FlagEQ))
- // cond:
- // result: (SRAconst x [31])
- for {
- x := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpARMFlagEQ {
- break
- }
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = 31
- return true
- }
- // match: (SRAcond x y (FlagLT_ULT))
- // cond:
- // result: (SRA x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMFlagLT_ULT {
- break
- }
- v.reset(OpARMSRA)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- // match: (SRAcond x _ (FlagLT_UGT))
- // cond:
- // result: (SRAconst x [31])
- for {
- x := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpARMFlagLT_UGT {
- break
- }
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = 31
- return true
- }
- // match: (SRAcond x y (FlagGT_ULT))
- // cond:
- // result: (SRA x y)
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMFlagGT_ULT {
- break
- }
- v.reset(OpARMSRA)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- // match: (SRAcond x _ (FlagGT_UGT))
- // cond:
- // result: (SRAconst x [31])
- for {
- x := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpARMFlagGT_UGT {
- break
- }
- v.reset(OpARMSRAconst)
- v.AddArg(x)
- v.AuxInt = 31
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SRAconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [int64(int32(d)>>uint64(c))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = int64(int32(d) >> uint64(c))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SRL x (MOVWconst [c]))
- // cond:
- // result: (SRLconst x [c&31])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpARMSRLconst)
- v.AddArg(x)
- v.AuxInt = c & 31
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SRLconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [int64(uint32(d)>>uint64(c))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = int64(uint32(d) >> uint64(c))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUB (MOVWconst [c]) x)
- // cond:
- // result: (RSBconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpARMRSBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (SUB x (MOVWconst [c]))
- // cond:
- // result: (SUBconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpARMSUBconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (SUB x (SLLconst [c] y))
- // cond:
- // result: (SUBshiftLL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMSUBshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUB (SLLconst [c] y) x)
- // cond:
- // result: (RSBshiftLL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMRSBshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUB x (SRLconst [c] y))
- // cond:
- // result: (SUBshiftRL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMSUBshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUB (SRLconst [c] y) x)
- // cond:
- // result: (RSBshiftRL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMRSBshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUB x (SRAconst [c] y))
- // cond:
- // result: (SUBshiftRA x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMSUBshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUB (SRAconst [c] y) x)
- // cond:
- // result: (RSBshiftRA x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRAconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMRSBshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUB x (SLL y z))
- // cond:
- // result: (SUBshiftLLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMSUBshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUB (SLL y z) x)
- // cond:
- // result: (RSBshiftLLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMRSBshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUB x (SRL y z))
- // cond:
- // result: (SUBshiftRLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMSUBshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUB (SRL y z) x)
- // cond:
- // result: (RSBshiftRLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMRSBshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUB x (SRA y z))
- // cond:
- // result: (SUBshiftRAreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRA {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMSUBshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUB (SRA y z) x)
- // cond:
- // result: (RSBshiftRAreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRA {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMRSBshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUB x x)
- // cond:
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBS (MOVWconst [c]) x)
- // cond:
- // result: (RSBSconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpARMRSBSconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (SUBS x (MOVWconst [c]))
- // cond:
- // result: (SUBSconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpARMSUBSconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (SUBS x (SLLconst [c] y))
- // cond:
- // result: (SUBSshiftLL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMSUBSshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUBS (SLLconst [c] y) x)
- // cond:
- // result: (RSBSshiftLL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMRSBSshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUBS x (SRLconst [c] y))
- // cond:
- // result: (SUBSshiftRL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMSUBSshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUBS (SRLconst [c] y) x)
- // cond:
- // result: (RSBSshiftRL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMRSBSshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUBS x (SRAconst [c] y))
- // cond:
- // result: (SUBSshiftRA x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMSUBSshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUBS (SRAconst [c] y) x)
- // cond:
- // result: (RSBSshiftRA x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRAconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMRSBSshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (SUBS x (SLL y z))
- // cond:
- // result: (SUBSshiftLLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMSUBSshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUBS (SLL y z) x)
- // cond:
- // result: (RSBSshiftLLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMRSBSshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUBS x (SRL y z))
- // cond:
- // result: (SUBSshiftRLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMSUBSshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUBS (SRL y z) x)
- // cond:
- // result: (RSBSshiftRLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMRSBSshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUBS x (SRA y z))
- // cond:
- // result: (SUBSshiftRAreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRA {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMSUBSshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (SUBS (SRA y z) x)
- // cond:
- // result: (RSBSshiftRAreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRA {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMRSBSshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBSshiftLL (MOVWconst [c]) x [d])
- // cond:
- // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMRSBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (SUBSshiftLL x (MOVWconst [c]) [d])
- // cond:
- // result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMSUBSconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) << uint64(d))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
- // cond:
- // result: (RSBSconst [c] (SLL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMRSBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SUBSshiftLLreg x y (MOVWconst [c]))
- // cond:
- // result: (SUBSshiftLL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMSUBSshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBSshiftRA (MOVWconst [c]) x [d])
- // cond:
- // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMRSBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (SUBSshiftRA x (MOVWconst [c]) [d])
- // cond:
- // result: (SUBSconst x [int64(int32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMSUBSconst)
- v.AddArg(x)
- v.AuxInt = int64(int32(c) >> uint64(d))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
- // cond:
- // result: (RSBSconst [c] (SRA <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMRSBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SUBSshiftRAreg x y (MOVWconst [c]))
- // cond:
- // result: (SUBSshiftRA x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMSUBSshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBSshiftRL (MOVWconst [c]) x [d])
- // cond:
- // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMRSBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (SUBSshiftRL x (MOVWconst [c]) [d])
- // cond:
- // result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMSUBSconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) >> uint64(d))
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
- // cond:
- // result: (RSBSconst [c] (SRL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMRSBSconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SUBSshiftRLreg x y (MOVWconst [c]))
- // cond:
- // result: (SUBSshiftRL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMSUBSshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBconst [0] x)
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (SUBconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [int64(int32(d-c))])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = int64(int32(d - c))
- return true
- }
- // match: (SUBconst [c] (SUBconst [d] x))
- // cond:
- // result: (ADDconst [int64(int32(-c-d))] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMSUBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpARMADDconst)
- v.AuxInt = int64(int32(-c - d))
- v.AddArg(x)
- return true
- }
- // match: (SUBconst [c] (ADDconst [d] x))
- // cond:
- // result: (ADDconst [int64(int32(-c+d))] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMADDconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpARMADDconst)
- v.AuxInt = int64(int32(-c + d))
- v.AddArg(x)
- return true
- }
- // match: (SUBconst [c] (RSBconst [d] x))
- // cond:
- // result: (RSBconst [int64(int32(-c+d))] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMRSBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpARMRSBconst)
- v.AuxInt = int64(int32(-c + d))
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBshiftLL (MOVWconst [c]) x [d])
- // cond:
- // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMRSBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (SUBshiftLL x (MOVWconst [c]) [d])
- // cond:
- // result: (SUBconst x [int64(uint32(c)<<uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMSUBconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) << uint64(d))
- return true
- }
- // match: (SUBshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBshiftLLreg (MOVWconst [c]) x y)
- // cond:
- // result: (RSBconst [c] (SLL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMRSBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SUBshiftLLreg x y (MOVWconst [c]))
- // cond:
- // result: (SUBshiftLL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMSUBshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBshiftRA (MOVWconst [c]) x [d])
- // cond:
- // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMRSBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (SUBshiftRA x (MOVWconst [c]) [d])
- // cond:
- // result: (SUBconst x [int64(int32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMSUBconst)
- v.AddArg(x)
- v.AuxInt = int64(int32(c) >> uint64(d))
- return true
- }
- // match: (SUBshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBshiftRAreg (MOVWconst [c]) x y)
- // cond:
- // result: (RSBconst [c] (SRA <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMRSBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SUBshiftRAreg x y (MOVWconst [c]))
- // cond:
- // result: (SUBshiftRA x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMSUBshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBshiftRL (MOVWconst [c]) x [d])
- // cond:
- // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMRSBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (SUBshiftRL x (MOVWconst [c]) [d])
- // cond:
- // result: (SUBconst x [int64(uint32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMSUBconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) >> uint64(d))
- return true
- }
- // match: (SUBshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (SUBshiftRLreg (MOVWconst [c]) x y)
- // cond:
- // result: (RSBconst [c] (SRL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMRSBconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SUBshiftRLreg x y (MOVWconst [c]))
- // cond:
- // result: (SUBshiftRL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMSUBshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -15584,8 +16189,8 @@
for {
x := v.Args[0]
v.reset(OpARMSRAconst)
- v.AddArg(x)
v.AuxInt = 31
+ v.AddArg(x)
return true
}
}
@@ -15877,611 +16482,6 @@
return true
}
}
-func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XOR (MOVWconst [c]) x)
- // cond:
- // result: (XORconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpARMXORconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XOR x (MOVWconst [c]))
- // cond:
- // result: (XORconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpARMXORconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XOR x (SLLconst [c] y))
- // cond:
- // result: (XORshiftLL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMXORshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (XOR (SLLconst [c] y) x)
- // cond:
- // result: (XORshiftLL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMXORshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (XOR x (SRLconst [c] y))
- // cond:
- // result: (XORshiftRL x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMXORshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (XOR (SRLconst [c] y) x)
- // cond:
- // result: (XORshiftRL x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRLconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMXORshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (XOR x (SRAconst [c] y))
- // cond:
- // result: (XORshiftRA x y [c])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- c := v_1.AuxInt
- y := v_1.Args[0]
- v.reset(OpARMXORshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (XOR (SRAconst [c] y) x)
- // cond:
- // result: (XORshiftRA x y [c])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRAconst {
- break
- }
- c := v_0.AuxInt
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpARMXORshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- // match: (XOR x (SLL y z))
- // cond:
- // result: (XORshiftLLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMXORshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (XOR (SLL y z) x)
- // cond:
- // result: (XORshiftLLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSLL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMXORshiftLLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (XOR x (SRL y z))
- // cond:
- // result: (XORshiftRLreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRL {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMXORshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (XOR (SRL y z) x)
- // cond:
- // result: (XORshiftRLreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRL {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMXORshiftRLreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (XOR x (SRA y z))
- // cond:
- // result: (XORshiftRAreg x y z)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRA {
- break
- }
- y := v_1.Args[0]
- z := v_1.Args[1]
- v.reset(OpARMXORshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (XOR (SRA y z) x)
- // cond:
- // result: (XORshiftRAreg x y z)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMSRA {
- break
- }
- y := v_0.Args[0]
- z := v_0.Args[1]
- x := v.Args[1]
- v.reset(OpARMXORshiftRAreg)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(z)
- return true
- }
- // match: (XOR x x)
- // cond:
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORconst [0] x)
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORconst [c] (MOVWconst [d]))
- // cond:
- // result: (MOVWconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpARMMOVWconst)
- v.AuxInt = c ^ d
- return true
- }
- // match: (XORconst [c] (XORconst [d] x))
- // cond:
- // result: (XORconst [c^d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpARMXORconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpARMXORconst)
- v.AuxInt = c ^ d
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORshiftLL (MOVWconst [c]) x [d])
- // cond:
- // result: (XORconst [c] (SLLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMXORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (XORshiftLL x (MOVWconst [c]) [d])
- // cond:
- // result: (XORconst x [int64(uint32(c)<<uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMXORconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) << uint64(d))
- return true
- }
- // match: (XORshiftLL x (SLLconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSLLconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORshiftLLreg (MOVWconst [c]) x y)
- // cond:
- // result: (XORconst [c] (SLL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMXORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (XORshiftLLreg x y (MOVWconst [c]))
- // cond:
- // result: (XORshiftLL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMXORshiftLL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORshiftRA (MOVWconst [c]) x [d])
- // cond:
- // result: (XORconst [c] (SRAconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMXORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (XORshiftRA x (MOVWconst [c]) [d])
- // cond:
- // result: (XORconst x [int64(int32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMXORconst)
- v.AddArg(x)
- v.AuxInt = int64(int32(c) >> uint64(d))
- return true
- }
- // match: (XORshiftRA x (SRAconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRAconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORshiftRAreg (MOVWconst [c]) x y)
- // cond:
- // result: (XORconst [c] (SRA <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMXORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (XORshiftRAreg x y (MOVWconst [c]))
- // cond:
- // result: (XORshiftRA x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMXORshiftRA)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORshiftRL (MOVWconst [c]) x [d])
- // cond:
- // result: (XORconst [c] (SRLconst <x.Type> x [d]))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- d := v.AuxInt
- v.reset(OpARMXORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
- v0.AddArg(x)
- v0.AuxInt = d
- v.AddArg(v0)
- return true
- }
- // match: (XORshiftRL x (MOVWconst [c]) [d])
- // cond:
- // result: (XORconst x [int64(uint32(c)>>uint64(d))])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMMOVWconst {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- v.reset(OpARMXORconst)
- v.AddArg(x)
- v.AuxInt = int64(uint32(c) >> uint64(d))
- return true
- }
- // match: (XORshiftRL x (SRLconst x [c]) [d])
- // cond: c==d
- // result: (MOVWconst [0])
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpARMSRLconst {
- break
- }
- if x != v_1.Args[0] {
- break
- }
- c := v_1.AuxInt
- d := v.AuxInt
- if !(c == d) {
- break
- }
- v.reset(OpARMMOVWconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (XORshiftRLreg (MOVWconst [c]) x y)
- // cond:
- // result: (XORconst [c] (SRL <x.Type> x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpARMMOVWconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- y := v.Args[2]
- v.reset(OpARMXORconst)
- v.AuxInt = c
- v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (XORshiftRLreg x y (MOVWconst [c]))
- // cond:
- // result: (XORshiftRL x y [c])
- for {
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpARMMOVWconst {
- break
- }
- c := v_2.AuxInt
- v.reset(OpARMXORshiftRL)
- v.AddArg(x)
- v.AddArg(y)
- v.AuxInt = c
- return true
- }
- return false
-}
func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -16756,8 +16756,8 @@
v.AuxInt = SizeAndAlign(s).Align()
v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
- v0.AddArg(ptr)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(ptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
v1.AuxInt = 0
@@ -16815,12 +16815,12 @@
for {
x := v.Args[0]
v.reset(OpARMSRAconst)
- v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
- v0.AddArg(x)
- v0.AddArg(x)
- v0.AuxInt = 1
- v.AddArg(v0)
v.AuxInt = 31
+ v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
+ v0.AuxInt = 1
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index ecde744..e268f59 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -10,6 +10,36 @@
switch v.Op {
case OpARM64ADDconst:
return rewriteValueARM64_OpARM64ADDconst(v, config)
+ case OpARM64FMOVDload:
+ return rewriteValueARM64_OpARM64FMOVDload(v, config)
+ case OpARM64FMOVDstore:
+ return rewriteValueARM64_OpARM64FMOVDstore(v, config)
+ case OpARM64FMOVSload:
+ return rewriteValueARM64_OpARM64FMOVSload(v, config)
+ case OpARM64FMOVSstore:
+ return rewriteValueARM64_OpARM64FMOVSstore(v, config)
+ case OpARM64MOVBUload:
+ return rewriteValueARM64_OpARM64MOVBUload(v, config)
+ case OpARM64MOVBload:
+ return rewriteValueARM64_OpARM64MOVBload(v, config)
+ case OpARM64MOVBstore:
+ return rewriteValueARM64_OpARM64MOVBstore(v, config)
+ case OpARM64MOVDload:
+ return rewriteValueARM64_OpARM64MOVDload(v, config)
+ case OpARM64MOVDstore:
+ return rewriteValueARM64_OpARM64MOVDstore(v, config)
+ case OpARM64MOVHUload:
+ return rewriteValueARM64_OpARM64MOVHUload(v, config)
+ case OpARM64MOVHload:
+ return rewriteValueARM64_OpARM64MOVHload(v, config)
+ case OpARM64MOVHstore:
+ return rewriteValueARM64_OpARM64MOVHstore(v, config)
+ case OpARM64MOVWUload:
+ return rewriteValueARM64_OpARM64MOVWUload(v, config)
+ case OpARM64MOVWload:
+ return rewriteValueARM64_OpARM64MOVWload(v, config)
+ case OpARM64MOVWstore:
+ return rewriteValueARM64_OpARM64MOVWstore(v, config)
case OpAdd16:
return rewriteValueARM64_OpAdd16(v, config)
case OpAdd32:
@@ -132,14 +162,6 @@
return rewriteValueARM64_OpEqB(v, config)
case OpEqPtr:
return rewriteValueARM64_OpEqPtr(v, config)
- case OpARM64FMOVDload:
- return rewriteValueARM64_OpARM64FMOVDload(v, config)
- case OpARM64FMOVDstore:
- return rewriteValueARM64_OpARM64FMOVDstore(v, config)
- case OpARM64FMOVSload:
- return rewriteValueARM64_OpARM64FMOVSload(v, config)
- case OpARM64FMOVSstore:
- return rewriteValueARM64_OpARM64FMOVSstore(v, config)
case OpGeq16:
return rewriteValueARM64_OpGeq16(v, config)
case OpGeq16U:
@@ -290,28 +312,6 @@
return rewriteValueARM64_OpLsh8x64(v, config)
case OpLsh8x8:
return rewriteValueARM64_OpLsh8x8(v, config)
- case OpARM64MOVBUload:
- return rewriteValueARM64_OpARM64MOVBUload(v, config)
- case OpARM64MOVBload:
- return rewriteValueARM64_OpARM64MOVBload(v, config)
- case OpARM64MOVBstore:
- return rewriteValueARM64_OpARM64MOVBstore(v, config)
- case OpARM64MOVDload:
- return rewriteValueARM64_OpARM64MOVDload(v, config)
- case OpARM64MOVDstore:
- return rewriteValueARM64_OpARM64MOVDstore(v, config)
- case OpARM64MOVHUload:
- return rewriteValueARM64_OpARM64MOVHUload(v, config)
- case OpARM64MOVHload:
- return rewriteValueARM64_OpARM64MOVHload(v, config)
- case OpARM64MOVHstore:
- return rewriteValueARM64_OpARM64MOVHstore(v, config)
- case OpARM64MOVWUload:
- return rewriteValueARM64_OpARM64MOVWUload(v, config)
- case OpARM64MOVWload:
- return rewriteValueARM64_OpARM64MOVWload(v, config)
- case OpARM64MOVWstore:
- return rewriteValueARM64_OpARM64MOVWstore(v, config)
case OpMod16:
return rewriteValueARM64_OpMod16(v, config)
case OpMod16u:
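A note on the case moves in the hunks above: no dispatch case changed, they only sorted differently. The moves are consistent with plain byte-wise lexicographic ordering of the full case label — "OpARM64MOVBUload" sorts ahead of "OpAdd16" because upper-case 'R' (0x52) precedes lower-case 'd' (0x64) in ASCII. A tiny sketch, assuming a simple lexicographic sort of label strings (op names taken from these hunks):

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		// Case labels as they appear in this file's dispatch switch.
		ops := []string{"OpAdd16", "OpARM64MOVBUload", "OpARM64FMOVDload", "OpMod16"}
		sort.Strings(ops) // byte-wise: 'R' < 'd', so OpARM64* sorts first
		fmt.Println(ops)
		// Output: [OpARM64FMOVDload OpARM64MOVBUload OpAdd16 OpMod16]
	}
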
@@ -542,6 +542,765 @@
}
return false
}
+func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond:
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond:
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -750,12 +1509,12 @@
v.reset(OpARM64ADD)
v0 := b.NewValue0(v.Line, OpARM64ADD, t)
v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
- v1.AddArg(x)
v1.AuxInt = 1
+ v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Line, OpARM64SRLconst, t)
- v2.AddArg(y)
v2.AuxInt = 1
+ v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Line, OpARM64AND, t)
@@ -1469,210 +2228,6 @@
return true
}
}
-func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (FMOVDload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64FMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64FMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64FMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64FMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (FMOVSload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64FMOVSload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64FMOVSload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64FMOVSstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64FMOVSstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -2081,6 +2636,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
+ v.AuxInt = 16
v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt32())
v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
v1.AddArg(x)
@@ -2089,7 +2645,6 @@
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v.AuxInt = 16
return true
}
}
@@ -2103,6 +2658,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRLconst)
+ v.AuxInt = 16
v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt32())
v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
v1.AddArg(x)
@@ -2111,7 +2667,6 @@
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v.AuxInt = 16
return true
}
}
@@ -2125,11 +2680,11 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
+ v.AuxInt = 32
v0 := b.NewValue0(v.Line, OpARM64MULL, config.fe.TypeInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v.AuxInt = 32
return true
}
}
@@ -2143,11 +2698,11 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
+ v.AuxInt = 32
v0 := b.NewValue0(v.Line, OpARM64UMULL, config.fe.TypeUInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v.AuxInt = 32
return true
}
}
@@ -2191,6 +2746,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
+ v.AuxInt = 8
v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt16())
v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
v1.AddArg(x)
@@ -2199,7 +2755,6 @@
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v.AuxInt = 8
return true
}
}
@@ -2213,6 +2768,7 @@
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRLconst)
+ v.AuxInt = 8
v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt16())
v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
v1.AddArg(x)
@@ -2221,7 +2777,6 @@
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v.AuxInt = 8
return true
}
}
@@ -2827,18 +3382,18 @@
// result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
for {
t := v.Type
- x := v.Args[0]
c := v.AuxInt
+ x := v.Args[0]
v.reset(OpARM64OR)
v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
- v0.AddArg(x)
v0.AuxInt = c & 15
+ v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+ v1.AuxInt = 16 - c&15
v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v1.AuxInt = 16 - c&15
v.AddArg(v1)
return true
}
@@ -2850,11 +3405,11 @@
// cond:
// result: (RORWconst x [32-c&31])
for {
- x := v.Args[0]
c := v.AuxInt
+ x := v.Args[0]
v.reset(OpARM64RORWconst)
- v.AddArg(x)
v.AuxInt = 32 - c&31
+ v.AddArg(x)
return true
}
}
@@ -2865,11 +3420,11 @@
// cond:
// result: (RORconst x [64-c&63])
for {
- x := v.Args[0]
c := v.AuxInt
+ x := v.Args[0]
v.reset(OpARM64RORconst)
- v.AddArg(x)
v.AuxInt = 64 - c&63
+ v.AddArg(x)
return true
}
}
@@ -2881,18 +3436,18 @@
// result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> (ZeroExt8to64 x) [8-c&7]))
for {
t := v.Type
- x := v.Args[0]
c := v.AuxInt
+ x := v.Args[0]
v.reset(OpARM64OR)
v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
- v0.AddArg(x)
v0.AuxInt = c & 7
+ v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+ v1.AuxInt = 8 - c&7
v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v1.AuxInt = 8 - c&7
v.AddArg(v1)
return true
}
@@ -2972,8 +3527,8 @@
break
}
v.reset(OpARM64SLLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Lsh16x64 _ (MOVDconst [c]))
@@ -3118,8 +3673,8 @@
break
}
v.reset(OpARM64SLLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Lsh32x64 _ (MOVDconst [c]))
@@ -3264,8 +3819,8 @@
break
}
v.reset(OpARM64SLLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Lsh64x64 _ (MOVDconst [c]))
@@ -3410,8 +3965,8 @@
break
}
v.reset(OpARM64SLLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Lsh8x64 _ (MOVDconst [c]))
@@ -3481,561 +4036,6 @@
return true
}
}
-func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVBUload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVBload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVBstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVDload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVDstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVHUload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVHload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVHload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVHload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVHstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVWUload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVWUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVWUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond:
- // result: (MOVWload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- v.reset(OpARM64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond:
- // result: (MOVWstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpARM64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpARM64MOVDaddr {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpARM64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- return false
-}
func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -4647,8 +4647,8 @@
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Line, OpARM64ADDconst, src.Type)
- v0.AddArg(src)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
return true
@@ -5179,10 +5179,10 @@
break
}
v.reset(OpARM64SRLconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh16Ux64 _ (MOVDconst [c]))
@@ -5333,10 +5333,10 @@
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh16x64 x (MOVDconst [c]))
@@ -5353,10 +5353,10 @@
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = 63
return true
}
// match: (Rsh16x64 x y)
@@ -5491,10 +5491,10 @@
break
}
v.reset(OpARM64SRLconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh32Ux64 _ (MOVDconst [c]))
@@ -5645,10 +5645,10 @@
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh32x64 x (MOVDconst [c]))
@@ -5665,10 +5665,10 @@
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = 63
return true
}
// match: (Rsh32x64 x y)
@@ -5799,8 +5799,8 @@
break
}
v.reset(OpARM64SRLconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Rsh64Ux64 _ (MOVDconst [c]))
@@ -5943,8 +5943,8 @@
break
}
v.reset(OpARM64SRAconst)
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
// match: (Rsh64x64 x (MOVDconst [c]))
@@ -5961,8 +5961,8 @@
break
}
v.reset(OpARM64SRAconst)
- v.AddArg(x)
v.AuxInt = 63
+ v.AddArg(x)
return true
}
// match: (Rsh64x64 x y)
@@ -6093,10 +6093,10 @@
break
}
v.reset(OpARM64SRLconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh8Ux64 _ (MOVDconst [c]))
@@ -6247,10 +6247,10 @@
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = c
v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = c
return true
}
// match: (Rsh8x64 x (MOVDconst [c]))
@@ -6267,10 +6267,10 @@
break
}
v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v.AuxInt = 63
return true
}
// match: (Rsh8x64 x y)
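A recurring mechanical change in the ARM64 hunks above: v.AuxInt
(and v.Aux) are now assigned immediately after v.reset, before any
AddArg call, and match-side reads follow the same AuxInt, Aux, Args
order. This is consistent with the generator emitting value fields
in one fixed order rather than in whatever order the rule wrote
them. A minimal sketch of such an emitter; emitResult and its
signature are hypothetical illustrations, not the actual rulegen
code:

	package sketch

	import (
		"fmt"
		"io"
	)

	// emitResult sketches the fixed field order now used when
	// generating a result value: reset, then AuxInt, then Aux,
	// then args. Helper name and signature are hypothetical.
	func emitResult(w io.Writer, op, auxint, aux string, args []string) {
		fmt.Fprintf(w, "v.reset(%s)\n", op)
		if auxint != "" {
			fmt.Fprintf(w, "v.AuxInt = %s\n", auxint) // always before AddArg
		}
		if aux != "" {
			fmt.Fprintf(w, "v.Aux = %s\n", aux)
		}
		for _, a := range args {
			fmt.Fprintf(w, "v.AddArg(%s)\n", a)
		}
	}

Emitting in a fixed order makes the generated statements independent
of how a rule happens to order its [auxint], {aux}, and argument
patterns, which is why so many hunks here are pure reorderings.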
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 96b5759..d304542 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -8,8 +8,6 @@
var _ = math.MinInt8 // in case not otherwise used
func rewriteValuePPC64(v *Value, config *Config) bool {
switch v.Op {
- case OpPPC64ADD:
- return rewriteValuePPC64_OpPPC64ADD(v, config)
case OpAdd16:
return rewriteValuePPC64_OpAdd16(v, config)
case OpAdd32:
@@ -154,22 +152,6 @@
return rewriteValuePPC64_OpLess8U(v, config)
case OpLoad:
return rewriteValuePPC64_OpLoad(v, config)
- case OpPPC64MOVBstore:
- return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
- case OpPPC64MOVBstorezero:
- return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
- case OpPPC64MOVDstore:
- return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
- case OpPPC64MOVDstorezero:
- return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
- case OpPPC64MOVHstore:
- return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
- case OpPPC64MOVHstorezero:
- return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
- case OpPPC64MOVWstore:
- return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
- case OpPPC64MOVWstorezero:
- return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
case OpMove:
return rewriteValuePPC64_OpMove(v, config)
case OpMul16:
@@ -216,6 +198,24 @@
return rewriteValuePPC64_OpOr64(v, config)
case OpOr8:
return rewriteValuePPC64_OpOr8(v, config)
+ case OpPPC64ADD:
+ return rewriteValuePPC64_OpPPC64ADD(v, config)
+ case OpPPC64MOVBstore:
+ return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
+ case OpPPC64MOVBstorezero:
+ return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
+ case OpPPC64MOVDstore:
+ return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
+ case OpPPC64MOVDstorezero:
+ return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
+ case OpPPC64MOVHstore:
+ return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
+ case OpPPC64MOVHstorezero:
+ return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
+ case OpPPC64MOVWstore:
+ return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
+ case OpPPC64MOVWstorezero:
+ return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
case OpSignExt16to32:
return rewriteValuePPC64_OpSignExt16to32(v, config)
case OpSignExt16to64:
@@ -283,41 +283,6 @@
}
return false
}
-func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (ADD (MOVDconst [c]) x)
- // cond:
- // result: (ADDconst [c] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64MOVDconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpPPC64ADDconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (ADD x (MOVDconst [c]))
- // cond:
- // result: (ADDconst [c] x)
- for {
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpPPC64ADDconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- return false
-}
func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -1691,330 +1656,6 @@
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVBstore [off1+off2] {sym} x val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
- // result: (MOVBstorezero [off] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
- break
- }
- v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVBstorezero [off1+off2] {sym} x mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- mem := v.Args[1]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVDstore [off1+off2] {sym} x val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
- // result: (MOVDstorezero [off] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
- break
- }
- v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVDstorezero [off1+off2] {sym} x mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- mem := v.Args[1]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVHstore [off1+off2] {sym} x val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
- // result: (MOVHstorezero [off] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
- break
- }
- v.reset(OpPPC64MOVHstorezero)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVHstorezero [off1+off2] {sym} x mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- mem := v.Args[1]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVHstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVWstore [off1+off2] {sym} x val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
- // result: (MOVWstorezero [off] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpPPC64MOVDconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
- break
- }
- v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
- // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
- // cond: is16Bit(off1+off2)
- // result: (MOVWstorezero [off1+off2] {sym} x mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpPPC64ADDconst {
- break
- }
- off2 := v_0.AuxInt
- x := v_0.Args[0]
- mem := v.Args[1]
- if !(is16Bit(off1 + off2)) {
- break
- }
- v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -2347,8 +1988,8 @@
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Line, OpPPC64ADDconst, src.Type)
- v0.AddArg(src)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
return true
@@ -2725,6 +2366,365 @@
return true
}
}
+func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ADD (MOVDconst [c]) x)
+ // cond:
+ // result: (ADDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADD x (MOVDconst [c]))
+ // cond:
+ // result: (ADDconst [c] x)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVBstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVDstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVDstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVHstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVHstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVWstore [off1+off2] {sym} x val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: c == 0
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(off1+off2)
+ // result: (MOVWstorezero [off1+off2] {sym} x mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ x := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is16Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool {
b := v.Block
_ = b
@@ -3456,8 +3456,8 @@
v.AuxInt = SizeAndAlign(s).Align()
v.AddArg(ptr)
v0 := b.NewValue0(v.Line, OpPPC64ADDconst, ptr.Type)
- v0.AddArg(ptr)
v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+ v0.AddArg(ptr)
v.AddArg(v0)
v.AddArg(mem)
return true
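The wholesale moves in rewritePPC64.go — OpPPC64ADD and the
OpPPC64MOV*store cases leaving the top of the switch and reappearing
after OpOr8 — are the one-time sort-order change visible here: case
labels are now sorted by the full opcode name, architecture prefix
included. That the old key was the bare rule name is an assumption
inferred from the old ordering above; a small illustration of why
the positions flip under plain lexicographic sorting:

	package sketch

	import "sort"

	// Old key: bare rule name. "ADD" sorts before "Add16"
	// (ASCII 'D' < 'd'), so arch-specific ops led the switch.
	// New key: full opcode name. "OpPPC64ADD" sorts after
	// "OpOr8" ('P' > 'O'), so arch ops now follow generic ones.
	func sortedKeys() (oldKeys, newKeys []string) {
		oldKeys = []string{"ADD", "Add16", "Load", "MOVBstore", "Or8"}
		newKeys = []string{"OpPPC64ADD", "OpAdd16", "OpLoad", "OpPPC64MOVBstore", "OpOr8"}
		sort.Strings(oldKeys) // [ADD Add16 Load MOVBstore Or8]
		sort.Strings(newKeys) // [OpAdd16 OpLoad OpOr8 OpPPC64ADD OpPPC64MOVBstore]
		return oldKeys, newKeys
	}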
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
index d2fbfb9..33d90f5 100644
--- a/src/cmd/compile/internal/ssa/rewritedec64.go
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -198,19 +198,19 @@
// cond: is64BitInt(v.Type) && v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(is64BitInt(v.Type) && v.Type.IsSigned()) {
break
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32())
- v0.Aux = n
v0.AuxInt = off + 4
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
- v1.Aux = n
v1.AuxInt = off
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -218,19 +218,19 @@
// cond: is64BitInt(v.Type) && !v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(is64BitInt(v.Type) && !v.Type.IsSigned()) {
break
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
- v0.Aux = n
v0.AuxInt = off + 4
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
- v1.Aux = n
v1.AuxInt = off
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -738,13 +738,13 @@
// cond: c <= 32
// result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [32-c]))) (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [c])) (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [32-c]))))
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
break
}
hi := v_0.Args[0]
lo := v_0.Args[1]
- c := v.AuxInt
if !(c <= 32) {
break
}
@@ -783,22 +783,22 @@
// cond: c > 32
// result: (Lrot64 (Int64Make lo hi) [c-32])
for {
+ c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
break
}
hi := v_0.Args[0]
lo := v_0.Args[1]
- c := v.AuxInt
if !(c > 32) {
break
}
v.reset(OpLrot64)
+ v.AuxInt = c - 32
v0 := b.NewValue0(v.Line, OpInt64Make, config.fe.TypeUInt64())
v0.AddArg(lo)
v0.AddArg(hi)
v.AddArg(v0)
- v.AuxInt = c - 32
return true
}
return false
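
The rewritedec64 hunks above are pure reorderings: the generated code
now binds and assigns a value's AuxInt before its Aux, and both before
any Args work, regardless of where [off] and {n} appear in the rule
text. A minimal sketch of a fixed-order result emitter in that spirit
(the function name and signature here are hypothetical, not the actual
rulegen API):

	package main

	import (
		"fmt"
		"io"
		"os"
	)

	// emitResult writes the assignments for one result value in a
	// fixed order: AuxInt, then Aux, then each argument. Because the
	// order no longer tracks the rule text, swapping [c] and {n} in
	// a rule leaves the generated code byte-for-byte identical.
	func emitResult(w io.Writer, v, auxint, aux string, args []string) {
		if auxint != "" {
			fmt.Fprintf(w, "%s.AuxInt = %s\n", v, auxint)
		}
		if aux != "" {
			fmt.Fprintf(w, "%s.Aux = %s\n", v, aux)
		}
		for _, a := range args {
			fmt.Fprintf(w, "%s.AddArg(%s)\n", v, a)
		}
	}

	func main() {
		// Mirrors the Arg -> Int64Make hunks above: AuxInt before Aux.
		emitResult(os.Stdout, "v0", "off+4", "n", nil)
	}
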
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 00bb24a..f4f2b50 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -733,8 +733,8 @@
c := v_1.AuxInt
v.reset(OpOffPtr)
v.Type = t
- v.AddArg(x)
v.AuxInt = c
+ v.AddArg(x)
return true
}
return false
@@ -1370,19 +1370,19 @@
// cond: v.Type.IsString()
// result: (StringMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsString()) {
break
}
v.reset(OpStringMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1390,23 +1390,23 @@
// cond: v.Type.IsSlice()
// result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsSlice()) {
break
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpArg, v.Type.ElemType().PtrTo())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
- v2.Aux = n
v2.AuxInt = off + 2*config.PtrSize
+ v2.Aux = n
v.AddArg(v2)
return true
}
@@ -1414,19 +1414,19 @@
// cond: v.Type.IsInterface()
// result: (IMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsInterface()) {
break
}
v.reset(OpIMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
- v1.Aux = n
v1.AuxInt = off + config.PtrSize
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1434,19 +1434,19 @@
// cond: v.Type.IsComplex() && v.Type.Size() == 16
// result: (ComplexMake (Arg <config.fe.TypeFloat64()> {n} [off]) (Arg <config.fe.TypeFloat64()> {n} [off+8]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsComplex() && v.Type.Size() == 16) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
- v1.Aux = n
v1.AuxInt = off + 8
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1454,19 +1454,19 @@
// cond: v.Type.IsComplex() && v.Type.Size() == 8
// result: (ComplexMake (Arg <config.fe.TypeFloat32()> {n} [off]) (Arg <config.fe.TypeFloat32()> {n} [off+4]))
for {
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(v.Type.IsComplex() && v.Type.Size() == 8) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
- v0.Aux = n
v0.AuxInt = off
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
- v1.Aux = n
v1.AuxInt = off + 4
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1486,15 +1486,15 @@
// result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake1)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
return true
}
@@ -1503,19 +1503,19 @@
// result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake2)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
return true
}
@@ -1524,23 +1524,23 @@
// result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake3)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
- v2.Aux = n
v2.AuxInt = off + t.FieldOff(2)
+ v2.Aux = n
v.AddArg(v2)
return true
}
@@ -1549,27 +1549,27 @@
// result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]) (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
for {
t := v.Type
- n := v.Aux
off := v.AuxInt
+ n := v.Aux
if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
break
}
v.reset(OpStructMake4)
v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
- v0.Aux = n
v0.AuxInt = off + t.FieldOff(0)
+ v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
- v1.Aux = n
v1.AuxInt = off + t.FieldOff(1)
+ v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
- v2.Aux = n
v2.AuxInt = off + t.FieldOff(2)
+ v2.Aux = n
v.AddArg(v2)
v3 := b.NewValue0(v.Line, OpArg, t.FieldType(3))
- v3.Aux = n
v3.AuxInt = off + t.FieldOff(3)
+ v3.Aux = n
v.AddArg(v3)
return true
}
@@ -6359,26 +6359,26 @@
// cond:
// result: (OffPtr p [a+b])
for {
+ a := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpOffPtr {
break
}
- p := v_0.Args[0]
b := v_0.AuxInt
- a := v.AuxInt
+ p := v_0.Args[0]
v.reset(OpOffPtr)
- v.AddArg(p)
v.AuxInt = a + b
+ v.AddArg(p)
return true
}
// match: (OffPtr p [0])
// cond: v.Type.Compare(p.Type) == CMPeq
// result: p
for {
- p := v.Args[0]
if v.AuxInt != 0 {
break
}
+ p := v.Args[0]
if !(v.Type.Compare(p.Type) == CMPeq) {
break
}