[dev.ssa] cmd/compile/ssa: add comparison ops
Increase SSA coverage of functions in the
standard library from 20.79% to 27.81%.
The most significant unimplemented items are now:
10.16% 2597 SSA unimplemented: zero for type error not implemented
8.44% 2157 SSA unimplemented: addr: bad op DOTPTR
7.98% 2039 SSA unimplemented: unhandled OLITERAL 7
6.29% 1607 SSA unimplemented: unhandled expr OROR
4.73% 1209 SSA unimplemented: unhandled expr LEN
4.55% 1163 SSA unimplemented: unhandled expr LROT
3.42% 874 SSA unimplemented: unhandled OLITERAL 6
2.46% 629 SSA unimplemented: unhandled expr DOT
2.41% 615 SSA unimplemented: zero for type []byte not implemented
2.02% 516 SSA unimplemented: unhandled stmt CALLMETH
1.90% 486 SSA unimplemented: unhandled expr ANDAND
1.79% 458 SSA unimplemented: unhandled expr CALLINTER
1.69% 433 SSA unimplemented: unhandled stmt SWITCH
1.67% 428 SSA unimplemented: unhandled expr CALLMETH
1.67% 426 SSA unimplemented: unhandled expr CLOSUREVAR
Change-Id: I40959b22993c4f70784b4eca472cae752347879c
Reviewed-on: https://go-review.googlesource.com/11452
Reviewed-by: Keith Randall <khr@golang.org>
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 124b13b..d3d14c3 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -48,6 +48,11 @@
y))
(Less x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETL (CMPQ <TypeFlags> x y))
+(Leq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETLE (CMPQ <TypeFlags> x y))
+(Greater x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETG (CMPQ <TypeFlags> x y))
+(Geq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETGE (CMPQ <TypeFlags> x y))
+(Eq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETEQ (CMPQ <TypeFlags> x y))
+(Neq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETNE (CMPQ <TypeFlags> x y))
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index c0f36b5..6d0b4ec 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -122,6 +122,7 @@
{name: "SETEQ", reg: flagsgp}, // extract == condition from arg0
{name: "SETNE", reg: flagsgp}, // extract != condition from arg0
{name: "SETL", reg: flagsgp}, // extract signed < condition from arg0
+ {name: "SETLE", reg: flagsgp}, // extract signed <= condition from arg0
{name: "SETG", reg: flagsgp}, // extract signed > condition from arg0
{name: "SETGE", reg: flagsgp}, // extract signed >= condition from arg0
{name: "SETB", reg: flagsgp}, // extract unsigned < condition from arg0
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index e7c4de8..151e8e1 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -15,7 +15,12 @@
{name: "Rsh"}, // arg0 >> arg1 (signed/unsigned depending on signedness of type)
// 2-input comparisons
- {name: "Less"}, // arg0 < arg1
+ {name: "Eq"}, // arg0 == arg1
+ {name: "Neq"}, // arg0 != arg1
+ {name: "Less"}, // arg0 < arg1
+ {name: "Leq"}, // arg0 <= arg1
+ {name: "Greater"}, // arg0 > arg1
+	{name: "Geq"},     // arg0 >= arg1
// Data movement
{name: "Phi"}, // select an argument based on which predecessor block we came from