blob: f74a42eac11a6a38d406229a96ff8d7319b3399a [file] [log] [blame] [edit]
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package reflectdata
import (
"fmt"
"go/constant"
"strconv"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
// genhash returns a symbol which is the closure used to compute
// the hash of a value of type t.
func genhash(t *types.Type) *obj.LSym {
	sig := eqSignature(t)
	return genhashSig(sig)
}
// genhashSig returns a symbol which is the closure used to compute
// the hash of a value with hash/equality signature sig.
func genhashSig(sig string) *obj.LSym {
	// Strip any leading alignment directive. Alignment is irrelevant
	// for hashing; the runtime hash functions handle unaligned data.
	if len(sig) > 0 && sig[0] == sigAlign {
		_, sig = parseNum(sig[1:])
	}
	// Common signatures map directly to closures over runtime hashers.
	switch sig {
	case "":
		return sysClosure("memhash0")
	case string(sigMemory) + "1":
		return sysClosure("memhash8")
	case string(sigMemory) + "2":
		return sysClosure("memhash16")
	case string(sigMemory) + "4":
		return sysClosure("memhash32")
	case string(sigMemory) + "8":
		return sysClosure("memhash64")
	case string(sigMemory) + "16":
		return sysClosure("memhash128")
	case string(sigString):
		return sysClosure("strhash")
	case string(sigIface):
		return sysClosure("interhash")
	case string(sigEface):
		return sysClosure("nilinterhash")
	case string(sigFloat32):
		return sysClosure("f32hash")
	case string(sigFloat64):
		return sysClosure("f64hash")
	case string(sigFloat32) + string(sigFloat32):
		return sysClosure("c64hash")
	case string(sigFloat64) + string(sigFloat64):
		return sysClosure("c128hash")
	}
	closure := TypeLinksymLookup(".hashfunc." + sig)
	if len(closure.P) > 0 { // already generated
		return closure
	}
	if sig[0] == sigMemory {
		n, rest := parseNum(sig[1:])
		if rest == "" {
			// Just M%d. We can make a memhash_varlen closure.
			// The size of the memory region to hash is encoded in the closure.
			if memhashvarlen == nil {
				memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
			}
			ot := 0
			ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
			ot = objw.Uintptr(closure, ot, uint64(n)) // size encoded in closure
			objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
			return closure
		}
	}
	if base.Flag.LowerR != 0 {
		fmt.Printf("genhash %s\n", sig)
	}
	// Generate a bespoke hash function for this signature.
	fn := hashFunc(sig)
	// Build closure. It doesn't close over any variables, so
	// it contains just the function pointer.
	objw.SymPtr(closure, 0, fn.Linksym(), 0)
	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
	return closure
}
// hashFunc returns the compiled function computing the hash described
// by signature sig. The function has type
//
//	func(p unsafe.Pointer, h uintptr) uintptr
//
// where p points at the data to hash and h is the hash seed; it returns
// the updated hash. Functions are cached by signature in the .hash.<sig>
// type symbol, so at most one function is built per signature.
func hashFunc(sig string) *ir.Func {
	sym := types.TypeSymLookup(".hash." + sig)
	if sym.Def != nil {
		// Already generated for this signature.
		return sym.Def.(*ir.Name).Func
	}
	pos := base.AutogeneratedPos // less confusing than end of input
	base.Pos = pos
	// func sym(p unsafe.Pointer, h uintptr) uintptr
	fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("p"), types.Types[types.TUNSAFEPTR]),
			types.NewField(pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]),
		},
		[]*types.Field{
			types.NewField(pos, nil, types.Types[types.TUINTPTR]),
		},
	))
	sym.Def = fn.Nname
	fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
	typecheck.DeclFunc(fn)
	np := fn.Dcl[0] // p: pointer to data being hashed
	nh := fn.Dcl[1] // h: hash seed / accumulator
	// Skip alignment, hash functions can handle unaligned data.
	if len(sig) > 0 && sig[0] == sigAlign {
		_, sig = parseNum(sig[1:])
	}
	// offset from np that we're currently working on
	var off int64
	// Return np+off (as an unsafe.Pointer).
	ptr := func() ir.Node {
		c := ir.NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(off))
		return ir.NewBinaryExpr(pos, ir.OUNSAFEADD, np, c)
	}
	// hash data using function name at np+off.
	// Increment off by size.
	hash := func(name string, size int64) {
		p := ptr()
		hashFn := typecheck.LookupRuntime(name)
		call := ir.NewCallExpr(pos, ir.OCALL, hashFn, []ir.Node{p, nh})
		fn.Body.Append(ir.NewAssignStmt(pos, nh, call))
		off += size
	}
	// Walk the signature, emitting one hash step per element.
	for len(sig) > 0 {
		kind := sig[0]
		sig = sig[1:]
		switch kind {
		case sigMemory:
			var n int64
			n, sig = parseNum(sig)
			// 4- and 8-byte regions have dedicated runtime entry
			// points that don't need an explicit size argument.
			switch {
			case n == 4:
				p := ptr()
				memhash := typecheck.LookupRuntime("memhash32")
				call := ir.NewCallExpr(pos, ir.OCALL, memhash, []ir.Node{p, nh})
				fn.Body.Append(ir.NewAssignStmt(pos, nh, call))
			case n == 8:
				p := ptr()
				memhash := typecheck.LookupRuntime("memhash64")
				call := ir.NewCallExpr(pos, ir.OCALL, memhash, []ir.Node{p, nh})
				fn.Body.Append(ir.NewAssignStmt(pos, nh, call))
			default:
				// General case: memhash(p, h, size).
				p := ptr()
				memhash := typecheck.LookupRuntime("memhash")
				size := ir.NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(n))
				call := ir.NewCallExpr(pos, ir.OCALL, memhash, []ir.Node{p, nh, size})
				fn.Body.Append(ir.NewAssignStmt(pos, nh, call))
			}
			off += n
		case sigFloat32:
			hash("f32hash", 4)
		case sigFloat64:
			hash("f64hash", 8)
		case sigString:
			hash("strhash", 2*int64(types.PtrSize))
		case sigEface:
			hash("nilinterhash", 2*int64(types.PtrSize))
		case sigIface:
			hash("interhash", 2*int64(types.PtrSize))
		case sigSkip:
			// Bytes that don't participate in the hash (e.g. padding).
			var n int64
			n, sig = parseNum(sig)
			off += n
		case sigArrayStart:
			var n int64
			var elemSig string
			n, elemSig, sig = parseArray(sig)
			elemSize := sigSize(elemSig)
			// Loop N times, calling hash function for the element.
			// for i := off; i < off + N*elemSize; i += elemSize {
			//   h = elemfn(p+i, h)
			// }
			elemFn := hashFunc(elemSig).Nname
			idx := typecheck.TempAt(pos, ir.CurFunc, types.Types[types.TUINTPTR])
			init := ir.NewAssignStmt(pos, idx, ir.NewInt(pos, off))
			cond := ir.NewBinaryExpr(pos, ir.OLT, idx, ir.NewInt(pos, off+n*elemSize))
			post := ir.NewAssignStmt(pos, idx, ir.NewBinaryExpr(pos, ir.OADD, idx, ir.NewInt(pos, elemSize)))
			p := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, np, idx)
			call := typecheck.Call(pos, elemFn, []ir.Node{p, nh}, false)
			as := ir.NewAssignStmt(pos, nh, call)
			loop := ir.NewForStmt(pos, init, cond, post, []ir.Node{as}, false)
			fn.Body.Append(loop)
			off += n * elemSize
		}
	}
	// Return the accumulated hash.
	fn.Body.Append(ir.NewReturnStmt(pos, []ir.Node{nh}))
	if base.Flag.LowerR != 0 {
		ir.DumpList("genhash body", fn.Body)
	}
	typecheck.FinishFuncBody()
	fn.SetDupok(true)
	ir.WithFunc(fn, func() {
		typecheck.Stmts(fn.Body)
	})
	fn.SetNilCheckDisabled(true)
	return fn
}
// sysClosure returns a closure which will call the
// given runtime function (with no closed-over variables).
func sysClosure(name string) *obj.LSym {
	s := typecheck.LookupRuntimeVar(name + "·f")
	if len(s.P) > 0 {
		// Closure already emitted.
		return s
	}
	objw.SymPtr(s, 0, typecheck.LookupRuntimeFunc(name), 0)
	objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
	return s
}
// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
	// The runtime will panic if it tries to compare
	// a type with a nil equality function.
	if alg := types.AlgType(t); alg == types.ANOEQ || alg == types.ANOALG {
		return nil
	}
	return geneqSig(eqSignature(t))
}
// geneqSig returns a symbol which is the closure used to compute
// equality for two objects with equality signature sig.
func geneqSig(sig string) *obj.LSym {
	// Known alignment of the compared pointers; ptrSize unless the
	// signature carries an explicit alignment directive.
	align := int64(types.PtrSize)
	if len(sig) > 0 && sig[0] == sigAlign {
		align, sig = parseNum(sig[1:])
	}
	if base.Ctxt.Arch.CanMergeLoads {
		// Unaligned loads are fine on this platform, so treat the
		// data as fully (8-byte) aligned for closure selection.
		align = 8
	}
	// Common signatures map directly to closures over runtime
	// equality functions, provided alignment permits.
	switch sig {
	case "":
		return sysClosure("memequal0")
	case string(sigMemory) + "1":
		return sysClosure("memequal8")
	case string(sigMemory) + "2":
		if align >= 2 {
			return sysClosure("memequal16")
		}
	case string(sigMemory) + "4":
		if align >= 4 {
			return sysClosure("memequal32")
		}
	case string(sigMemory) + "8":
		if align >= 8 {
			return sysClosure("memequal64")
		}
	case string(sigMemory) + "16":
		if align >= 8 {
			return sysClosure("memequal128")
		}
	case string(sigString):
		return sysClosure("strequal")
	case string(sigIface):
		return sysClosure("interequal")
	case string(sigEface):
		return sysClosure("nilinterequal")
	case string(sigFloat32):
		return sysClosure("f32equal")
	case string(sigFloat64):
		return sysClosure("f64equal")
	case string(sigFloat32) + string(sigFloat32):
		return sysClosure("c64equal")
	case string(sigFloat64) + string(sigFloat64):
		return sysClosure("c128equal")
	}
	closure := TypeLinksymLookup(".eqfunc." + sig)
	if len(closure.P) > 0 { // already generated
		return closure
	}
	if sig[0] == sigMemory {
		n, rest := parseNum(sig[1:])
		if rest == "" {
			// Just M%d. We can make a memequal_varlen closure.
			// The size of the memory region to compare is encoded in the closure.
			if memequalvarlen == nil {
				memequalvarlen = typecheck.LookupRuntimeFunc("memequal_varlen")
			}
			ot := 0
			ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
			ot = objw.Uintptr(closure, ot, uint64(n))
			objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
			return closure
		}
	}
	if base.Flag.LowerR != 0 {
		fmt.Printf("geneqSig %s\n", sig)
	}
	fn := eqFunc(sig)
	// Generate a closure which points at the function we just generated.
	objw.SymPtr(closure, 0, fn.Linksym(), 0)
	objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
	return closure
}
// eqFunc returns the compiled function computing equality for the
// signature sig. The function has type
//
//	func(p, q unsafe.Pointer) bool
//
// Functions are cached by signature in the .eq.<sig> type symbol,
// so at most one function is built per signature.
func eqFunc(sig string) *ir.Func {
	sym := types.TypeSymLookup(".eq." + sig)
	if sym.Def != nil {
		// Already generated for this signature.
		return sym.Def.(*ir.Name).Func
	}
	pos := base.AutogeneratedPos // less confusing than end of input
	base.Pos = pos
	// func sym(p, q unsafe.Pointer) bool
	fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("p"), types.Types[types.TUNSAFEPTR]),
			types.NewField(pos, typecheck.Lookup("q"), types.Types[types.TUNSAFEPTR]),
		},
		[]*types.Field{
			types.NewField(pos, typecheck.Lookup("r"), types.Types[types.TBOOL]),
		},
	))
	sym.Def = fn.Nname
	fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
	typecheck.DeclFunc(fn)
	np := fn.Dcl[0] // p: first operand
	nq := fn.Dcl[1] // q: second operand
	nr := fn.Dcl[2] // r: boolean result
	// Label to jump to if an equality test fails.
	neq := typecheck.AutoLabel(".neq")
	// Grab known alignment of argument pointers. (ptrSize is the default.)
	align := int64(types.PtrSize)
	if len(sig) > 0 && sig[0] == sigAlign {
		sig = sig[1:]
		align, sig = parseNum(sig)
	}
	unalignedOk := base.Ctxt.Arch.CanMergeLoads
	// offset from np/nq that we're currently working on
	var off int64
	// hasCall records whether we emitted any runtime call; used below
	// to decide whether the epilogue is worth sharing.
	var hasCall bool
	// test takes a boolean. If it evaluates to false, short circuit
	// and return false immediately. Otherwise, keep checking.
	var lastTest ir.Node
	test := func(eq ir.Node) {
		// Buffer one test in lastTest so we can use the
		// last one as the return value.
		if lastTest != nil {
			nif := ir.NewIfStmt(pos, lastTest, nil, []ir.Node{ir.NewBranchStmt(pos, ir.OGOTO, neq)})
			fn.Body.Append(nif)
		}
		lastTest = eq
	}
	// load loads data of type t from np+off and nq+off.
	// Increments off by the size of t.
	load := func(t *types.Type) (ir.Node, ir.Node) {
		c := ir.NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(off))
		p := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, np, c)
		q := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, nq, c)
		x := ir.NewStarExpr(pos, ir.NewConvExpr(pos, ir.OCONVNOP, t.PtrTo(), p))
		y := ir.NewStarExpr(pos, ir.NewConvExpr(pos, ir.OCONVNOP, t.PtrTo(), q))
		off += t.Size()
		return x, y
	}
	// compare compares x and y and jumps to neq if they are not equal.
	compare := func(x, y ir.Node) {
		test(ir.NewBinaryExpr(pos, ir.OEQ, x, y))
	}
	// We keep track of string contents that we don't compare immediately.
	// We delay comparing string contents because they might be large and
	// we'd rather compare scalars farther along in the signature first.
	var pendingStrings []int64 // offsets of string headers whose contents are unchecked
	flushStrings := func() {
		// load mutates off, so save and restore it around the flush.
		defer func(saveOff int64) {
			off = saveOff
		}(off)
		for _, x := range pendingStrings {
			off = x
			ptrA, ptrB := load(types.Types[types.TUNSAFEPTR])
			len, _ := load(types.Types[types.TUINTPTR])
			// Note: we already checked that the lengths are equal.
			memeq := typecheck.LookupRuntime("memequal")
			test(typecheck.Call(pos, memeq, []ir.Node{ptrA, ptrB, len}, false))
			hasCall = true
		}
		pendingStrings = pendingStrings[:0]
	}
	// Walk the signature, emitting one comparison step per element.
	for len(sig) > 0 {
		kind := sig[0]
		sig = sig[1:]
		switch kind {
		case sigMemory:
			var n int64
			n, sig = parseNum(sig)
			if n > 64 { // TODO: why 64?
				// For big regions, call memequal.
				c := ir.NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(off))
				p := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, np, c)
				q := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, nq, c)
				len := ir.NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(n))
				memeq := typecheck.LookupRuntime("memequal")
				test(typecheck.Call(pos, memeq, []ir.Node{p, q, len}, false))
				hasCall = true
				off += n
				n = 0
			}
			// Decompose the remaining (small) region into the widest
			// loads that the known alignment allows. On platforms that
			// allow unaligned loads, odd tails are handled with one
			// overlapping wide load instead of several narrow ones.
			n0 := n
			for n != 0 {
				switch {
				case n >= 8 && (unalignedOk || align >= 8 && off%8 == 0):
					compare(load(types.Types[types.TUINT64]))
					n -= 8
				case (n == 5 || n == 6 || n == 7) && unalignedOk && n0 >= 8:
					// Overlapping 8-byte load covering the tail.
					off -= 8 - n
					compare(load(types.Types[types.TUINT64]))
					n = 0
				case n >= 4 && (unalignedOk || align >= 4 && off%4 == 0):
					compare(load(types.Types[types.TUINT32]))
					n -= 4
				case n == 3 && unalignedOk && n0 >= 4:
					// Overlapping 4-byte load covering the tail.
					off--
					compare(load(types.Types[types.TUINT32]))
					n = 0
				case n >= 2 && (unalignedOk || align >= 2 && off%2 == 0):
					compare(load(types.Types[types.TUINT16]))
					n -= 2
				default:
					compare(load(types.Types[types.TUINT8]))
					n--
				}
			}
		case sigFloat32:
			compare(load(types.Types[types.TFLOAT32]))
		case sigFloat64:
			compare(load(types.Types[types.TFLOAT64]))
		case sigString:
			// Compare just the lengths right now.
			// Save the contents for later.
			pendingStrings = append(pendingStrings, off)
			off += int64(types.PtrSize)
			compare(load(types.Types[types.TUINTPTR]))
		case sigEface, sigIface:
			// flushStrings here to ensure that we only get a panic from
			// this interface test if all previous equality checks pass.
			flushStrings()
			typeX, typeY := load(types.Types[types.TUINTPTR].PtrTo())
			compare(typeX, typeY)
			dataX, dataY := load(types.Types[types.TUNSAFEPTR])
			var eqFn *ir.Name
			if kind == sigEface {
				eqFn = typecheck.LookupRuntime("efaceeq")
			} else {
				eqFn = typecheck.LookupRuntime("ifaceeq")
			}
			test(typecheck.Call(pos, eqFn, []ir.Node{typeX, dataX, dataY}, false))
			hasCall = true
		case sigSkip:
			// Bytes that don't participate in equality (e.g. padding).
			var n int64
			n, sig = parseNum(sig)
			off += n
		case sigArrayStart:
			// Flush any pending test.
			flushStrings()
			// TODO: if the element comparison can't panic (no E or I), then
			// maybe we don't need to do this flushStrings?
			// On the other hand, maybe the unflushed string is not equal, but
			// a big following array is all equal.
			if lastTest != nil {
				nif := ir.NewIfStmt(pos, lastTest, nil, []ir.Node{ir.NewBranchStmt(pos, ir.OGOTO, neq)})
				fn.Body.Append(nif)
				lastTest = nil
			}
			var n int64
			var elemSig string
			n, elemSig, sig = parseArray(sig)
			elemSize := sigSize(elemSig)
			// Loop N times, calling comparison function for the element.
			// for i := off; i < off + N*elemSize; i += elemSize {
			//   if !eqfn(p+i, q+i) { goto neq }
			// }
			elemFn := eqFunc(sigTrimSkip(elemSig)).Nname
			idx := typecheck.TempAt(pos, ir.CurFunc, types.Types[types.TUINTPTR])
			init := ir.NewAssignStmt(pos, idx, ir.NewInt(pos, off))
			cond := ir.NewBinaryExpr(pos, ir.OLT, idx, ir.NewInt(pos, off+n*elemSize))
			post := ir.NewAssignStmt(pos, idx, ir.NewBinaryExpr(pos, ir.OADD, idx, ir.NewInt(pos, elemSize)))
			p := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, np, idx)
			q := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, nq, idx)
			call := typecheck.Call(pos, elemFn, []ir.Node{p, q}, false)
			nif := ir.NewIfStmt(pos, call, nil, []ir.Node{ir.NewBranchStmt(pos, ir.OGOTO, neq)})
			loop := ir.NewForStmt(pos, init, cond, post, []ir.Node{nif}, false)
			fn.Body.Append(loop)
			off += n * elemSize
			// TODO: if the element comparison can't panic, but has strings
			// in it, maybe we do a loop first without string contents and a
			// second loop with string contents. There is no way to accomplish
			// this now, the way this code works (to call the equality
			// function of the sub-signature).
		}
	}
	// Flush any pending tests.
	// The last test is used directly as a result (instead of branching using it).
	flushStrings()
	if lastTest == nil {
		// Nothing to compare: everything is trivially equal.
		lastTest = ir.NewBool(pos, true)
	}
	as := ir.NewAssignStmt(pos, nr, lastTest)
	fn.Body.Append(as)
	// ret:
	//   return
	ret := typecheck.AutoLabel(".ret")
	fn.Body.Append(ir.NewLabelStmt(pos, ret))
	fn.Body.Append(ir.NewReturnStmt(pos, nil))
	// neq:
	//   r = false
	//   return (or goto ret)
	fn.Body.Append(ir.NewLabelStmt(pos, neq))
	fn.Body.Append(ir.NewAssignStmt(pos, nr, ir.NewBool(pos, false)))
	if hasCall {
		// Epilogue is large, so share it with the equal case.
		fn.Body.Append(ir.NewBranchStmt(pos, ir.OGOTO, ret))
	} else {
		// Epilogue is small, so don't bother sharing.
		fn.Body.Append(ir.NewReturnStmt(pos, nil))
	}
	// TODO(khr): the epilogue size detection condition above isn't perfect.
	// We should really do a generic CL that shares epilogues across
	// the board. See #24936.
	if base.Flag.LowerR != 0 {
		ir.DumpList("geneq body", fn.Body)
	}
	typecheck.FinishFuncBody()
	fn.SetDupok(true)
	ir.WithFunc(fn, func() {
		typecheck.Stmts(fn.Body)
	})
	// Disable checknils while compiling this code.
	// We are comparing a struct or an array,
	// neither of which can be nil, and our comparisons
	// are shallow.
	fn.SetNilCheckDisabled(true)
	return fn
}
// EqFor returns an ONAME node representing type t's equality function,
// and a boolean indicating whether a length argument needs to be passed
// when calling that function.
func EqFor(t *types.Type) (ir.Node, bool) {
	switch types.AlgType(t) {
	case types.AMEM:
		// Plain memory comparison; the caller must supply the size.
		return typecheck.LookupRuntime("memequal"), true
	case types.ASPECIAL:
		// Specially generated comparison function; no length needed.
		return eqFunc(eqSignature(t)).Nname, false
	default:
		base.Fatalf("EqFor %v", t)
		return nil, false
	}
}
// eqSignature returns a signature of the equality function for type t.
// If two types have the same signature, they can use the same equality function.
// The signature lists the comparisons that the equality function needs
// to make, in order. So for instance, a type like:
//
// type S struct {
// i int32
// j uint32
// s string
// e error
// }
//
// Will have the signature "M8SI".
//
// M8 = 8 bytes of regular memory
// S = string
// I = nonempty interface
//
// The content of the signature is not intended for users. It is an
// internal condensation of the comparison operations that need to be
// performed.
// (Although, note that these names might be seen in tracebacks where
// the equality test panics due to incomparable interfaces.)
//
// Full signature spec:
//
// M%d = %d bytes of memory that should be compared directly
// K%d = %d bytes of memory that should not be compared (sKip)
// F = float32
// G = float64
// S = string
// I = non-empty interface
// E = empty interface
// [%d%s] = array: repeat signature %s %d times.
// A%d = known alignment of type pointers (defaults to ptrSize)
//
// An alignment directive is only needed on platforms that can't do
// unaligned loads.
// If an alignment directive is present, it must be first.
// Signatures can end early; a K%d is not required at the end.
func eqSignature(t *types.Type) string {
	var b eqSigBuilder
	// Alignment only matters if we can't use unaligned loads.
	if !base.Ctxt.Arch.CanMergeLoads {
		if a := t.Alignment(); a != int64(types.PtrSize) {
			fmt.Fprintf(&b.r, "%c%d", sigAlign, a)
		}
	}
	b.build(t)
	b.flush(true)
	return b.r.String()
}
// Signature characters emitted by eqSigBuilder and consumed by
// hashFunc/eqFunc. See the eqSignature doc comment for the full grammar.
const (
	sigMemory     = 'M' // followed by an integer number of bytes
	sigSkip       = 'K' // followed by an integer number of bytes
	sigFloat32    = 'F'
	sigFloat64    = 'G'
	sigString     = 'S'
	sigIface      = 'I' // non-empty interface
	sigEface      = 'E' // empty interface
	sigArrayStart = '[' // followed by an iteration count, element signature, and sigArrayEnd
	sigArrayEnd   = ']'
	sigAlign      = 'A' // followed by an integer byte alignment
)
// eqSigBuilder incrementally constructs an equality signature string.
// Adjacent runs of regular memory and of skipped memory are coalesced
// in regMem/skipMem and only written out when flushed.
type eqSigBuilder struct {
	r       strings.Builder
	regMem  int64 // queued up region of regular memory
	skipMem int64 // queued up region of memory to skip
}
// flush writes any queued memory regions to the signature.
// If atEnd is true the signature is complete, and a trailing
// skip region is dropped rather than written (a K%d is not
// required at the end of a signature).
func (e *eqSigBuilder) flush(atEnd bool) {
	if e.regMem > 0 {
		fmt.Fprintf(&e.r, "%c%d", sigMemory, e.regMem)
		e.regMem = 0
	}
	if e.skipMem > 0 {
		if !atEnd {
			fmt.Fprintf(&e.r, "%c%d", sigSkip, e.skipMem)
		}
		e.skipMem = 0
	}
}
// regular queues n bytes of directly-comparable memory.
func (e *eqSigBuilder) regular(n int64) {
	if e.regMem == 0 {
		// Starting a new regular region; write out any queued skip first.
		e.flush(false)
	}
	e.regMem += n
}
// skip queues n bytes of memory that should not be compared (padding).
func (e *eqSigBuilder) skip(n int64) {
	if e.skipMem == 0 {
		// Starting a new skip region; write out any queued regular memory first.
		e.flush(false)
	}
	e.skipMem += n
}
// float32 appends a float32 comparison to the signature.
func (e *eqSigBuilder) float32() {
	e.flush(false)
	e.r.WriteByte(sigFloat32)
}
// float64 appends a float64 comparison to the signature.
func (e *eqSigBuilder) float64() {
	e.flush(false)
	e.r.WriteByte(sigFloat64)
}
// string appends a string comparison to the signature.
func (e *eqSigBuilder) string() {
	e.flush(false)
	e.r.WriteByte(sigString)
}
// eface appends an empty-interface comparison to the signature.
func (e *eqSigBuilder) eface() {
	e.flush(false)
	e.r.WriteByte(sigEface)
}
// iface appends a non-empty-interface comparison to the signature.
func (e *eqSigBuilder) iface() {
	e.flush(false)
	e.r.WriteByte(sigIface)
}
// build appends the equality signature for type t to e.
func (e *eqSigBuilder) build(t *types.Type) {
	switch t.Kind() {
	case types.TINT8, types.TUINT8, types.TBOOL:
		e.regular(1)
	case types.TINT16, types.TUINT16:
		e.regular(2)
	case types.TINT32, types.TUINT32:
		e.regular(4)
	case types.TINT64, types.TUINT64:
		e.regular(8)
	case types.TINT, types.TUINT, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
		e.regular(int64(types.PtrSize))
	case types.TFLOAT32:
		e.float32()
	case types.TFLOAT64:
		e.float64()
	case types.TCOMPLEX64:
		// Compare real and imaginary parts independently.
		e.float32()
		e.float32()
	case types.TCOMPLEX128:
		e.float64()
		e.float64()
	case types.TSTRING:
		e.string()
	case types.TINTER:
		if t.IsEmptyInterface() {
			e.eface()
		} else {
			e.iface()
		}
	case types.TSTRUCT:
		// Walk the fields in offset order, skipping blank fields
		// and any padding between or after fields.
		var off int64
		for _, f := range t.Fields() {
			if f.Sym.IsBlank() {
				continue
			}
			if off < f.Offset {
				e.skip(f.Offset - off)
			}
			e.build(f.Type)
			off = f.Offset + f.Type.Size()
		}
		if off < t.Size() {
			e.skip(t.Size() - off)
		}
	case types.TARRAY:
		if types.AlgType(t) == types.AMEM {
			// TODO: some "regular equality" types don't hit here,
			// like [8]sync/atomic.Pointer. Figure out how to
			// handle the subtle difference between "AMEM" and
			// "can be compared byte-by-byte for equality".
			e.regular(t.Size())
			break
		}
		et := t.Elem()
		n := t.NumElem()
		switch n {
		case 0:
			// Zero-length array: nothing to compare.
		case 1:
			e.build(et)
		default:
			// To keep signatures small, we can't just repeat
			// the element signature N times. Instead, we issue
			// an array into the signature. Note that this can
			// lead to a situation where two types which could
			// share an equality function do not, like
			//   struct { a, b, c, d string }  sig: SSSS
			//   [4]string                     sig: [4S]
			// That's ok, just a tad inefficient.
			//
			// The generated loops are kind of inefficient as well,
			// so unroll the loop a bit.
			const unrollSize = 32 // make loop body compare around this many bytes
			if et.Size() == 0 {
				break // zero-size elements need no comparison
			}
			unroll := max(1, unrollSize/et.Size())
			// Do partial loops directly.
			for n%unroll != 0 {
				e.build(et)
				n--
			}
			if n == 0 {
				break
			}
			// If we only have one loop left, do it directly.
			if n == unroll {
				for range n {
					e.build(et)
				}
				break
			}
			// Emit [count elemsig] with the unrolled element body.
			e.flush(false)
			e.r.WriteString(fmt.Sprintf("%c%d", sigArrayStart, n/unroll))
			for range unroll {
				e.build(et)
			}
			e.flush(false)
			e.r.WriteByte(sigArrayEnd)
		}
	default:
		base.Fatalf("eqSigBuilder %v", t)
	}
}
// parseNum parses the decimal integer at the start of s, returning its
// value and the remainder of s after the digits.
func parseNum(s string) (int64, string) {
	end := 0
	for end < len(s) && '0' <= s[end] && s[end] <= '9' {
		end++
	}
	v, err := strconv.ParseInt(s[:end], 10, 64)
	if err != nil {
		base.Fatalf("bad integer: %s", s[:end])
	}
	return v, s[end:]
}
// parseArray parses "%d%s]" from the front of a signature.
// Returns the repeat count (the %d), the element signature
// (the %s), and any remaining signature after the closing ']'.
func parseArray(sig string) (int64, string, string) {
	n, rest := parseNum(sig)
	// Scan for the matching closing bracket, tracking nesting depth.
	depth := 1
	for i := 0; i < len(rest); i++ {
		switch rest[i] {
		case sigArrayStart:
			depth++
		case sigArrayEnd:
			depth--
			if depth == 0 {
				return n, rest[:i], rest[i+1:]
			}
		}
	}
	base.Fatalf("mismatched brackets in %s", rest)
	return 0, "", "" // unreachable; Fatalf does not return
}
// sigSize returns the size of the type described by the signature.
func sigSize(sig string) int64 {
var size int64
for len(sig) > 0 {
kind := sig[0]
sig = sig[1:]
switch kind {
case sigMemory, sigSkip:
var n int64
n, sig = parseNum(sig)
size += n
case sigFloat32:
size += 4
case sigFloat64:
size += 8
case sigString, sigIface, sigEface:
size += 2 * int64(types.PtrSize)
case sigArrayStart:
var n int64
var elemSig string
n, elemSig, sig = parseArray(sig)
size += n * sigSize(elemSig)
}
}
return size
}
// sigTrimSkip removes a trailing skip region (K%d) from signature s,
// if present. A trailing skip is not needed for equality comparison.
func sigTrimSkip(s string) string {
	i := strings.LastIndexByte(s, sigSkip)
	if i < 0 {
		// No skip region at all.
		return s
	}
	// Only trim if everything after the K is digits, i.e. the
	// skip region is the final element of the signature.
	for _, c := range []byte(s[i+1:]) {
		if c < '0' || c > '9' {
			return s
		}
	}
	return s[:i]
}