gocore: overhaul roots, expand testing
This change overhauls how roots work to prepare for supporting composite
values (roots "de-structured" and broken up into pieces, despite
representing a single type). It also expands testing a little bit to
prevent backsliding from this point.
Roots now do not have an address identity at all, and callers do not
rely on that fact anymore. They have a forged identity which is useful
for some calculations, but otherwise can only be identified by a *Root.
Pointers may now be found in composite roots, which is nice, so all
destructured values work correctly for the sake of iterating over all
pointers.
Finally, this fixes a small, old bug in typeObject where the type
pointer would be interrogated before we filtered out whether an interface
was nil or not. Because we filter dead pointers by making them appear
nil, this means we could end up looking at a bogus, clobbered type
pointer. (This may be a bug in the compiler's DWARF generation, the type
shouldn't be clobbered.)
Change-Id: I09217b2070dc6f4bf4b1dbd28d52f570ba61ca4a
Reviewed-on: https://go-review.googlesource.com/c/debug/+/635857
Auto-Submit: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Nicolas Hillegeer <aktau@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
diff --git a/cmd/viewcore/html.go b/cmd/viewcore/html.go
index 1b16c14..5180514 100644
--- a/cmd/viewcore/html.go
+++ b/cmd/viewcore/html.go
@@ -170,7 +170,9 @@
fmt.Fprintf(w, "<table>\n")
fmt.Fprintf(w, "<tr><th align=left>field</th><th align=left colspan=\"2\">type</th><th align=left>value</th></tr>\n")
for _, r := range f.Roots() {
- htmlObject(w, c, r.Name, r.Addr, r.Type, f.Live)
+ if r.HasAddress() {
+ htmlObject(w, c, r.Name, r.Addr(), r.Type, f.Live)
+ }
}
fmt.Fprintf(w, "</table>\n")
}
@@ -181,7 +183,9 @@
fmt.Fprintf(w, "<table>\n")
fmt.Fprintf(w, "<tr><th align=left>field</th><th align=left colspan=\"2\">type</th><th align=left>value</th></tr>\n")
for _, r := range c.Globals() {
- htmlObject(w, c, r.Name, r.Addr, r.Type, nil)
+ if r.HasAddress() {
+ htmlObject(w, c, r.Name, r.Addr(), r.Type, nil)
+ }
}
fmt.Fprintf(w, "</table>\n")
})
diff --git a/internal/gocore/dominator.go b/internal/gocore/dominator.go
index ca2709f..ecfc7a2 100644
--- a/internal/gocore/dominator.go
+++ b/internal/gocore/dominator.go
@@ -228,7 +228,7 @@
d.p.ForEachReversePtr(obj, func(x Object, r *Root, _, _ int64) bool {
var v int
if r != nil {
- v = d.p.findRootIndex(r) + 1
+ v = r.id + 1
} else {
v, _ = d.p.findObjectIndex(d.p.Addr(x))
v += d.nRoots + 1
@@ -397,7 +397,7 @@
if len(typeName) > 30 {
typeName = typeName[:30]
}
- label = fmt.Sprintf("root %s %#x (type %s)", root.Name, root.Addr, typeName)
+ label = fmt.Sprintf("root %s (type %s)", root.Name, typeName)
default:
typ, _ := d.p.Type(obj)
var typeName string
diff --git a/internal/gocore/dominator_test.go b/internal/gocore/dominator_test.go
index 2f9c85d..a057499 100644
--- a/internal/gocore/dominator_test.go
+++ b/internal/gocore/dominator_test.go
@@ -123,7 +123,7 @@
case n == pseudoRoot:
return &pRoot
case r != nil:
- return &roots[d.p.findRootIndex(r)]
+ return &roots[r.id]
default:
idx, _ := d.p.findObjectIndex(d.p.Addr(o))
return &objects[idx]
@@ -168,7 +168,7 @@
func (v *sanityVertex) String(p *Process) string {
switch {
case v.root != nil:
- return fmt.Sprintf("root %s %#x (type %s)", v.root.Name, v.root.Addr, v.root.Type)
+ return fmt.Sprintf("root %s (type %s)", v.root.Name, v.root.Type)
case v.obj != 0:
typ, _ := p.Type(v.obj)
var typeName string
diff --git a/internal/gocore/dwarf.go b/internal/gocore/dwarf.go
index e3fc321..aba997a 100644
--- a/internal/gocore/dwarf.go
+++ b/internal/gocore/dwarf.go
@@ -299,7 +299,7 @@
return consts, nil
}
-func readGlobals(p *core.Process, dwarfTypeMap map[dwarf.Type]*Type) ([]*Root, error) {
+func readGlobals(p *core.Process, nRoots *int, dwarfTypeMap map[dwarf.Type]*Type) ([]*Root, error) {
d, err := p.DWARF()
if err != nil {
return nil, fmt.Errorf("failed to read DWARF: %v", err)
@@ -354,12 +354,8 @@
if nf == nil {
continue
}
- roots = append(roots, &Root{
- Name: nf.Val.(string),
- Addr: a,
- Type: dwarfTypeMap[dt],
- Frame: nil,
- })
+ typ := dwarfTypeMap[dt]
+ roots = append(roots, makeMemRoot(nRoots, nf.Val.(string), typ, nil, a))
}
return roots, nil
}
diff --git a/internal/gocore/gocore_test.go b/internal/gocore/gocore_test.go
index a8bd3e4..02fdece 100644
--- a/internal/gocore/gocore_test.go
+++ b/internal/gocore/gocore_test.go
@@ -253,15 +253,31 @@
{"-buildmode=pie"},
} {
t.Run(strings.Join(buildFlags, ","), func(t *testing.T) {
- p := loadExampleGenerated(t, buildFlags...)
- n := 0
const largeObjectThreshold = 32768
- large := 0 // Number of objects larger than (or equal to largeObjectThreshold)
+
+ p := loadExampleGenerated(t, buildFlags...)
+
+ // Statistics to check.
+ n := 0
+ largeObjects := 0 // Number of objects larger than (or equal to largeObjectThreshold)
+ myPairObjects := 0
+ anyNodeObjects := 0
+ typeSafeNodeObjects := 0
+
p.ForEachObject(func(x Object) bool {
siz := p.Size(x)
- t.Logf("%s size=%d", typeName(p, x), p.Size(x))
+ typ := typeName(p, x)
+ t.Logf("%s size=%d", typ, p.Size(x))
if siz >= largeObjectThreshold {
- large++
+ largeObjects++
+ }
+ switch typ {
+ case "main.myPair":
+ myPairObjects++
+ case "main.anyNode":
+ anyNodeObjects++
+ case "main.typeSafeNode[main.myPair]":
+ typeSafeNodeObjects++
}
n++
return true
@@ -269,8 +285,23 @@
if n < 10 {
t.Errorf("#objects = %d, want >10", n)
}
- if large != 1 {
- t.Errorf("expected exactly one object larger than %d, found %d", largeObjectThreshold, large)
+ if largeObjects != 1 {
+ t.Errorf("expected exactly one object larger than %d, found %d", largeObjectThreshold, largeObjects)
+ }
+
+ // Check object counts.
+ const depth = 5
+ const tsTrees = 3
+ const anTrees = 2
+ const nodes = 1<<depth - 1
+ if want := tsTrees*nodes + anTrees*nodes*2; myPairObjects != want {
+ t.Errorf("expected exactly %d main.myPair objects, found %d", want, myPairObjects)
+ }
+ if want := anTrees * nodes; anyNodeObjects != want {
+ t.Errorf("expected exactly %d main.anyNode objects, found %d", want, anyNodeObjects)
+ }
+ if want := tsTrees * nodes; typeSafeNodeObjects != want {
+ t.Errorf("expected exactly %d main.typeSafeNode[main.myPair] objects, found %d", want, typeSafeNodeObjects)
}
})
}
diff --git a/internal/gocore/goroutine.go b/internal/gocore/goroutine.go
index b3eebb6..d6f65b5 100644
--- a/internal/gocore/goroutine.go
+++ b/internal/gocore/goroutine.go
@@ -13,9 +13,6 @@
stackSize int64 // current stack allocation
frames []*Frame
- // Registers containing live pointers.
- regRoots []*Root
-
// TODO: defers, in-progress panics
}
diff --git a/internal/gocore/object.go b/internal/gocore/object.go
index b0e2704..5638430 100644
--- a/internal/gocore/object.go
+++ b/internal/gocore/object.go
@@ -62,9 +62,6 @@
// Not a huge deal given that we'll just ignore outright bad pointers, but
// we may accidentally mark some objects as live erroneously.
for _, g := range p.goroutines {
- for _, r := range g.regRoots {
- add(core.Address(r.RegValue))
- }
for _, f := range g.frames {
for a := range f.Live {
add(p.proc.ReadPtr(a))
@@ -92,11 +89,10 @@
if !strings.HasPrefix(r.Name, "finalizer for ") {
continue
}
- for _, f := range r.Type.Fields {
- if f.Type.Kind == KindPtr {
- add(p.proc.ReadPtr(r.Addr.Add(f.Off)))
- }
- }
+ p.ForEachRootPtr(r, func(_ int64, o Object, _ int64) bool {
+ add(core.Address(o))
+ return true
+ })
}
// Expand root set to all reachable objects.
@@ -224,11 +220,6 @@
}
}
for _, g := range p.goroutines {
- for _, r := range g.regRoots {
- if !fn(r) {
- return
- }
- }
for _, f := range g.frames {
for _, r := range f.roots {
if !fn(r) {
@@ -287,65 +278,8 @@
// ForEachRootPtr behaves like ForEachPtr but it starts with a Root instead of an Object.
func (p *Process) ForEachRootPtr(r *Root, fn func(int64, Object, int64) bool) {
- edges1(p, r, 0, r.Type, fn)
-}
-
-// edges1 calls fn for the edges found in an object of type t living at offset off in the root r.
-// If fn returns false, return immediately with false.
-func edges1(p *Process, r *Root, off int64, t *Type, fn func(int64, Object, int64) bool) bool {
- switch t.Kind {
- case KindBool, KindInt, KindUint, KindFloat, KindComplex:
- // no edges here
- case KindIface, KindEface:
- // The first word is a type or itab.
- // Itabs are never in the heap.
- // Types might be, though.
- a := r.Addr.Add(off)
- if r.Frame == nil || r.Frame.Live[a] {
- dst, off2 := p.FindObject(p.proc.ReadPtr(a))
- if dst != 0 {
- if !fn(off, dst, off2) {
- return false
- }
- }
- }
- // Treat second word like a pointer.
- off += p.proc.PtrSize()
- fallthrough
- case KindPtr, KindString, KindSlice, KindFunc:
- if t.Kind == KindPtr && r.Addr == 0 {
- dst, off2 := p.FindObject(core.Address(r.RegValue))
- if dst != 0 {
- if !fn(off, dst, off2) {
- return false
- }
- }
- break
- }
- a := r.Addr.Add(off)
- if r.Frame == nil || r.Frame.Live[a] {
- dst, off2 := p.FindObject(p.proc.ReadPtr(a))
- if dst != 0 {
- if !fn(off, dst, off2) {
- return false
- }
- }
- }
- case KindArray:
- s := t.Elem.Size
- for i := int64(0); i < t.Count; i++ {
- if !edges1(p, r, off+i*s, t.Elem, fn) {
- return false
- }
- }
- case KindStruct:
- for _, f := range t.Fields {
- if !edges1(p, r, off+f.Off, f.Type, fn) {
- return false
- }
- }
- }
- return true
+ ptrBuf := make([]byte, 8)
+ walkRootTypePtrs(p, r, ptrBuf, 0, r.Type, fn)
}
const heapInfoSize = 512
diff --git a/internal/gocore/process.go b/internal/gocore/process.go
index 3a5b55c..5e86ace 100644
--- a/internal/gocore/process.go
+++ b/internal/gocore/process.go
@@ -68,11 +68,17 @@
// A "reverse edge" for object #i is a location in memory where a pointer
// to object #i lives.
initReverseEdges sync.Once
- redge []core.Address
+ redge []reverseEdge
ridx []int64
- // Sorted list of all roots.
- // Only initialized if FlagReverse is passed to Core.
+ // Sorted list of all roots, sorted by id.
rootIdx []*Root
+ nRoots int
+}
+
+type reverseEdge struct {
+ addr core.Address // 0 if root != nil and vice versa.
+ root *Root // Roots do not always have well-defined addresses.
+ rOff int64 // Offset of pointer in root.
}
// Core takes a loaded core file and extracts Go information from it.
@@ -96,7 +102,7 @@
if err != nil {
return nil, err
}
- p.globals, err = readGlobals(proc, p.dwarfTypeMap)
+ p.globals, err = readGlobals(proc, &p.nRoots, p.dwarfTypeMap)
if err != nil {
return nil, err
}
@@ -105,7 +111,10 @@
p.rtGlobals = make(map[string]region)
for _, g := range p.globals {
if strings.HasPrefix(g.Name, "runtime.") {
- p.rtGlobals[g.Name[8:]] = region{p: proc, a: g.Addr, typ: g.Type}
+ if g.pieces[0].kind != addrPiece || len(g.pieces) > 1 {
+ panic("global is unexpectedly stored in pieces")
+ }
+ p.rtGlobals[g.Name[8:]] = region{p: proc, a: core.Address(g.pieces[0].value), typ: g.Type}
}
}
@@ -398,13 +407,8 @@
off = int64(offField.Uint16())
}
obj := min.Add(off)
- p.globals = append(p.globals,
- &Root{
- Name: fmt.Sprintf("finalizer for %x", obj),
- Addr: sp.a,
- Type: p.rtTypeByName["runtime.specialfinalizer"],
- Frame: nil,
- })
+ typ := p.rtTypeByName["runtime.specialfinalizer"]
+ p.globals = append(p.globals, p.makeMemRoot(fmt.Sprintf("finalizer for %x", obj), typ, nil, sp.a))
// TODO: these aren't really "globals", as they
// are kept alive by the object they reference being alive.
// But we have no way of adding edges from an object to
@@ -642,7 +646,6 @@
regs := op.NewDwarfRegisters(p.proc.StaticBase(), dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0)
// Read all the frames.
- isCrashFrame := false
for {
f, err := readFrame(p, sp, pc)
if err != nil {
@@ -666,7 +669,6 @@
for a := range f.Live {
unnamed[a] = true
}
- conservativeLiveness := isCrashFrame && len(f.Live) == 0
// Emit roots for DWARF entries.
for _, v := range dwarfVars[f.f] {
@@ -680,45 +682,35 @@
if err != nil {
return nil, fmt.Errorf("failed to execute DWARF stack program for variable %s: %v", v.name, err)
}
- if addr == 0 {
- // TODO(mknyszek): Handle composites via pieces returned by the stack program.
- continue
- }
- // If the stack program indicates that there's just a single value
- // in a single register, ExecuteStackProgram will return that register's
- // contents. Otherwise, if it returns an address, it's referring to a
- // location on the stack, which is one indirection from what we actually
- // want.
- if addr != 0 && len(pieces) == 1 && v.typ.Kind == KindPtr {
- r := &Root{
- Name: v.name,
- RegName: regnum.AMD64ToName(pieces[0].Val),
- RegValue: core.Address(addr),
- Type: v.typ,
- Frame: f,
- }
- g.regRoots = append(g.regRoots, r)
- } else {
- r := &Root{
- Name: v.name,
- Addr: core.Address(addr),
- Type: v.typ,
- Frame: f,
- }
- f.roots = append(f.roots, r)
+ if len(pieces) != 0 {
+ // The variable is "de-structured" and broken up into multiple pieces.
+ var rps []rootPiece
+ off := int64(0)
+ for _, p := range pieces {
+ rp := rootPiece{size: int64(p.Size), off: off}
+ switch p.Kind {
+ case op.AddrPiece:
+ // Remove this variable from the set of unnamed pointers.
+ delete(unnamed, core.Address(p.Val))
- if conservativeLiveness {
- // This is a frame that is likely not at a safe point, so the liveness
- // map is almost certainly empty. If it's not, then great, we can use
- // that for precise information, but otherwise, let's be conservative
- // and populate liveness data from the DWARF.
- r.Type.forEachPointer(0, p.proc.PtrSize(), func(off int64) {
- f.Live[r.Addr.Add(off)] = true
- })
+ rp.kind = addrPiece
+ rp.value = p.Val
+ case op.RegPiece:
+ rp.kind = regPiece
+ rp.value = regs.Uint64Val(p.Val)
+ case op.ImmPiece:
+ rp.kind = immPiece
+ rp.value = p.Val
+ }
+ off += int64(p.Size)
}
+ f.roots = append(f.roots, p.makeCompositeRoot(v.name, v.typ, f, rps))
+ } else if addr != 0 && len(pieces) == 0 {
+ // Simple contiguous stack location.
+ f.roots = append(f.roots, p.makeMemRoot(v.name, v.typ, f, core.Address(addr)))
// Remove this variable from the set of unnamed pointers.
- for a := r.Addr; a < r.Addr.Add(r.Type.Size); a = a.Add(p.proc.PtrSize()) {
+ for a := core.Address(addr); a < core.Address(addr).Add(v.typ.Size); a = a.Add(p.proc.PtrSize()) {
delete(unnamed, a)
}
}
@@ -732,13 +724,8 @@
}
sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
for _, a := range s {
- r := &Root{
- Name: "unk",
- Addr: a,
- Type: p.rtTypeByName["unsafe.Pointer"],
- Frame: f,
- }
- f.roots = append(f.roots, r)
+ typ := p.rtTypeByName["unsafe.Pointer"]
+ f.roots = append(f.roots, p.makeMemRoot("unk", typ, f, a))
}
// Figure out how to unwind to the next frame.
@@ -853,13 +840,9 @@
// Update register state.
dregs := hardwareRegs2DWARF(hregs)
regs = op.NewDwarfRegisters(p.proc.StaticBase(), dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0)
-
- isCrashFrame = true
} else {
sp = f.max
pc = core.Address(p.proc.ReadUintptr(sp - 8)) // TODO:amd64 only
-
- isCrashFrame = false
}
if pc == 0 {
// TODO: when would this happen?
diff --git a/internal/gocore/reverse.go b/internal/gocore/reverse.go
index bc1e56a..ee9553a 100644
--- a/internal/gocore/reverse.go
+++ b/internal/gocore/reverse.go
@@ -4,12 +4,6 @@
package gocore
-import (
- "sort"
-
- "golang.org/x/debug/internal/core"
-)
-
func (p *Process) reverseEdges() {
p.initReverseEdges.Do(func() {
// First, count the number of edges into each object.
@@ -40,7 +34,7 @@
}
// Allocate all the storage for the reverse edges.
- p.redge = make([]core.Address, n)
+ p.redge = make([]reverseEdge, n)
// Add edges to the lists.
p.ForEachObject(func(x Object) bool {
@@ -49,7 +43,7 @@
e := cnt[idx]
e--
cnt[idx] = e
- p.redge[e] = p.Addr(x).Add(i)
+ p.redge[e] = reverseEdge{addr: p.Addr(x).Add(i)}
return true
})
return true
@@ -60,7 +54,7 @@
e := cnt[idx]
e--
cnt[idx] = e
- p.redge[e] = r.Addr.Add(i)
+ p.redge[e] = reverseEdge{root: r, rOff: i}
return true
})
return true
@@ -70,11 +64,11 @@
p.ridx = cnt
// Make root index.
+ p.rootIdx = make([]*Root, p.nRoots)
p.ForEachRoot(func(r *Root) bool {
- p.rootIdx = append(p.rootIdx, r)
+ p.rootIdx[r.id] = r
return true
})
- sort.Slice(p.rootIdx, func(i, j int) bool { return p.rootIdx[i].Addr < p.rootIdx[j].Addr })
})
}
@@ -91,33 +85,24 @@
idx, _ := p.findObjectIndex(p.Addr(y))
for _, a := range p.redge[p.ridx[idx]:p.ridx[idx+1]] {
- // Read pointer, compute offset in y.
- ptr := p.proc.ReadPtr(a)
- j := ptr.Sub(p.Addr(y))
-
- // Find source of pointer.
- x, i := p.FindObject(a)
- if x != 0 {
- // Source is an object.
- if !fn(x, nil, i, j) {
+ if a.root != nil {
+ ptr := p.readRootPtr(a.root, a.rOff)
+ if !fn(0, a.root, a.rOff, ptr.Sub(p.Addr(y))) {
return
}
- continue
- }
- // Source is a root.
- k := sort.Search(len(p.rootIdx), func(k int) bool {
- r := p.rootIdx[k]
- return a < r.Addr.Add(r.Type.Size)
- })
- r := p.rootIdx[k]
- if !fn(0, r, a.Sub(r.Addr), j) {
- return
+ } else {
+ // Read pointer, compute offset in y.
+ ptr := p.proc.ReadPtr(a.addr)
+ j := ptr.Sub(p.Addr(y))
+
+ // Find source of pointer.
+ x, i := p.FindObject(a.addr)
+ if x != 0 {
+ // Source is an object.
+ if !fn(x, nil, i, j) {
+ return
+ }
+ }
}
}
}
-
-func (p *Process) findRootIndex(r *Root) int {
- return sort.Search(len(p.rootIdx), func(k int) bool {
- return p.rootIdx[k].Addr >= r.Addr
- })
-}
diff --git a/internal/gocore/root.go b/internal/gocore/root.go
index f44dea0..8d4ac99 100644
--- a/internal/gocore/root.go
+++ b/internal/gocore/root.go
@@ -5,20 +5,199 @@
package gocore
import (
+ "encoding/binary"
+ "unsafe"
+
"golang.org/x/debug/internal/core"
)
// A Root is an area of memory that might have pointers into the Go heap.
type Root struct {
- Name string
- Addr core.Address
- RegName string
- RegValue core.Address // set if Addr == 0
- Type *Type // always non-nil
+ Name string
+ Type *Type // always non-nil
// Frame, if non-nil, points to the frame in which this root lives.
// Roots with non-nil Frame fields refer to local variables on a stack.
// A stack root might be a large type, with some of its fields live and
// others dead. Consult Frame.Live to find out which pointers in a stack
// root are live.
Frame *Frame
+
+ pieces []rootPiece
+ id int
+}
+
+// HasAddress returns true if the root is simple and contiguous, and can be
+// described with just a single address.
+func (r *Root) HasAddress() bool {
+ return len(r.pieces) == 1 && r.pieces[0].kind == addrPiece
+}
+
+// Addr returns the address of the root, if it has one.
+func (r *Root) Addr() core.Address {
+ if r.HasAddress() {
+ return core.Address(r.pieces[0].value)
+ }
+ return 0
+}
+
+func (p *Process) makeMemRoot(name string, typ *Type, fr *Frame, addr core.Address) *Root {
+ return makeMemRoot(&p.nRoots, name, typ, fr, addr)
+}
+
+func makeMemRoot(nRoots *int, name string, typ *Type, fr *Frame, addr core.Address) *Root {
+ r := &Root{
+ Name: name,
+ Type: typ,
+ Frame: fr,
+ pieces: []rootPiece{{size: typ.Size, kind: addrPiece, value: uint64(addr)}},
+ id: *nRoots,
+ }
+ *nRoots += 1
+ return r
+}
+
+func (p *Process) makeCompositeRoot(name string, typ *Type, fr *Frame, pieces []rootPiece) *Root {
+ r := &Root{
+ Name: name,
+ Type: typ,
+ Frame: fr,
+ pieces: pieces,
+ id: p.nRoots,
+ }
+ p.nRoots++
+ return r
+}
+
+func (pr *Process) readRootPtr(r *Root, offset int64) core.Address {
+ // TODO(mknyszek): Little-endian only.
+ ptrBuf := make([]byte, pr.proc.PtrSize())
+ pr.readRootAt(r, ptrBuf, offset)
+ if pr.proc.PtrSize() == 4 {
+ return core.Address(binary.LittleEndian.Uint32(ptrBuf))
+ }
+ return core.Address(binary.LittleEndian.Uint64(ptrBuf))
+}
+
+// readRootAt reads data out of this root. offset+len(b) must not exceed r.Type.Size.
+// Returns the address read from, if the read was contiguous and from memory.
+func (pr *Process) readRootAt(r *Root, b []byte, offset int64) core.Address {
+ if offset+int64(len(b)) > r.Type.Size {
+ panic("invalid range to read from root")
+ }
+ if len(b) == 0 {
+ return 0
+ }
+ bOff := int64(0)
+ var addr core.Address
+ first := true
+ for _, p := range r.pieces {
+ if offset > p.off+p.size {
+ continue
+ }
+ if offset+int64(len(b)) <= p.off {
+ break
+ }
+ pOff := max(p.off, offset)
+ base := pOff - p.off
+ rlen := min(int64(len(b))-bOff, p.size-base)
+ switch p.kind {
+ case addrPiece:
+ pr.proc.ReadAt(b[bOff:bOff+rlen], core.Address(p.value).Add(base))
+ if first {
+ addr = core.Address(p.value).Add(base)
+ } else {
+ addr = 0
+ }
+ case regPiece, immPiece:
+ // TODO(mknyszek): Supports little-endian only.
+ v := ((*[8]byte)(unsafe.Pointer(&p.value)))[:p.size]
+ copy(b[bOff:bOff+rlen], v[base:base+rlen])
+ addr = 0
+ }
+ if first {
+ first = false
+ }
+ bOff += rlen
+ if bOff == int64(len(b)) {
+ break
+ }
+ }
+ return addr
+}
+
+// walkRootTypePtrs calls fn for the edges found in an object of type t living at offset off in the root r.
+// If fn returns false, return immediately with false.
+func walkRootTypePtrs(p *Process, r *Root, ptrBuf []byte, off int64, t *Type, fn func(int64, Object, int64) bool) bool {
+ switch t.Kind {
+ case KindBool, KindInt, KindUint, KindFloat, KindComplex:
+ // no edges here
+ case KindIface, KindEface:
+ // The first word is a type or itab.
+ // Itabs are never in the heap.
+ // Types might be, though.
+ // We have no idea about the liveness of registers when a == 0.
+ a := p.readRootAt(r, ptrBuf[:p.proc.PtrSize()], off)
+ if a != 0 && (r.Frame == nil || r.Frame.Live[a]) {
+ var ptr core.Address
+ if p.proc.PtrSize() == 4 {
+ ptr = core.Address(binary.LittleEndian.Uint32(ptrBuf[:]))
+ } else {
+ ptr = core.Address(binary.LittleEndian.Uint64(ptrBuf[:]))
+ }
+ dst, off2 := p.FindObject(ptr)
+ if dst != 0 {
+ if !fn(off, dst, off2) {
+ return false
+ }
+ }
+ }
+ // Treat second word like a pointer.
+ off += p.proc.PtrSize()
+ fallthrough
+ case KindPtr, KindString, KindSlice, KindFunc:
+ a := p.readRootAt(r, ptrBuf[:p.proc.PtrSize()], off)
+ if a != 0 && (r.Frame == nil || r.Frame.Live[a]) {
+ var ptr core.Address
+ if p.proc.PtrSize() == 4 {
+ ptr = core.Address(binary.LittleEndian.Uint32(ptrBuf[:]))
+ } else {
+ ptr = core.Address(binary.LittleEndian.Uint64(ptrBuf[:]))
+ }
+ dst, off2 := p.FindObject(ptr)
+ if dst != 0 {
+ if !fn(off, dst, off2) {
+ return false
+ }
+ }
+ }
+ case KindArray:
+ s := t.Elem.Size
+ for i := int64(0); i < t.Count; i++ {
+ if !walkRootTypePtrs(p, r, ptrBuf, off+i*s, t.Elem, fn) {
+ return false
+ }
+ }
+ case KindStruct:
+ for _, f := range t.Fields {
+ if !walkRootTypePtrs(p, r, ptrBuf, off+f.Off, f.Type, fn) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+type rootPieceKind int
+
+const (
+ addrPiece rootPieceKind = iota
+ regPiece
+ immPiece
+)
+
+type rootPiece struct {
+ off int64 // Logical offset into the root, or specifically the root's Type.
+ size int64 // Size of the piece.
+ kind rootPieceKind // Where the piece is.
+ value uint64 // Address if kind == addrPiece, value if kind == regPiece or kind == immPiece.
}
diff --git a/internal/gocore/testdata/coretest/test.go b/internal/gocore/testdata/coretest/test.go
index d78fa55..bd35566 100644
--- a/internal/gocore/testdata/coretest/test.go
+++ b/internal/gocore/testdata/coretest/test.go
@@ -12,16 +12,165 @@
arr [32768 - 8]uint8
}
-func ping(o *Large) {
+func useLarge(o *Large, ready chan<- struct{}) {
o.ptr = &o.arr[5]
o.arr[5] = 0xCA
+ ready <- struct{}{}
+ <-block
}
+type AnyTree struct {
+ root any
+}
+
+type anyNode struct {
+ left, right any // *anyNode, to test direct eface type walking
+ entry1 any // myPair, to test indirect eface type walking
+ entry2 Xer // myPair, to test indirect iface type walking
+}
+
+func makeAnyTree(depth int64) any {
+ if depth == 0 {
+ return nil
+ }
+ e1 := myPair{depth, depth}
+ e2 := myPair{depth, depth}
+ n := &anyNode{
+ left: makeAnyTree(depth - 1),
+ right: makeAnyTree(depth - 1),
+ entry1: e1,
+ entry2: e2,
+ }
+ if depth%2 == 0 {
+ // Test dealing with direct interfaces of wrapped structures.
+ return anyNodeWrap2{{n}}
+ }
+ return n
+}
+
+//go:noinline
+func (a *AnyTree) count() int {
+ return countAnyNode(a.root)
+}
+
+func countAnyNode(a any) int {
+ switch v := a.(type) {
+ case *anyNode:
+ return v.count()
+ case anyNodeWrap2:
+ return v.unwrap().count()
+ }
+ return 0
+}
+
+// This is load-bearing to make sure anyNodeWrap2 ends up in the binary.
+//
+//go:noinline
+func (w anyNodeWrap2) unwrap() *anyNode {
+ return w[0].anyNode
+}
+
+func (a *anyNode) count() int {
+ return 1 + countAnyNode(a.left) + countAnyNode(a.right)
+}
+
+type anyNodeWrap struct{ *anyNode }
+type anyNodeWrap2 [1]anyNodeWrap
+
+type TypeSafeTree[K any] struct {
+ root *typeSafeNode[K]
+}
+
+type typeSafeNode[K any] struct {
+ left, right *typeSafeNode[K]
+ entry *K
+}
+
+func makeTypeSafeTree(depth int64) *typeSafeNode[myPair] {
+ if depth == 0 {
+ return nil
+ }
+ return &typeSafeNode[myPair]{
+ left: makeTypeSafeTree(depth - 1),
+ right: makeTypeSafeTree(depth - 1),
+ entry: &myPair{depth, depth},
+ }
+}
+
+//go:noinline
+func (t *TypeSafeTree[K]) count() int {
+ return t.root.count()
+}
+
+func (t *typeSafeNode[K]) count() int {
+ if t == nil {
+ return 0
+ }
+ return 1 + t.left.count() + t.right.count()
+}
+
+type myPair struct {
+ x, y int64
+}
+
+func (p myPair) X() int64 {
+ return p.x
+}
+
+type Xer interface {
+ X() int64
+}
+
+var globalAnyTree AnyTree
+var globalAnyTreeFM func() int
+var globalTypeSafeTree TypeSafeTree[myPair]
+var globalTypeSafeTreeFM func() int
+
+var block = make(chan struct{})
+
+var a anyNode
+
func main() {
+ globalAnyTree.root = makeAnyTree(5)
+ globalTypeSafeTree.root = makeTypeSafeTree(5)
+
+ ready := make(chan struct{})
+ go func() {
+ var anyTree AnyTree
+ var anyTreeFM AnyTree // Captured in a method value.
+ var typeSafeTree TypeSafeTree[myPair]
+ var typeSafeTreeFM TypeSafeTree[myPair] // Captured in a method value.
+
+ // TODO(mknyszek): AnyTree is described in DWARF in pieces, and we can't handle
+ // that yet.
+ //
+ // anyTree.root = makeAnyTree(5)
+ anyTreeFM.root = makeAnyTree(5)
+ globalAnyTreeFM = anyTreeFM.count
+ typeSafeTree.root = makeTypeSafeTree(5)
+ typeSafeTreeFM.root = makeTypeSafeTree(5)
+ globalTypeSafeTreeFM = typeSafeTreeFM.count
+
+ ready <- struct{}{}
+ <-block
+
+ runtime.KeepAlive(anyTree)
+ runtime.KeepAlive(typeSafeTree)
+ }()
+
+ // Create a large value and reference
var o Large
- go ping(&o) // Force an escape of o.
- o.arr[14] = 0xDE // Prevent a future smart compiler from allocating o directly on pings stack.
+ go useLarge(&o, ready) // Force an escape of o.
+ o.arr[14] = 0xDE // Prevent a future smart compiler from allocating o directly on useLarge's stack.
+
+ // This is load-bearing to make sure anyNodeWrap2 and the count methods end up in the DWARF.
+ println("tree counts:", globalAnyTree.count(), globalTypeSafeTree.count())
+
+ // Make sure both goroutines are ready.
+ <-ready
+ <-ready
_ = *(*int)(nil)
+
runtime.KeepAlive(&o)
}
diff --git a/internal/gocore/type.go b/internal/gocore/type.go
index 1d63740..1d052a2 100644
--- a/internal/gocore/type.go
+++ b/internal/gocore/type.go
@@ -527,13 +527,16 @@
// Get typings starting at roots.
fr := &frameReader{p: p}
p.ForEachRoot(func(r *Root) bool {
- if r.Frame != nil {
- fr.live = r.Frame.Live
- p.typeObject(r.Addr, r.Type, fr, add)
- } else if r.Addr == 0 && r.Type.Kind == KindPtr && r.Type.Elem != nil {
- p.typeObject(r.RegValue, r.Type.Elem, fr, add)
- } else if r.Addr != 0 {
- p.typeObject(r.Addr, r.Type, p.proc, add)
+ if len(r.pieces) == 1 && r.pieces[0].kind == addrPiece {
+ addr := core.Address(r.pieces[0].value)
+ if r.Frame != nil {
+ fr.live = r.Frame.Live
+ p.typeObject(addr, r.Type, fr, add)
+ } else {
+ p.typeObject(addr, r.Type, p.proc, add)
+ }
+ } else {
+ // TODO(mknyszek): Support roots broken into pieces.
}
return true
})
@@ -620,13 +623,24 @@
case KindBool, KindInt, KindUint, KindFloat, KindComplex:
// Nothing to do
case KindEface, KindIface:
- // interface. Use the type word to determine the type
+ // Interface.
+ //
+ // Load the data word first and back out if it's a nil
+ // or typed-nil interface. We must do this because it's
+ // our only signal that this value is dead. Consider
+ // a dead eface variable on the stack: the data field
+ // will return nil because it's dead, but the type pointer
+ // will likely be bogus.
+ data := a.Add(ptrSize)
+ if r.ReadPtr(data) == 0 { // nil interface
+ return
+ }
+ // Use the type word to determine the type
// of the pointed-to object.
typPtr := r.ReadPtr(a)
if typPtr == 0 { // nil interface
return
}
- data := a.Add(ptrSize)
if t.Kind == KindIface {
typPtr = p.proc.ReadPtr(typPtr.Add(p.findItab().Type().Off))
}
@@ -743,17 +757,6 @@
// TODO: also oldbuckets
}
for _, f := range t.Fields {
- // sync.entry.p(in sync.map) is an unsafe.pointer to an empty interface.
- if t.Name == "sync.entry" && f.Name == "p" && f.Type.Kind == KindPtr && f.Type.Elem == nil {
- ptr := r.ReadPtr(a.Add(f.Off))
- if ptr != 0 {
- typ := &Type{
- Name: "sync.entry<interface{}>",
- Kind: KindEface,
- }
- add(ptr, typ, 1)
- }
- }
// hchan.buf(in chan) is an unsafe.pointer to an [dataqsiz]elemtype.
if strings.HasPrefix(t.Name, "hchan<") && f.Name == "buf" && f.Type.Kind == KindPtr {
elemType := p.proc.ReadPtr(a.Add(t.field("elemtype").Off))